file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
flash.go | //
// Copyright (c) 2014-2019 Cesanta Software Limited
// All rights reserved
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package flasher
import (
"crypto/md5"
"encoding/hex"
"io/ioutil"
"math/bits"
"sort"
"strings"
"time"
"github.com/juju/errors"
moscommon "github.com/mongoose-os/mos/cli/common"
"github.com/mongoose-os/mos/cli/flash/common"
"github.com/mongoose-os/mos/cli/flash/esp"
"github.com/mongoose-os/mos/cli/flash/esp32"
"github.com/mongoose-os/mos/common/fwbundle"
glog "k8s.io/klog/v2"
)
const (
flashSectorSize = 0x1000
flashBlockSize = 0x10000
// Pre-3.0 SDK, location of sys_params is hard-coded.
sysParamsPartType = "sys_params"
// 3.0+ SDK control placement of sys_params through partition table,
// no need for special handling.
sysParams3PartType = "sys_params3"
sysParamsAreaSize = 4 * flashSectorSize
espImageMagicByte = 0xe9
)
type image struct {
Name string
Type string
Addr uint32
Data []byte
ESP32Encrypt bool
}
type imagesByAddr []*image
func (pp imagesByAddr) Len() int { return len(pp) }
func (pp imagesByAddr) Swap(i, j int) { pp[i], pp[j] = pp[j], pp[i] }
func (pp imagesByAddr) Less(i, j int) bool {
return pp[i].Addr < pp[j].Addr
}
func enDis(enabled bool) string {
if enabled {
return "enabled"
}
return "disabled"
}
func Flash(ct esp.ChipType, fw *fwbundle.FirmwareBundle, opts *esp.FlashOpts) error {
if opts.KeepFS && opts.EraseChip {
return errors.Errorf("--keep-fs and --esp-erase-chip are incompatible")
}
cfr, err := ConnectToFlasherClient(ct, opts)
if err != nil {
return errors.Trace(err)
}
defer cfr.rc.Disconnect()
if ct == esp.ChipESP8266 {
// Based on our knowledge of flash size, adjust type=sys_params image.
adjustSysParamsLocation(fw, cfr.flashParams.Size())
}
// Sort images by address
var images []*image
for _, p := range fw.Parts {
if p.Type == fwbundle.FSPartType && opts.KeepFS {
continue
}
// For ESP32, resolve partition name to address
if p.ESP32PartitionName != "" {
pti, err := esp32.GetPartitionInfo(fw, p.ESP32PartitionName)
if err != nil {
return errors.Annotatef(err, "%s: failed to get respolve partition %q", p.Name, p.ESP32PartitionName)
}
// If partition is specified, addr can be optionally used to specify offset within the partition.
// The exception is app partition - these had both set fro compatibility since Feb 2018
// (https://github.com/cesanta/mongoose-os/commit/b8960587f4d564542c903f854e4fe1cef7bbde33)
// It's been removed in Oct 2021
// (https://github.com/cesanta/mongoose-os/commit/8d9a53f76898d736dcac96594bd4eae0cb6b83a0)
newAddr, newSize := p.Addr, p.Size
if p.Type != "app" {
newAddr = pti.Pos.Offset + p.Addr
} else {
newAddr = pti.Pos.Offset
}
if p.Size == 0 { // size = 0 -> until the end of the partition.
newSize = pti.Pos.Offset + pti.Pos.Size - newAddr
}
glog.V(1).Infof("%s: %s 0x%x %d -> 0x%x %d", p.Name, p.ESP32PartitionName, p.Addr, p.Size, newAddr, newSize)
p.Addr, p.Size = newAddr, newSize
}
data, err := fw.GetPartData(p.Name)
if err != nil {
return errors.Annotatef(err, "%s: failed to get data", p.Name)
}
im := &image{
Name: p.Name,
Type: p.Type,
Addr: p.Addr,
Data: data,
ESP32Encrypt: p.ESP32Encrypt,
}
images = append(images, im)
}
return errors.Trace(writeImages(ct, cfr, images, opts, true))
}
func writeImages(ct esp.ChipType, cfr *cfResult, images []*image, opts *esp.FlashOpts, sanityCheck bool) error {
var err error
common.Reportf("Flash size: %d, params: %s", cfr.flashParams.Size(), cfr.flashParams)
encryptionEnabled := false
secureBootEnabled := false
var esp32EncryptionKey []byte
var fusesByName map[string]*esp32.Fuse
kcs := esp32.KeyEncodingSchemeNone
if ct == esp.ChipESP32 { // TODO(rojer): Flash encryption support for ESP32-C3
_, _, fusesByName, err = esp32.ReadFuses(cfr.fc)
if err == nil {
if fcnt, err := fusesByName[esp32.FlashCryptCntFuseName].Value(true /* withDiffs */); err == nil {
encryptionEnabled = (bits.OnesCount64(fcnt.Uint64())%2 != 0)
kcs = esp32.GetKeyEncodingScheme(fusesByName)
common.Reportf("Flash encryption: %s, scheme: %s", enDis(encryptionEnabled), kcs)
}
if abs0, err := fusesByName[esp32.AbstractDone0FuseName].Value(true /* withDiffs */); err == nil {
secureBootEnabled = (abs0.Int64() != 0)
common.Reportf("Secure boot: %s", enDis(secureBootEnabled))
}
} else {
// Some boards (ARDUINO NANO 33 IOT) do not support memory reading commands to read efuses.
// Allow to proceed anyway.
common.Reportf("Failed to read eFuses, assuming no flash encryption")
}
}
for _, im := range images {
if im.Addr == 0 || im.Addr == 0x1000 && len(im.Data) >= 4 && im.Data[0] == 0xe9 {
im.Data[2], im.Data[3] = cfr.flashParams.Bytes()
}
if ct == esp.ChipESP32 && im.ESP32Encrypt && encryptionEnabled {
if esp32EncryptionKey == nil {
if opts.ESP32EncryptionKeyFile != "" {
mac := strings.ToUpper(strings.Replace(fusesByName[esp32.MACAddressFuseName].MACAddressString(), ":", "", -1))
ekf := moscommon.ExpandPlaceholders(opts.ESP32EncryptionKeyFile, "?", mac)
common.Reportf("Flash encryption key: %s", ekf)
esp32EncryptionKey, err = ioutil.ReadFile(ekf)
if err != nil {
return errors.Annotatef(err, "failed to read encryption key")
}
} else {
return errors.Errorf("flash encryption is enabled but encryption key is not provided")
}
}
encrKey := esp32EncryptionKey[:]
switch kcs {
case esp32.KeyEncodingSchemeNone:
if len(esp32EncryptionKey) != 32 {
return errors.Errorf("encryption key must be 32 bytes, got %d", len(esp32EncryptionKey))
}
case esp32.KeyEncodingScheme34:
if len(esp32EncryptionKey) != 24 {
return errors.Errorf("encryption key must be 24 bytes, got %d", len(esp32EncryptionKey))
}
// Extend the key, per 3/4 encoding scheme.
encrKey = append(encrKey, encrKey[8:16]...)
}
encData, err := esp32.ESP32EncryptImageData(
im.Data, encrKey, im.Addr, opts.ESP32FlashCryptConf)
if err != nil {
return errors.Annotatef(err, "%s: failed to encrypt", im.Name)
}
im.Data = encData
}
}
sort.Sort(imagesByAddr(images))
if sanityCheck {
err = sanityCheckImages(ct, images, cfr.flashParams.Size(), flashSectorSize)
if err != nil {
return errors.Trace(err)
}
}
imagesToWrite := images
if opts.EraseChip {
common.Reportf("Erasing chip...")
if err = cfr.fc.EraseChip(); err != nil {
return errors.Annotatef(err, "failed to erase chip")
}
} else if opts.MinimizeWrites {
common.Reportf("Deduping...")
imagesToWrite, err = dedupImages(cfr.fc, images)
if err != nil {
return errors.Annotatef(err, "failed to dedup images")
}
}
if len(imagesToWrite) > 0 {
common.Reportf("Writing...")
start := time.Now()
totalBytesWritten := 0
for _, im := range imagesToWrite {
data := im.Data
numAttempts := 3
imageBytesWritten := 0
addr := im.Addr
if len(data)%flashSectorSize != 0 {
newData := make([]byte, len(data))
copy(newData, data)
paddingLen := flashSectorSize - len(data)%flashSectorSize
for i := 0; i < paddingLen; i++ {
newData = append(newData, 0xff)
}
data = newData
}
for i := 1; imageBytesWritten < len(im.Data); i++ {
common.Reportf(" %7d @ 0x%x", len(data), addr)
bytesWritten, err := cfr.fc.Write(addr, data, true /* erase */, opts.EnableCompression)
if err != nil {
if bytesWritten >= flashSectorSize {
// We made progress, restart the retry counter.
i = 1
}
err = errors.Annotatef(err, "write error (attempt %d/%d)", i, numAttempts)
if i >= numAttempts {
return errors.Annotatef(err, "%s: failed to write", im.Name)
}
glog.Warningf("%s", err)
if err := cfr.fc.Sync(); err != nil {
return errors.Annotatef(err, "lost connection with the flasher")
}
// Round down to sector boundary
bytesWritten = bytesWritten - (bytesWritten % flashSectorSize)
data = data[bytesWritten:]
}
imageBytesWritten += bytesWritten
addr += uint32(bytesWritten)
}
totalBytesWritten += len(im.Data)
}
seconds := time.Since(start).Seconds()
bytesPerSecond := float64(totalBytesWritten) / seconds
common.Reportf("Wrote %d bytes in %.2f seconds (%.2f KBit/sec)", totalBytesWritten, seconds, bytesPerSecond*8/1024)
}
if !opts.NoVerify {
common.Reportf("Verifying...")
numBytes := 0
start := time.Now()
for _, im := range images {
numBytes += len(im.Data)
common.Reportf(" %7d @ 0x%x", len(im.Data), im.Addr)
addr, done := im.Addr, 0
for done < len(im.Data) {
size := len(im.Data) - done
if size > 0x100000 {
size = 0x100000
}
data := im.Data[done : done+size]
digest, err := cfr.fc.Digest(addr, uint32(size), 0 /* blockSize */)
if err != nil {
return errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, size, addr)
}
if len(digest) != 1 || len(digest[0]) != 16 {
return errors.Errorf("unexpected digest packetresult %+v", digest)
}
digestHex := strings.ToLower(hex.EncodeToString(digest[0]))
expectedDigest := md5.Sum(data)
expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
if digestHex != expectedDigestHex {
return errors.Errorf("%d @ 0x%x: digest mismatch: expected %s, got %s", size, addr, expectedDigestHex, digestHex)
}
addr += uint32(size)
done += size
}
}
elapsed := time.Since(start)
glog.Infof("Verified %d bytes in %s, %.2f Kbit/sec", numBytes, elapsed, float64(numBytes*8)/elapsed.Seconds()/1000)
}
if opts.BootFirmware {
common.Reportf("Booting firmware...")
if err = cfr.fc.BootFirmware(); err != nil {
return errors.Annotatef(err, "failed to reboot into firmware")
}
}
return nil
}
func adjustSysParamsLocation(fw *fwbundle.FirmwareBundle, flashSize int) {
sysParamsAddr := uint32(flashSize - sysParamsAreaSize)
for _, p := range fw.Parts {
if p.Type != sysParamsPartType {
continue
}
if p.Addr != sysParamsAddr {
glog.Infof("Sys params image moved from 0x%x to 0x%x", p.Addr, sysParamsAddr)
p.Addr = sysParamsAddr
}
}
}
func sanityCheckImages(ct esp.ChipType, images []*image, flashSize, flashSectorSize int) error {
// Note: we require that images are sorted by address.
sort.Sort(imagesByAddr(images))
esp8266CheckSysParams := true
for _, im := range images {
if im.Type == sysParams3PartType {
// No need to check, firmware controls palcement of sys_params.
esp8266CheckSysParams = false
}
}
for i, im := range images {
imageBegin := int(im.Addr)
imageEnd := imageBegin + len(im.Data)
if imageBegin >= flashSize || imageEnd > flashSize {
return errors.Errorf(
"Image %d @ 0x%x will not fit in flash (size %d)", len(im.Data), imageBegin, flashSize)
}
if imageBegin%flashSectorSize != 0 {
return errors.Errorf("Image starting address (0x%x) is not on flash sector boundary (sector size %d)",
imageBegin,
flashSectorSize)
}
if imageBegin == 0 && len(im.Data) > 0 {
if im.Data[0] != espImageMagicByte {
return errors.Errorf("Invalid magic byte in the first image")
}
}
if ct == esp.ChipESP8266 && esp8266CheckSysParams {
sysParamsBegin := flashSize - sysParamsAreaSize
if imageBegin == sysParamsBegin && im.Type == sysParamsPartType {
// Ok, a sys_params image.
} else if imageEnd > sysParamsBegin {
return errors.Errorf("Image 0x%x overlaps with system params area (%d @ 0x%x)",
imageBegin, sysParamsAreaSize, sysParamsBegin)
}
}
if i > 0 {
prevImageBegin := int(images[i-1].Addr)
prevImageEnd := prevImageBegin + len(images[i-1].Data)
// We traverse the list in order, so a simple check will suffice.
if prevImageEnd > imageBegin {
return errors.Errorf("Images 0x%x and 0x%x overlap", prevImageBegin, imageBegin)
}
}
}
return nil
}
func dedupImages(fc *FlasherClient, images []*image) ([]*image, error) {
var dedupedImages []*image
for _, im := range images {
glog.V(2).Infof("%d @ 0x%x", len(im.Data), im.Addr)
imAddr := int(im.Addr)
digests, err := fc.Digest(im.Addr, uint32(len(im.Data)), flashSectorSize)
if err != nil {
return nil, errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, len(im.Data), im.Addr)
}
i, offset := 0, 0
var newImages []*image
newAddr, newLen, newTotalLen := imAddr, 0, 0
for offset < len(im.Data) {
blockLen := flashSectorSize
if offset+blockLen > len(im.Data) {
blockLen = len(im.Data) - offset
}
digestHex := strings.ToLower(hex.EncodeToString(digests[i]))
expectedDigest := md5.Sum(im.Data[offset : offset+blockLen])
expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
glog.V(2).Infof("0x%06x %4d %s %s %t", imAddr+offset, blockLen, expectedDigestHex, digestHex, expectedDigestHex == digestHex)
if expectedDigestHex == digestHex {
// Found a matching sector. If we've been building an image, commit it.
if newLen > 0 {
nim := &image{
Name: im.Name, | glog.V(2).Infof("%d @ 0x%x", len(nim.Data), nim.Addr)
newImages = append(newImages, nim)
newTotalLen += newLen
newAddr, newLen = 0, 0
}
} else {
// Found a sector that needs to be written. Start a new image or continue the existing one.
if newLen == 0 {
newAddr = imAddr + offset
}
newLen += blockLen
}
offset += blockLen
i++
}
if newLen > 0 {
nim := &image{
Name: im.Name,
Type: im.Type,
Addr: uint32(newAddr),
Data: im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
ESP32Encrypt: im.ESP32Encrypt,
}
newImages = append(newImages, nim)
glog.V(2).Infof("%d @ %x", len(nim.Data), nim.Addr)
newTotalLen += newLen
newAddr, newLen = 0, 0
}
glog.V(2).Infof("%d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
// There's a price for fragmenting a large image: erasing many individual
// sectors is slower than erasing a whole block. So unless the difference
// is substantial, don't bother.
if newTotalLen < len(im.Data) && (newTotalLen < flashBlockSize || len(im.Data)-newTotalLen >= flashBlockSize) {
dedupedImages = append(dedupedImages, newImages...)
common.Reportf(" %7d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
} else {
dedupedImages = append(dedupedImages, im)
}
}
return dedupedImages, nil
} | Type: im.Type,
Addr: uint32(newAddr),
Data: im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
ESP32Encrypt: im.ESP32Encrypt,
} | random_line_split |
main_tc.py | import argparse
import torch
import os
import numpy as np
from gym.spaces import Box, Discrete
from pathlib import Path
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from utils.make_env import make_env
from utils.tc_replay_buffer import ReplayBuffer
from utils.env_wrappers import SubprocVecEnv, DummyVecEnv
from algorithms.maddpg_c_ctc import MADDPG
from random import seed
USE_CUDA = True
def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action):
|
def run(config):
torch.manual_seed(config.seed)
np.random.seed(config.seed)
seed(config.seed)
if USE_CUDA:
torch.cuda.set_device(0)
torch.cuda.manual_seed(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
all_rewards = []
all_penalties_1 = []
all_penalties_2 = []
b_t = 0.00001
alpha_1 = 12
alpha_2 = 0.2
lb_t_1 = torch.from_numpy(np.random.rand(1)).float()
lb_t_2 = torch.from_numpy(np.random.rand(1)).float()
model_dir = Path('./models') / config.env_id / config.model_name
if not model_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
model_dir.iterdir() if
str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = model_dir / curr_run
log_dir = str(run_dir / 'logs')
os.makedirs(log_dir)
logger = SummaryWriter(str(log_dir))
env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
config.discrete_action)
maddpg = MADDPG.init_from_env(env, nagents=config.n_agents,
tau=config.tau,
lr=config.lr,
hidden_dim=config.hidden_dim, gamma=config.gamma)
replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
[obsp.shape[0] for obsp in env.observation_space],
[acsp.shape[0] if isinstance(acsp, Box) else acsp.n
for acsp in env.action_space])
t = 0
for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
print("Episodes %i-%i of %i" % (ep_i + 1,
ep_i + 1 + config.n_rollout_threads,
config.n_episodes))
obs = env.reset()
maddpg.prep_rollouts(device='cpu')
episode_rewards = []
episode_coll_penalties = []
episode_dep_penalties = []
explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
maddpg.scale_noise(
config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
maddpg.reset_noise()
for et_i in range(config.episode_length):
# rearrange observations to be per agent, and convert to torch Variable
torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
requires_grad=False)
for i in range(maddpg.nagents)]
# get actions as torch Variables
torch_agent_actions = maddpg.step(torch_obs, explore=True)
# convert actions to numpy arrays
agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
# rearrange actions to be per environment
actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
next_obs, rewards, dones, infos = env.step(actions)
ls_rollout_rewards = []
ls_rollout_collectors_penalty = []
ls_rollout_depositors_penalty = []
collectors_penalty = []
depositors_penalty = []
lagrangian_rews = []
for reward in rewards:
all_agts_rews_pens = reward.squeeze(1)
agt_rews_pens = all_agts_rews_pens[0] # rewards and penalties are shared among agents
rew = agt_rews_pens[0]
coll_pen = agt_rews_pens[1]
dep_pen = agt_rews_pens[2]
lagrangian_rew = rew + ((lb_t_1 * coll_pen) + (lb_t_2 * dep_pen))
# collect separately for plotting
ls_rollout_rewards.append(rew)
ls_rollout_collectors_penalty.append(coll_pen)
ls_rollout_depositors_penalty.append(dep_pen)
# collect for experience replay
collectors_penalty.append(np.asarray([coll_pen] * config.n_agents))
depositors_penalty.append(np.asarray([dep_pen] * config.n_agents))
lagrangian_rews.append(np.asarray([lagrangian_rew] * config.n_agents))
episode_rewards.append(np.mean(ls_rollout_rewards))
episode_coll_penalties.append(np.mean(ls_rollout_collectors_penalty))
episode_dep_penalties.append(np.mean(ls_rollout_depositors_penalty))
lagrangian_rews = np.asarray(lagrangian_rews)
collectors_penalty = np.asarray(collectors_penalty)
depositors_penalty = np.asarray(depositors_penalty)
replay_buffer.push(obs, agent_actions, lagrangian_rews, collectors_penalty, depositors_penalty, next_obs,
dones)
obs = next_obs
t += config.n_rollout_threads
lb_t_ls_1 = []
lb_t_ls_2 = []
lb_t_ls_1_up = []
lb_t_ls_2_up = []
if (len(replay_buffer) >= config.batch_size and
(t % config.steps_per_update) < config.n_rollout_threads):
if USE_CUDA:
maddpg.prep_training(device='gpu')
else:
maddpg.prep_training(device='cpu')
for u_i in range(config.num_updates):
for a_i in range(maddpg.nagents):
sample = replay_buffer.sample(config.batch_size,
to_gpu=USE_CUDA)
penalty_helper_1, penalty_helper_2 = maddpg.update(sample, a_i, logger=logger)
lb_t_1_ = torch.max(torch.tensor(0.0),
(lb_t_1 + ((penalty_helper_1.mean() - alpha_1).float() * b_t)))
lb_t_2_ = torch.max(torch.tensor(0.0),
(lb_t_2 + ((penalty_helper_2.mean() - alpha_2).float() * b_t)))
lb_t_ls_1.append(lb_t_1_)
lb_t_ls_2.append(lb_t_2_)
lb_t_ls_1_up.append(torch.from_numpy(np.asarray(lb_t_ls_1)).mean())
lb_t_ls_2_up.append(torch.from_numpy(np.asarray(lb_t_ls_2)).mean())
maddpg.update_all_targets()
maddpg.prep_rollouts(device='cpu')
lb_t_1 = torch.from_numpy(np.asarray(lb_t_ls_1_up)).mean()
lb_t_2 = torch.from_numpy(np.asarray(lb_t_ls_2_up)).mean()
all_rewards.append(np.sum(episode_rewards))
all_penalties_1.append(np.sum(episode_coll_penalties))
all_penalties_2.append(np.sum(episode_dep_penalties))
log_rew = np.mean(all_rewards[-1024:])
log_penalty1 = np.mean(all_penalties_1[-1024:])
log_penalty2 = np.mean(all_penalties_2[-1024:])
logger.add_scalar("Mean cost over latest 1024 epi/Training:-", log_rew, ep_i)
logger.add_scalar("Mean penalty_1 over latest 1024 epi/Training:-", log_penalty1, ep_i)
logger.add_scalar("Mean penalty_2 over latest 1024 epi/Training:-", log_penalty2, ep_i)
#logger.add_scalar('lbt1', lb_t_1, ep_i)
#logger.add_scalar('lbt2', lb_t_2, ep_i)
if ep_i % config.save_interval < config.n_rollout_threads:
maddpg.prep_rollouts(device='cpu')
os.makedirs(str(run_dir / 'incremental'), exist_ok=True)
maddpg.save(str(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1))))
maddpg.save(str(run_dir / 'model.pt'))
maddpg.save(str(run_dir / 'model.pt'))
env.close()
logger.export_scalars_to_json(str(log_dir / 'summary.json'))
logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", default="fullobs_collect_treasure", help="Name of environment")
parser.add_argument("--model_name", default="Treasure Collection",
help="Name of directory to store " +
"model/training contents")
parser.add_argument("--seed",
default=0, type=int,
help="Random seed")
parser.add_argument("--n_rollout_threads", default=12, type=int)
parser.add_argument("--n_training_threads", default=6, type=int)
parser.add_argument("--buffer_length", default=int(1e6), type=int)
parser.add_argument("--n_episodes", default=100000, type=int)
parser.add_argument("--n_agents", default=8, type=int)
parser.add_argument("--episode_length", default=100, type=int)
parser.add_argument("--steps_per_update", default=100, type=int)
parser.add_argument("--num_updates", default=4, type=int,
help="Number of updates per update cycle")
parser.add_argument("--batch_size",
default=1024, type=int,
help="Batch size for model training")
parser.add_argument("--n_exploration_eps", default=100000, type=int)
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--save_interval", default=1000, type=int)
parser.add_argument("--hidden_dim", default=128, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--tau", default=0.001, type=float)
parser.add_argument("--gamma", default=0.99, type=float)
parser.add_argument("--discrete_action",
default='True')
config = parser.parse_args()
run(config)
| def get_env_fn(rank):
def init_env():
env = make_env(env_id, discrete_action=discrete_action)
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
if n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)]) | identifier_body |
main_tc.py | import argparse
import torch
import os
import numpy as np
from gym.spaces import Box, Discrete
from pathlib import Path
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from utils.make_env import make_env
from utils.tc_replay_buffer import ReplayBuffer
from utils.env_wrappers import SubprocVecEnv, DummyVecEnv
from algorithms.maddpg_c_ctc import MADDPG
from random import seed
USE_CUDA = True
def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action):
def get_env_fn(rank):
def init_env():
env = make_env(env_id, discrete_action=discrete_action)
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
if n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])
def | (config):
torch.manual_seed(config.seed)
np.random.seed(config.seed)
seed(config.seed)
if USE_CUDA:
torch.cuda.set_device(0)
torch.cuda.manual_seed(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
all_rewards = []
all_penalties_1 = []
all_penalties_2 = []
b_t = 0.00001
alpha_1 = 12
alpha_2 = 0.2
lb_t_1 = torch.from_numpy(np.random.rand(1)).float()
lb_t_2 = torch.from_numpy(np.random.rand(1)).float()
model_dir = Path('./models') / config.env_id / config.model_name
if not model_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
model_dir.iterdir() if
str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = model_dir / curr_run
log_dir = str(run_dir / 'logs')
os.makedirs(log_dir)
logger = SummaryWriter(str(log_dir))
env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
config.discrete_action)
maddpg = MADDPG.init_from_env(env, nagents=config.n_agents,
tau=config.tau,
lr=config.lr,
hidden_dim=config.hidden_dim, gamma=config.gamma)
replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
[obsp.shape[0] for obsp in env.observation_space],
[acsp.shape[0] if isinstance(acsp, Box) else acsp.n
for acsp in env.action_space])
t = 0
for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
print("Episodes %i-%i of %i" % (ep_i + 1,
ep_i + 1 + config.n_rollout_threads,
config.n_episodes))
obs = env.reset()
maddpg.prep_rollouts(device='cpu')
episode_rewards = []
episode_coll_penalties = []
episode_dep_penalties = []
explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
maddpg.scale_noise(
config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
maddpg.reset_noise()
for et_i in range(config.episode_length):
# rearrange observations to be per agent, and convert to torch Variable
torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
requires_grad=False)
for i in range(maddpg.nagents)]
# get actions as torch Variables
torch_agent_actions = maddpg.step(torch_obs, explore=True)
# convert actions to numpy arrays
agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
# rearrange actions to be per environment
actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
next_obs, rewards, dones, infos = env.step(actions)
ls_rollout_rewards = []
ls_rollout_collectors_penalty = []
ls_rollout_depositors_penalty = []
collectors_penalty = []
depositors_penalty = []
lagrangian_rews = []
for reward in rewards:
all_agts_rews_pens = reward.squeeze(1)
agt_rews_pens = all_agts_rews_pens[0] # rewards and penalties are shared among agents
rew = agt_rews_pens[0]
coll_pen = agt_rews_pens[1]
dep_pen = agt_rews_pens[2]
lagrangian_rew = rew + ((lb_t_1 * coll_pen) + (lb_t_2 * dep_pen))
# collect separately for plotting
ls_rollout_rewards.append(rew)
ls_rollout_collectors_penalty.append(coll_pen)
ls_rollout_depositors_penalty.append(dep_pen)
# collect for experience replay
collectors_penalty.append(np.asarray([coll_pen] * config.n_agents))
depositors_penalty.append(np.asarray([dep_pen] * config.n_agents))
lagrangian_rews.append(np.asarray([lagrangian_rew] * config.n_agents))
episode_rewards.append(np.mean(ls_rollout_rewards))
episode_coll_penalties.append(np.mean(ls_rollout_collectors_penalty))
episode_dep_penalties.append(np.mean(ls_rollout_depositors_penalty))
lagrangian_rews = np.asarray(lagrangian_rews)
collectors_penalty = np.asarray(collectors_penalty)
depositors_penalty = np.asarray(depositors_penalty)
replay_buffer.push(obs, agent_actions, lagrangian_rews, collectors_penalty, depositors_penalty, next_obs,
dones)
obs = next_obs
t += config.n_rollout_threads
lb_t_ls_1 = []
lb_t_ls_2 = []
lb_t_ls_1_up = []
lb_t_ls_2_up = []
if (len(replay_buffer) >= config.batch_size and
(t % config.steps_per_update) < config.n_rollout_threads):
if USE_CUDA:
maddpg.prep_training(device='gpu')
else:
maddpg.prep_training(device='cpu')
for u_i in range(config.num_updates):
for a_i in range(maddpg.nagents):
sample = replay_buffer.sample(config.batch_size,
to_gpu=USE_CUDA)
penalty_helper_1, penalty_helper_2 = maddpg.update(sample, a_i, logger=logger)
lb_t_1_ = torch.max(torch.tensor(0.0),
(lb_t_1 + ((penalty_helper_1.mean() - alpha_1).float() * b_t)))
lb_t_2_ = torch.max(torch.tensor(0.0),
(lb_t_2 + ((penalty_helper_2.mean() - alpha_2).float() * b_t)))
lb_t_ls_1.append(lb_t_1_)
lb_t_ls_2.append(lb_t_2_)
lb_t_ls_1_up.append(torch.from_numpy(np.asarray(lb_t_ls_1)).mean())
lb_t_ls_2_up.append(torch.from_numpy(np.asarray(lb_t_ls_2)).mean())
maddpg.update_all_targets()
maddpg.prep_rollouts(device='cpu')
lb_t_1 = torch.from_numpy(np.asarray(lb_t_ls_1_up)).mean()
lb_t_2 = torch.from_numpy(np.asarray(lb_t_ls_2_up)).mean()
all_rewards.append(np.sum(episode_rewards))
all_penalties_1.append(np.sum(episode_coll_penalties))
all_penalties_2.append(np.sum(episode_dep_penalties))
log_rew = np.mean(all_rewards[-1024:])
log_penalty1 = np.mean(all_penalties_1[-1024:])
log_penalty2 = np.mean(all_penalties_2[-1024:])
logger.add_scalar("Mean cost over latest 1024 epi/Training:-", log_rew, ep_i)
logger.add_scalar("Mean penalty_1 over latest 1024 epi/Training:-", log_penalty1, ep_i)
logger.add_scalar("Mean penalty_2 over latest 1024 epi/Training:-", log_penalty2, ep_i)
#logger.add_scalar('lbt1', lb_t_1, ep_i)
#logger.add_scalar('lbt2', lb_t_2, ep_i)
if ep_i % config.save_interval < config.n_rollout_threads:
maddpg.prep_rollouts(device='cpu')
os.makedirs(str(run_dir / 'incremental'), exist_ok=True)
maddpg.save(str(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1))))
maddpg.save(str(run_dir / 'model.pt'))
maddpg.save(str(run_dir / 'model.pt'))
env.close()
logger.export_scalars_to_json(str(log_dir / 'summary.json'))
logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", default="fullobs_collect_treasure", help="Name of environment")
parser.add_argument("--model_name", default="Treasure Collection",
help="Name of directory to store " +
"model/training contents")
parser.add_argument("--seed",
default=0, type=int,
help="Random seed")
parser.add_argument("--n_rollout_threads", default=12, type=int)
parser.add_argument("--n_training_threads", default=6, type=int)
parser.add_argument("--buffer_length", default=int(1e6), type=int)
parser.add_argument("--n_episodes", default=100000, type=int)
parser.add_argument("--n_agents", default=8, type=int)
parser.add_argument("--episode_length", default=100, type=int)
parser.add_argument("--steps_per_update", default=100, type=int)
parser.add_argument("--num_updates", default=4, type=int,
help="Number of updates per update cycle")
parser.add_argument("--batch_size",
default=1024, type=int,
help="Batch size for model training")
parser.add_argument("--n_exploration_eps", default=100000, type=int)
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--save_interval", default=1000, type=int)
parser.add_argument("--hidden_dim", default=128, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--tau", default=0.001, type=float)
parser.add_argument("--gamma", default=0.99, type=float)
parser.add_argument("--discrete_action",
default='True')
config = parser.parse_args()
run(config)
| run | identifier_name |
main_tc.py | import argparse
import torch
import os
import numpy as np
from gym.spaces import Box, Discrete
from pathlib import Path
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from utils.make_env import make_env
from utils.tc_replay_buffer import ReplayBuffer
from utils.env_wrappers import SubprocVecEnv, DummyVecEnv
from algorithms.maddpg_c_ctc import MADDPG
from random import seed
USE_CUDA = True
def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action):
def get_env_fn(rank):
def init_env():
env = make_env(env_id, discrete_action=discrete_action)
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
if n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])
def run(config):
torch.manual_seed(config.seed)
np.random.seed(config.seed)
seed(config.seed)
if USE_CUDA:
torch.cuda.set_device(0)
torch.cuda.manual_seed(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
all_rewards = []
all_penalties_1 = []
all_penalties_2 = []
b_t = 0.00001
alpha_1 = 12
alpha_2 = 0.2
lb_t_1 = torch.from_numpy(np.random.rand(1)).float()
lb_t_2 = torch.from_numpy(np.random.rand(1)).float()
model_dir = Path('./models') / config.env_id / config.model_name
if not model_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
model_dir.iterdir() if
str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = model_dir / curr_run
log_dir = str(run_dir / 'logs')
os.makedirs(log_dir)
logger = SummaryWriter(str(log_dir))
env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
config.discrete_action)
maddpg = MADDPG.init_from_env(env, nagents=config.n_agents,
tau=config.tau,
lr=config.lr,
hidden_dim=config.hidden_dim, gamma=config.gamma)
replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
[obsp.shape[0] for obsp in env.observation_space],
[acsp.shape[0] if isinstance(acsp, Box) else acsp.n
for acsp in env.action_space])
t = 0
for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
print("Episodes %i-%i of %i" % (ep_i + 1,
ep_i + 1 + config.n_rollout_threads,
config.n_episodes))
obs = env.reset()
maddpg.prep_rollouts(device='cpu')
episode_rewards = []
episode_coll_penalties = []
episode_dep_penalties = []
explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
maddpg.scale_noise(
config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
maddpg.reset_noise()
for et_i in range(config.episode_length):
# rearrange observations to be per agent, and convert to torch Variable
torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
requires_grad=False)
for i in range(maddpg.nagents)]
# get actions as torch Variables
torch_agent_actions = maddpg.step(torch_obs, explore=True)
# convert actions to numpy arrays
agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
# rearrange actions to be per environment
actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
next_obs, rewards, dones, infos = env.step(actions)
ls_rollout_rewards = []
ls_rollout_collectors_penalty = []
ls_rollout_depositors_penalty = []
collectors_penalty = []
depositors_penalty = []
lagrangian_rews = []
for reward in rewards:
all_agts_rews_pens = reward.squeeze(1)
agt_rews_pens = all_agts_rews_pens[0] # rewards and penalties are shared among agents
rew = agt_rews_pens[0]
coll_pen = agt_rews_pens[1]
dep_pen = agt_rews_pens[2]
lagrangian_rew = rew + ((lb_t_1 * coll_pen) + (lb_t_2 * dep_pen))
# collect separately for plotting
ls_rollout_rewards.append(rew)
ls_rollout_collectors_penalty.append(coll_pen)
ls_rollout_depositors_penalty.append(dep_pen)
# collect for experience replay
collectors_penalty.append(np.asarray([coll_pen] * config.n_agents))
depositors_penalty.append(np.asarray([dep_pen] * config.n_agents))
lagrangian_rews.append(np.asarray([lagrangian_rew] * config.n_agents))
episode_rewards.append(np.mean(ls_rollout_rewards))
episode_coll_penalties.append(np.mean(ls_rollout_collectors_penalty))
episode_dep_penalties.append(np.mean(ls_rollout_depositors_penalty))
lagrangian_rews = np.asarray(lagrangian_rews)
collectors_penalty = np.asarray(collectors_penalty)
depositors_penalty = np.asarray(depositors_penalty)
replay_buffer.push(obs, agent_actions, lagrangian_rews, collectors_penalty, depositors_penalty, next_obs,
dones)
obs = next_obs
t += config.n_rollout_threads
lb_t_ls_1 = []
lb_t_ls_2 = []
lb_t_ls_1_up = []
lb_t_ls_2_up = []
if (len(replay_buffer) >= config.batch_size and
(t % config.steps_per_update) < config.n_rollout_threads):
if USE_CUDA:
maddpg.prep_training(device='gpu')
else:
|
for u_i in range(config.num_updates):
for a_i in range(maddpg.nagents):
sample = replay_buffer.sample(config.batch_size,
to_gpu=USE_CUDA)
penalty_helper_1, penalty_helper_2 = maddpg.update(sample, a_i, logger=logger)
lb_t_1_ = torch.max(torch.tensor(0.0),
(lb_t_1 + ((penalty_helper_1.mean() - alpha_1).float() * b_t)))
lb_t_2_ = torch.max(torch.tensor(0.0),
(lb_t_2 + ((penalty_helper_2.mean() - alpha_2).float() * b_t)))
lb_t_ls_1.append(lb_t_1_)
lb_t_ls_2.append(lb_t_2_)
lb_t_ls_1_up.append(torch.from_numpy(np.asarray(lb_t_ls_1)).mean())
lb_t_ls_2_up.append(torch.from_numpy(np.asarray(lb_t_ls_2)).mean())
maddpg.update_all_targets()
maddpg.prep_rollouts(device='cpu')
lb_t_1 = torch.from_numpy(np.asarray(lb_t_ls_1_up)).mean()
lb_t_2 = torch.from_numpy(np.asarray(lb_t_ls_2_up)).mean()
all_rewards.append(np.sum(episode_rewards))
all_penalties_1.append(np.sum(episode_coll_penalties))
all_penalties_2.append(np.sum(episode_dep_penalties))
log_rew = np.mean(all_rewards[-1024:])
log_penalty1 = np.mean(all_penalties_1[-1024:])
log_penalty2 = np.mean(all_penalties_2[-1024:])
logger.add_scalar("Mean cost over latest 1024 epi/Training:-", log_rew, ep_i)
logger.add_scalar("Mean penalty_1 over latest 1024 epi/Training:-", log_penalty1, ep_i)
logger.add_scalar("Mean penalty_2 over latest 1024 epi/Training:-", log_penalty2, ep_i)
#logger.add_scalar('lbt1', lb_t_1, ep_i)
#logger.add_scalar('lbt2', lb_t_2, ep_i)
if ep_i % config.save_interval < config.n_rollout_threads:
maddpg.prep_rollouts(device='cpu')
os.makedirs(str(run_dir / 'incremental'), exist_ok=True)
maddpg.save(str(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1))))
maddpg.save(str(run_dir / 'model.pt'))
maddpg.save(str(run_dir / 'model.pt'))
env.close()
logger.export_scalars_to_json(str(log_dir / 'summary.json'))
logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", default="fullobs_collect_treasure", help="Name of environment")
parser.add_argument("--model_name", default="Treasure Collection",
help="Name of directory to store " +
"model/training contents")
parser.add_argument("--seed",
default=0, type=int,
help="Random seed")
parser.add_argument("--n_rollout_threads", default=12, type=int)
parser.add_argument("--n_training_threads", default=6, type=int)
parser.add_argument("--buffer_length", default=int(1e6), type=int)
parser.add_argument("--n_episodes", default=100000, type=int)
parser.add_argument("--n_agents", default=8, type=int)
parser.add_argument("--episode_length", default=100, type=int)
parser.add_argument("--steps_per_update", default=100, type=int)
parser.add_argument("--num_updates", default=4, type=int,
help="Number of updates per update cycle")
parser.add_argument("--batch_size",
default=1024, type=int,
help="Batch size for model training")
parser.add_argument("--n_exploration_eps", default=100000, type=int)
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--save_interval", default=1000, type=int)
parser.add_argument("--hidden_dim", default=128, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--tau", default=0.001, type=float)
parser.add_argument("--gamma", default=0.99, type=float)
parser.add_argument("--discrete_action",
default='True')
config = parser.parse_args()
run(config)
| maddpg.prep_training(device='cpu') | conditional_block |
main_tc.py | import argparse
import torch
import os
import numpy as np
from gym.spaces import Box, Discrete
from pathlib import Path
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from utils.make_env import make_env
from utils.tc_replay_buffer import ReplayBuffer
from utils.env_wrappers import SubprocVecEnv, DummyVecEnv
from algorithms.maddpg_c_ctc import MADDPG
from random import seed
USE_CUDA = True
def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action):
def get_env_fn(rank):
def init_env():
env = make_env(env_id, discrete_action=discrete_action)
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
if n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])
def run(config):
torch.manual_seed(config.seed)
np.random.seed(config.seed)
seed(config.seed)
if USE_CUDA:
torch.cuda.set_device(0)
torch.cuda.manual_seed(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
all_rewards = []
all_penalties_1 = []
all_penalties_2 = []
b_t = 0.00001 | lb_t_1 = torch.from_numpy(np.random.rand(1)).float()
lb_t_2 = torch.from_numpy(np.random.rand(1)).float()
model_dir = Path('./models') / config.env_id / config.model_name
if not model_dir.exists():
curr_run = 'run1'
else:
exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in
model_dir.iterdir() if
str(folder.name).startswith('run')]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
run_dir = model_dir / curr_run
log_dir = str(run_dir / 'logs')
os.makedirs(log_dir)
logger = SummaryWriter(str(log_dir))
env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed,
config.discrete_action)
maddpg = MADDPG.init_from_env(env, nagents=config.n_agents,
tau=config.tau,
lr=config.lr,
hidden_dim=config.hidden_dim, gamma=config.gamma)
replay_buffer = ReplayBuffer(config.buffer_length, maddpg.nagents,
[obsp.shape[0] for obsp in env.observation_space],
[acsp.shape[0] if isinstance(acsp, Box) else acsp.n
for acsp in env.action_space])
t = 0
for ep_i in range(0, config.n_episodes, config.n_rollout_threads):
print("Episodes %i-%i of %i" % (ep_i + 1,
ep_i + 1 + config.n_rollout_threads,
config.n_episodes))
obs = env.reset()
maddpg.prep_rollouts(device='cpu')
episode_rewards = []
episode_coll_penalties = []
episode_dep_penalties = []
explr_pct_remaining = max(0, config.n_exploration_eps - ep_i) / config.n_exploration_eps
maddpg.scale_noise(
config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
maddpg.reset_noise()
for et_i in range(config.episode_length):
# rearrange observations to be per agent, and convert to torch Variable
torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),
requires_grad=False)
for i in range(maddpg.nagents)]
# get actions as torch Variables
torch_agent_actions = maddpg.step(torch_obs, explore=True)
# convert actions to numpy arrays
agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
# rearrange actions to be per environment
actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
next_obs, rewards, dones, infos = env.step(actions)
ls_rollout_rewards = []
ls_rollout_collectors_penalty = []
ls_rollout_depositors_penalty = []
collectors_penalty = []
depositors_penalty = []
lagrangian_rews = []
for reward in rewards:
all_agts_rews_pens = reward.squeeze(1)
agt_rews_pens = all_agts_rews_pens[0] # rewards and penalties are shared among agents
rew = agt_rews_pens[0]
coll_pen = agt_rews_pens[1]
dep_pen = agt_rews_pens[2]
lagrangian_rew = rew + ((lb_t_1 * coll_pen) + (lb_t_2 * dep_pen))
# collect separately for plotting
ls_rollout_rewards.append(rew)
ls_rollout_collectors_penalty.append(coll_pen)
ls_rollout_depositors_penalty.append(dep_pen)
# collect for experience replay
collectors_penalty.append(np.asarray([coll_pen] * config.n_agents))
depositors_penalty.append(np.asarray([dep_pen] * config.n_agents))
lagrangian_rews.append(np.asarray([lagrangian_rew] * config.n_agents))
episode_rewards.append(np.mean(ls_rollout_rewards))
episode_coll_penalties.append(np.mean(ls_rollout_collectors_penalty))
episode_dep_penalties.append(np.mean(ls_rollout_depositors_penalty))
lagrangian_rews = np.asarray(lagrangian_rews)
collectors_penalty = np.asarray(collectors_penalty)
depositors_penalty = np.asarray(depositors_penalty)
replay_buffer.push(obs, agent_actions, lagrangian_rews, collectors_penalty, depositors_penalty, next_obs,
dones)
obs = next_obs
t += config.n_rollout_threads
lb_t_ls_1 = []
lb_t_ls_2 = []
lb_t_ls_1_up = []
lb_t_ls_2_up = []
if (len(replay_buffer) >= config.batch_size and
(t % config.steps_per_update) < config.n_rollout_threads):
if USE_CUDA:
maddpg.prep_training(device='gpu')
else:
maddpg.prep_training(device='cpu')
for u_i in range(config.num_updates):
for a_i in range(maddpg.nagents):
sample = replay_buffer.sample(config.batch_size,
to_gpu=USE_CUDA)
penalty_helper_1, penalty_helper_2 = maddpg.update(sample, a_i, logger=logger)
lb_t_1_ = torch.max(torch.tensor(0.0),
(lb_t_1 + ((penalty_helper_1.mean() - alpha_1).float() * b_t)))
lb_t_2_ = torch.max(torch.tensor(0.0),
(lb_t_2 + ((penalty_helper_2.mean() - alpha_2).float() * b_t)))
lb_t_ls_1.append(lb_t_1_)
lb_t_ls_2.append(lb_t_2_)
lb_t_ls_1_up.append(torch.from_numpy(np.asarray(lb_t_ls_1)).mean())
lb_t_ls_2_up.append(torch.from_numpy(np.asarray(lb_t_ls_2)).mean())
maddpg.update_all_targets()
maddpg.prep_rollouts(device='cpu')
lb_t_1 = torch.from_numpy(np.asarray(lb_t_ls_1_up)).mean()
lb_t_2 = torch.from_numpy(np.asarray(lb_t_ls_2_up)).mean()
all_rewards.append(np.sum(episode_rewards))
all_penalties_1.append(np.sum(episode_coll_penalties))
all_penalties_2.append(np.sum(episode_dep_penalties))
log_rew = np.mean(all_rewards[-1024:])
log_penalty1 = np.mean(all_penalties_1[-1024:])
log_penalty2 = np.mean(all_penalties_2[-1024:])
logger.add_scalar("Mean cost over latest 1024 epi/Training:-", log_rew, ep_i)
logger.add_scalar("Mean penalty_1 over latest 1024 epi/Training:-", log_penalty1, ep_i)
logger.add_scalar("Mean penalty_2 over latest 1024 epi/Training:-", log_penalty2, ep_i)
#logger.add_scalar('lbt1', lb_t_1, ep_i)
#logger.add_scalar('lbt2', lb_t_2, ep_i)
if ep_i % config.save_interval < config.n_rollout_threads:
maddpg.prep_rollouts(device='cpu')
os.makedirs(str(run_dir / 'incremental'), exist_ok=True)
maddpg.save(str(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1))))
maddpg.save(str(run_dir / 'model.pt'))
maddpg.save(str(run_dir / 'model.pt'))
env.close()
logger.export_scalars_to_json(str(log_dir / 'summary.json'))
logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", default="fullobs_collect_treasure", help="Name of environment")
parser.add_argument("--model_name", default="Treasure Collection",
help="Name of directory to store " +
"model/training contents")
parser.add_argument("--seed",
default=0, type=int,
help="Random seed")
parser.add_argument("--n_rollout_threads", default=12, type=int)
parser.add_argument("--n_training_threads", default=6, type=int)
parser.add_argument("--buffer_length", default=int(1e6), type=int)
parser.add_argument("--n_episodes", default=100000, type=int)
parser.add_argument("--n_agents", default=8, type=int)
parser.add_argument("--episode_length", default=100, type=int)
parser.add_argument("--steps_per_update", default=100, type=int)
parser.add_argument("--num_updates", default=4, type=int,
help="Number of updates per update cycle")
parser.add_argument("--batch_size",
default=1024, type=int,
help="Batch size for model training")
parser.add_argument("--n_exploration_eps", default=100000, type=int)
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--save_interval", default=1000, type=int)
parser.add_argument("--hidden_dim", default=128, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--tau", default=0.001, type=float)
parser.add_argument("--gamma", default=0.99, type=float)
parser.add_argument("--discrete_action",
default='True')
config = parser.parse_args()
run(config) | alpha_1 = 12
alpha_2 = 0.2
| random_line_split |
WDCNN-DANN(p).py | # -*- coding: utf-8 -*-
# @Time : 2022-03-09 21:51
# @Author : 袁肖瀚
# @FileName: WDCNN-DANN.py
# @Software: PyCharm
import torch
import numpy as np
import torch.nn as nn
import argparse
from model import WDCNN1
from torch.nn.init import xavier_uniform_
import torch.utils.data as Data
import matplotlib.pylab as plt
import wandb
import os
from matplotlib.ticker import FuncFormatter
#定义wandb参数
hyperparameter_defaults = dict(
epochs=70,
batch_train=40,
batch_val=50,
batch_test=40,
lr=0.0002,
weight_decay=0.0005,
r=0.02
)
wandb.init(config=hyperparameter_defaults, project="WDCNN-DANN")
config = wandb.config
plt.rcParams['font.family'] = ['Times New Roman']
def to_percent(temp, position):
return '%1.0f' % (temp) + '%'
# model initialization 参数初始化
def weight_init(m):
class_name = m.__class__.__name__ #得到网络层的名字
if class_name.find('Conv') != -1: # 使用了find函数,如果不存在返回值为-1,所以让其不等于-1
xavier_uniform_(m.weight.data)
if class_name.find('Linear') != -1:
xavier_uniform_(m.weight.data)
def batch_norm_init(m):
class_name = m.__class__.__name__
if class_name.find('BatchNorm') != -1:
m.reset_running_stats()
# split train and split data
def data_split_train(data_set, label_set):
data_set_train = []
data_set_val = [] | train = []
label_set_val = []
for i in range(data_set.shape[0]): #行数 shape[2]通道数
index = np.arange(data_set.shape[1]) #列数矩阵[0 1 2 ''']
np.random.shuffle(index) #随机打乱数据 每次shuffle后数据都被打乱,这个方法可以在机器学习训练的时候在每个epoch结束后将数据重新洗牌进入下一个epoch的学习
a = index[:int((data_set.shape[1]) * 0.8)]
data = data_set[i] #第i行
data_train = data[a]
data_val = np.delete(data, a, 0)
data_set_train.append(data_train)
data_set_val.append(data_val)
label_set_train.extend(label_set[i][:len(data_train)])
label_set_val.extend(label_set[i][:len(data_val)])
data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])
data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])
label_set_train = np.array(label_set_train)
label_set_val = np.array(label_set_val)
return data_set_train, data_set_val, label_set_train, label_set_val
# training process
def train(train_dataset, val_dataset_s, val_dataset_t,train_dataset_t):
global alpha
#torch.cuda.empty_cache()
length = len(train_dataset.tensors[0])
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)
val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)
val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)
t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
# t_loader_iter = iter(t_loader)
val_loss_s = []
val_loss_t = []
val_acc_s = []
val_acc_t = []
cross_loss = [] #暂时不知道作用
Source_Train_Acc=[]
for epoch in range(config.epochs):
# t_loader = Data.DataLoader(train_dataset_t, batch_size=int(args.batch_train),shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
t_loader_iter = iter(t_loader)
model.train()
for index, (s_data_train, s_label_train) in enumerate(train_dataloader):
p = float(index) / 20
alpha = 2. / (1. + np.exp(-10 * p)) - 1
t_data_train = t_loader_iter.next()
s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)
t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)
s_label_train = s_label_train.long().to(device)
s_domain_label = torch.zeros(config.batch_train).long().cuda()
t_domain_label = torch.ones(config.batch_train).long().cuda()
s_out_train, s_domain_out = model(s_data_train, alpha)
t_out_train, t_domain_out = model(t_data_train, alpha)
loss_domain_s = criterion(s_domain_out, s_domain_label) #源域域分类损失
loss_domain_t = criterion(t_domain_out, t_domain_label) #目标域域分类损失
loss_c = criterion(s_out_train, s_label_train) #分类器损失
loss = loss_c + (loss_domain_s + loss_domain_t)*0.02
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred_s = torch.argmax(s_out_train.data, 1) # 返回指定维度最大值的序号 dim=1
correct_s = pred_s.eq(s_label_train).cpu().sum() #源域正确率
acc = 100. * correct_s.item() / len(s_data_train)
Source_Train_Acc.append(acc)
wandb.log({"Source Train Acc": acc})
if index % 2 == 0:
print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format
(epoch, config.epochs, (index + 1) * len(s_data_train), length,
100. * (config.batch_train * (index + 1) / length), loss_c.item(),
loss_domain_s.item() + loss_domain_t.item()
, acc))
#validation
model.eval()
#源域验证
correct_val_s = 0
sum_loss_s = 0
length_val_s = len(val_dataset_s)
for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):
with torch.no_grad():
s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)
s_label_val = s_label_val.long().to(device)
output_val_s, _ = model(s_data_val, alpha)
loss_s = criterion(output_val_s, s_label_val)
pred_val_s = torch.argmax(output_val_s.data, 1)
correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()
sum_loss_s += loss_s
acc_s = 100. * correct_val_s.item() / length_val_s #源域正确率
average_loss_s = sum_loss_s.item() / length_val_s #源域损失
#目标域验证
correct_val_t = 0
sum_loss_t = 0
length_val_t = len(val_dataset_t)
for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):
with torch.no_grad():
t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)
t_label_val = t_label_val.long().to(device)
output_val_t, _ = model(t_data_val, alpha)
loss_t = criterion(output_val_t, t_label_val)
pred_val_t = torch.argmax(output_val_t.data, 1)
correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()
sum_loss_t += loss_t
acc_t = 100. * correct_val_t.item() / length_val_t #目标域正确率
average_loss_t = sum_loss_t.item() / length_val_t #目标域损失
metrics = {"Acc_val_t": acc_t, 'epoch':epoch}
wandb.log(metrics)
print('\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(
epoch, config.epochs, average_loss_s, acc_s,average_loss_t, acc_t))
val_loss_s.append(loss_s.item())
val_loss_t.append(loss_t.item())
val_acc_t.append(acc_t)
val_acc_s.append(acc_s)
torch.save(model.state_dict(), os.path.join(wandb.run.dir, "model.pth"))
#画出验证集正确率曲线
plt.plot(val_acc_s, 'r-',marker='s')
plt.plot(val_acc_t, 'g-',marker='*')
plt.legend(["Source domain validation accuracy", "Target domain validation accuracy"])
plt.xlabel('Epochs')
plt.ylabel('validation accuracy')
plt.title('Source doamin & Target domain Validation Accuracy Rate')
plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
plt.savefig("Source doamin & Target domain Validation Accuracy Rate.png")
plt.show()
#画出验证集损失
plt.plot(val_loss_s, 'r-',marker='o')
plt.plot(val_loss_t, 'g-',marker='x')
plt.legend(["Source domain validation Loss", "Target domain validation Loss"])
plt.xlabel('Epochs')
plt.ylabel('val_loss')
plt.title('Source domain & Target domain Validation Loss')
plt.savefig("Source domain & Target domain Validation Loss")
plt.show()
# testing
def test(test_dataset):
model.eval()
length = len(test_dataset)
correct = 0
test_loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)
y_test = []
y_pred = []
for index, (data, label) in enumerate(test_loader):
with torch.no_grad():
data = data.float().to(device)
label = label.long().to(device)
y_test.append(label)
output, _ = model(data.unsqueeze(dim=1), alpha)
pred = torch.argmax(output.data, 1)
y_pred.append(pred)
correct += pred.eq(label).cpu().sum()
acc = 100. * correct / length
return acc
if __name__ == '__main__':
torch.cuda.empty_cache()
# use cpu or gpu
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
device = torch.device(device)
# CWRU
dataset_s_train = np.load(r'bearing numpy data\dataset_train_0HP_100.npz')
dataset_s_test = np.load(r'bearing numpy data\dataset_val_0HP_80.npz')
dataset_t_train = np.load(r'bearing numpy data\dataset_train_3HP_100.npz')
dataset_t_test = np.load(r'bearing numpy data\dataset_val_3HP_80.npz')
data_s_train_val = dataset_s_train['data']
data_s_test = dataset_s_test['data'].reshape(-1, 1024)
data_t_train_val = dataset_t_train['data']
data_t_test = dataset_t_test['data'].reshape(-1, 1024)
label_s_train_val = dataset_s_train['label']
label_s_test = dataset_s_test['label'].reshape(1, -1)
label_t_train_val = dataset_t_train['label']
label_t_test = dataset_t_test['label'].reshape(1, -1)
iteration_acc = []
test_acc_s = []
# repeat several times for an average result
for iteration in range(1):
# load model
model = WDCNN1(C_in=1, class_num=10).to(device)
model.apply(weight_init)
model.apply(batch_norm_init)
# train/val
data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)
data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)
# transfer ndarray to tensor
data_s_train = torch.from_numpy(data_s_train)
data_s_val = torch.from_numpy(data_s_val)
data_t_val = torch.from_numpy(data_t_val) #加的验证
data_s_test = torch.from_numpy(data_s_test)
data_t_train = torch.from_numpy(data_t_train)
data_t_test = torch.from_numpy(data_t_test)
label_s_train = torch.from_numpy(label_s_train)
label_s_val = torch.from_numpy(label_s_val)
label_t_val = torch.from_numpy(label_t_val) #加的验证
label_s_test = torch.from_numpy(label_s_test)
#label_t_train = torch.from_numpy(label_t_train)
label_t_test = torch.from_numpy(label_t_test)
# seal to data-set
train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)
train_dataset_t = Data.TensorDataset(data_t_train)
val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)
val_dataset_t = Data.TensorDataset(data_t_val, label_t_val) #加的验证
test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())
test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())
# print(train_dataset_s, val_dataset_s)
criterion = nn.NLLLoss()
train(train_dataset_s, val_dataset_s, val_dataset_t,train_dataset_t)
s_test_acc = test(test_dataset_s)
t_test_acc = test(test_dataset_t)
print('\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))
wandb.finish()
|
label_set_ | identifier_name |
WDCNN-DANN(p).py | # -*- coding: utf-8 -*-
# @Time : 2022-03-09 21:51
# @Author : 袁肖瀚
# @FileName: WDCNN-DANN.py
# @Software: PyCharm
import torch
import numpy as np
import torch.nn as nn
import argparse
from model import WDCNN1
from torch.nn.init import xavier_uniform_
import torch.utils.data as Data
import matplotlib.pylab as plt
import wandb
import os
from matplotlib.ticker import FuncFormatter
#定义wandb参数
hyperparameter_defaults = dict(
epochs=70,
batch_train=40,
batch_val=50,
batch_test=40,
lr=0.0002,
weight_decay=0.0005,
r=0.02
)
wandb.init(config=hyperparameter_defaults, project="WDCNN-DANN")
config = wandb.config
plt.rcParams['font.family'] = ['Times New Roman']
def to_percent(temp, position):
return '%1.0f' % (temp) + '%'
# model initialization 参数初始化
def weight_init(m):
class_name = m.__class__.__name__ #得到网络层的名字
if class_name.find('Conv') != -1: # 使用了find函数,如果不存在返回值为-1,所以让其不等于-1
xavier_uniform_(m.weight.data)
if class_name.find('Linear') != -1:
xavier_uniform_(m.weight.data)
def batch_norm_init(m):
class_name = m.__class__.__name__
if class_name.find('BatchNorm') != -1:
m.reset_running_stats()
# split train and split data
def data_split_train(data_set, label_set):
data_set_train = []
data_set_val = []
label_set_train = []
label_set_val = []
for i in range(data_set.shape[0]): #行数 shape[2]通道数
index = np.arange(data_set.shape[1]) #列数矩阵[0 1 2 ''']
np.random.shuffle(index) #随机打乱数据 每次shuffle后数据都被打乱,这个方法可以在机器学习训练的时候在每个epoch结束后将数据重新洗牌进入下一个epoch的学习
a = index[:int((data_set.shape[1]) * 0.8)]
data = data_set[i] #第i行
data_train = data[a]
data_val = np.delete(data, a, 0)
data_set_train.append(data_train)
data_set_val.append(data_val)
label_set_train.extend(label_set[i][:len(data_train)])
label_set_val.extend(label_set[i][:len(data_val)])
data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])
data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])
label_set_train = np.array(label_set_train)
label_set_val = np.array(label_set_val)
return data_set_train, data_set_val, label_set_train, label_set_val
# training process
def train(train_dataset, val_dataset_s, val_dataset_t,train_dataset_t):
global alpha
#torch.cuda.empty_cache()
length = len(train_dataset.tensors[0])
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)
val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)
val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)
t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
# t_loader_iter = iter(t_loader)
val_loss_s = []
val_loss_t = []
val_acc_s = []
val_acc_t = []
cross_loss = [] #暂时不知道作用
Source_Train_Acc=[]
for epoch in range(config.epochs):
# t_loader = Data.DataLoader(train_dataset_t, batch_size=int(args.batch_train),shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
t_loader_iter = iter(t_loader)
model.train()
for index, (s_data_train, s_label_train) in enumerate(train_dataloader):
p = float(index) / 20
alpha = 2. / (1. + np.exp(-10 * p)) - 1
t_data_train = t_loader_iter.next()
s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)
t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)
s_label_train = s_label_train.long().to(device)
s_domain_label = torch.zeros(config.batch_train).long().cuda()
t_domain_label = torch.ones(config.batch_train).long().cuda()
s_out_train, s_domain_out = model(s_data_train, alpha)
t_out_train, t_domain_out = model(t_data_train, alpha)
loss_domain_s = criterion(s_domain_out, s_domain_label) #源域域分类损失
loss_domain_t = criterion(t_domain_out, t_domain_label) #目标域域分类损失
loss_c = criterion(s_out_train, s_label_train) #分类器损失
loss = loss_c + (loss_domain_s + loss_domain_t)*0.02
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred_s = torch.argmax(s_out_train.data, 1) # 返回指定维度最大值的序号 dim=1
correct_s = pred_s.eq(s_label_train).cpu().sum() #源域正确率
acc = 100. * correct_s.item() / len(s_data_train)
Source_Train_Acc.append(acc)
wandb.log({"Source Train Acc": acc})
if index % 2 == 0:
print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format
(epoch, config.epochs, (index + 1) * len(s_data_train), length,
100. * (config.batch_train * (index + 1) / length), loss_c.item(),
loss_domain_s.item() + loss_domain_t.item()
, acc))
#validation
model.eval()
#源域验证
correct_val_s = 0
sum_loss_s = 0
length_val_s = len(val_dataset_s)
for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):
with torch.no_grad():
s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)
s_label_val = s_label_val.long().to(device)
output_val_s, _ = model(s_data_val, alpha)
loss_s = criterion(output_val_s, s_label_val)
pred_val_s = torch.argmax(output_val_s.data, 1)
correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()
sum_loss_s += loss_s
acc_s = 100. * correct_val_s.item() / length_val_s #源域正确率
average_loss_s = sum_loss_s.item() / length_val_s #源域损失
#目标域验证
correct_val_t = 0
sum_loss_t = 0
length_val_t = len(val_dataset_t)
for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):
with torch.no_grad():
t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)
t_label_val = t_label_val.long().to(device)
output_val_t, _ = model(t_data_val, alpha)
loss_t = criterion(output_val_t, t_label_val)
pred_val_t = torch.argmax(output_val_t.data, 1)
correct_val_t += pred_val_t.eq(t_label_val).c | s, acc_s,average_loss_t, acc_t))
val_loss_s.append(loss_s.item())
val_loss_t.append(loss_t.item())
val_acc_t.append(acc_t)
val_acc_s.append(acc_s)
torch.save(model.state_dict(), os.path.join(wandb.run.dir, "model.pth"))
#画出验证集正确率曲线
plt.plot(val_acc_s, 'r-',marker='s')
plt.plot(val_acc_t, 'g-',marker='*')
plt.legend(["Source domain validation accuracy", "Target domain validation accuracy"])
plt.xlabel('Epochs')
plt.ylabel('validation accuracy')
plt.title('Source doamin & Target domain Validation Accuracy Rate')
plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
plt.savefig("Source doamin & Target domain Validation Accuracy Rate.png")
plt.show()
#画出验证集损失
plt.plot(val_loss_s, 'r-',marker='o')
plt.plot(val_loss_t, 'g-',marker='x')
plt.legend(["Source domain validation Loss", "Target domain validation Loss"])
plt.xlabel('Epochs')
plt.ylabel('val_loss')
plt.title('Source domain & Target domain Validation Loss')
plt.savefig("Source domain & Target domain Validation Loss")
plt.show()
# testing
def test(test_dataset):
model.eval()
length = len(test_dataset)
correct = 0
test_loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)
y_test = []
y_pred = []
for index, (data, label) in enumerate(test_loader):
with torch.no_grad():
data = data.float().to(device)
label = label.long().to(device)
y_test.append(label)
output, _ = model(data.unsqueeze(dim=1), alpha)
pred = torch.argmax(output.data, 1)
y_pred.append(pred)
correct += pred.eq(label).cpu().sum()
acc = 100. * correct / length
return acc
if __name__ == '__main__':
torch.cuda.empty_cache()
# use cpu or gpu
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
device = torch.device(device)
# CWRU
dataset_s_train = np.load(r'bearing numpy data\dataset_train_0HP_100.npz')
dataset_s_test = np.load(r'bearing numpy data\dataset_val_0HP_80.npz')
dataset_t_train = np.load(r'bearing numpy data\dataset_train_3HP_100.npz')
dataset_t_test = np.load(r'bearing numpy data\dataset_val_3HP_80.npz')
data_s_train_val = dataset_s_train['data']
data_s_test = dataset_s_test['data'].reshape(-1, 1024)
data_t_train_val = dataset_t_train['data']
data_t_test = dataset_t_test['data'].reshape(-1, 1024)
label_s_train_val = dataset_s_train['label']
label_s_test = dataset_s_test['label'].reshape(1, -1)
label_t_train_val = dataset_t_train['label']
label_t_test = dataset_t_test['label'].reshape(1, -1)
iteration_acc = []
test_acc_s = []
# repeat several times for an average result
for iteration in range(1):
# load model
model = WDCNN1(C_in=1, class_num=10).to(device)
model.apply(weight_init)
model.apply(batch_norm_init)
# train/val
data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)
data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)
# transfer ndarray to tensor
data_s_train = torch.from_numpy(data_s_train)
data_s_val = torch.from_numpy(data_s_val)
data_t_val = torch.from_numpy(data_t_val) #加的验证
data_s_test = torch.from_numpy(data_s_test)
data_t_train = torch.from_numpy(data_t_train)
data_t_test = torch.from_numpy(data_t_test)
label_s_train = torch.from_numpy(label_s_train)
label_s_val = torch.from_numpy(label_s_val)
label_t_val = torch.from_numpy(label_t_val) #加的验证
label_s_test = torch.from_numpy(label_s_test)
#label_t_train = torch.from_numpy(label_t_train)
label_t_test = torch.from_numpy(label_t_test)
# seal to data-set
train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)
train_dataset_t = Data.TensorDataset(data_t_train)
val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)
val_dataset_t = Data.TensorDataset(data_t_val, label_t_val) #加的验证
test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())
test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())
# print(train_dataset_s, val_dataset_s)
criterion = nn.NLLLoss()
train(train_dataset_s, val_dataset_s, val_dataset_t,train_dataset_t)
s_test_acc = test(test_dataset_s)
t_test_acc = test(test_dataset_t)
print('\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))
wandb.finish()
| pu().sum()
sum_loss_t += loss_t
acc_t = 100. * correct_val_t.item() / length_val_t #目标域正确率
average_loss_t = sum_loss_t.item() / length_val_t #目标域损失
metrics = {"Acc_val_t": acc_t, 'epoch':epoch}
wandb.log(metrics)
print('\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(
epoch, config.epochs, average_loss_ | conditional_block |
WDCNN-DANN(p).py | # -*- coding: utf-8 -*-
# @Time : 2022-03-09 21:51
# @Author : 袁肖瀚
# @FileName: WDCNN-DANN.py
# @Software: PyCharm
import torch
import numpy as np
import torch.nn as nn
import argparse
from model import WDCNN1
from torch.nn.init import xavier_uniform_
import torch.utils.data as Data
import matplotlib.pylab as plt
import wandb
import os
from matplotlib.ticker import FuncFormatter
#定义wandb参数
hyperparameter_defaults = dict(
epochs=70,
batch_train=40,
batch_val=50,
batch_test=40,
lr=0.0002,
weight_decay=0.0005,
r=0.02
)
wandb.init(config=hyperparameter_defaults, project="WDCNN-DANN")
config = wandb.config
plt.rcParams['font.family'] = ['Times New Roman']
def to_percent(temp, position):
return '%1.0f' % (temp) + '%'
# model initialization 参数初始化
def weight_init(m):
class_name = m.__class__.__name__ #得到网络层的名字
if class_name.find('Conv') != -1: # 使用了find函数,如果不存在返回值为-1,所以让其不等于-1
xavier_uniform_(m.weight.data)
if class_name.find('Linear') != -1:
xavier_uniform_(m.weight.data)
def batch_norm_init(m):
class_name = m.__class__.__name__
if class_name.find('BatchNorm') != -1:
| ta_set_train = []
data_set_val = []
label_set_train = []
label_set_val = []
for i in range(data_set.shape[0]): #行数 shape[2]通道数
index = np.arange(data_set.shape[1]) #列数矩阵[0 1 2 ''']
np.random.shuffle(index) #随机打乱数据 每次shuffle后数据都被打乱,这个方法可以在机器学习训练的时候在每个epoch结束后将数据重新洗牌进入下一个epoch的学习
a = index[:int((data_set.shape[1]) * 0.8)]
data = data_set[i] #第i行
data_train = data[a]
data_val = np.delete(data, a, 0)
data_set_train.append(data_train)
data_set_val.append(data_val)
label_set_train.extend(label_set[i][:len(data_train)])
label_set_val.extend(label_set[i][:len(data_val)])
data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])
data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])
label_set_train = np.array(label_set_train)
label_set_val = np.array(label_set_val)
return data_set_train, data_set_val, label_set_train, label_set_val
# training process
def train(train_dataset, val_dataset_s, val_dataset_t,train_dataset_t):
global alpha
#torch.cuda.empty_cache()
length = len(train_dataset.tensors[0])
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)
val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)
val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)
t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
# t_loader_iter = iter(t_loader)
val_loss_s = []
val_loss_t = []
val_acc_s = []
val_acc_t = []
cross_loss = [] #暂时不知道作用
Source_Train_Acc=[]
for epoch in range(config.epochs):
# t_loader = Data.DataLoader(train_dataset_t, batch_size=int(args.batch_train),shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
t_loader_iter = iter(t_loader)
model.train()
for index, (s_data_train, s_label_train) in enumerate(train_dataloader):
p = float(index) / 20
alpha = 2. / (1. + np.exp(-10 * p)) - 1
t_data_train = t_loader_iter.next()
s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)
t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)
s_label_train = s_label_train.long().to(device)
s_domain_label = torch.zeros(config.batch_train).long().cuda()
t_domain_label = torch.ones(config.batch_train).long().cuda()
s_out_train, s_domain_out = model(s_data_train, alpha)
t_out_train, t_domain_out = model(t_data_train, alpha)
loss_domain_s = criterion(s_domain_out, s_domain_label) #源域域分类损失
loss_domain_t = criterion(t_domain_out, t_domain_label) #目标域域分类损失
loss_c = criterion(s_out_train, s_label_train) #分类器损失
loss = loss_c + (loss_domain_s + loss_domain_t)*0.02
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred_s = torch.argmax(s_out_train.data, 1) # 返回指定维度最大值的序号 dim=1
correct_s = pred_s.eq(s_label_train).cpu().sum() #源域正确率
acc = 100. * correct_s.item() / len(s_data_train)
Source_Train_Acc.append(acc)
wandb.log({"Source Train Acc": acc})
if index % 2 == 0:
print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format
(epoch, config.epochs, (index + 1) * len(s_data_train), length,
100. * (config.batch_train * (index + 1) / length), loss_c.item(),
loss_domain_s.item() + loss_domain_t.item()
, acc))
#validation
model.eval()
#源域验证
correct_val_s = 0
sum_loss_s = 0
length_val_s = len(val_dataset_s)
for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):
with torch.no_grad():
s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)
s_label_val = s_label_val.long().to(device)
output_val_s, _ = model(s_data_val, alpha)
loss_s = criterion(output_val_s, s_label_val)
pred_val_s = torch.argmax(output_val_s.data, 1)
correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()
sum_loss_s += loss_s
acc_s = 100. * correct_val_s.item() / length_val_s #源域正确率
average_loss_s = sum_loss_s.item() / length_val_s #源域损失
#目标域验证
correct_val_t = 0
sum_loss_t = 0
length_val_t = len(val_dataset_t)
for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):
with torch.no_grad():
t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)
t_label_val = t_label_val.long().to(device)
output_val_t, _ = model(t_data_val, alpha)
loss_t = criterion(output_val_t, t_label_val)
pred_val_t = torch.argmax(output_val_t.data, 1)
correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()
sum_loss_t += loss_t
acc_t = 100. * correct_val_t.item() / length_val_t #目标域正确率
average_loss_t = sum_loss_t.item() / length_val_t #目标域损失
metrics = {"Acc_val_t": acc_t, 'epoch':epoch}
wandb.log(metrics)
print('\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(
epoch, config.epochs, average_loss_s, acc_s,average_loss_t, acc_t))
val_loss_s.append(loss_s.item())
val_loss_t.append(loss_t.item())
val_acc_t.append(acc_t)
val_acc_s.append(acc_s)
torch.save(model.state_dict(), os.path.join(wandb.run.dir, "model.pth"))
#画出验证集正确率曲线
plt.plot(val_acc_s, 'r-',marker='s')
plt.plot(val_acc_t, 'g-',marker='*')
plt.legend(["Source domain validation accuracy", "Target domain validation accuracy"])
plt.xlabel('Epochs')
plt.ylabel('validation accuracy')
plt.title('Source doamin & Target domain Validation Accuracy Rate')
plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
plt.savefig("Source doamin & Target domain Validation Accuracy Rate.png")
plt.show()
#画出验证集损失
plt.plot(val_loss_s, 'r-',marker='o')
plt.plot(val_loss_t, 'g-',marker='x')
plt.legend(["Source domain validation Loss", "Target domain validation Loss"])
plt.xlabel('Epochs')
plt.ylabel('val_loss')
plt.title('Source domain & Target domain Validation Loss')
plt.savefig("Source domain & Target domain Validation Loss")
plt.show()
# testing
def test(test_dataset):
model.eval()
length = len(test_dataset)
correct = 0
test_loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)
y_test = []
y_pred = []
for index, (data, label) in enumerate(test_loader):
with torch.no_grad():
data = data.float().to(device)
label = label.long().to(device)
y_test.append(label)
output, _ = model(data.unsqueeze(dim=1), alpha)
pred = torch.argmax(output.data, 1)
y_pred.append(pred)
correct += pred.eq(label).cpu().sum()
acc = 100. * correct / length
return acc
if __name__ == '__main__':
torch.cuda.empty_cache()
# use cpu or gpu
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
device = torch.device(device)
# CWRU
dataset_s_train = np.load(r'bearing numpy data\dataset_train_0HP_100.npz')
dataset_s_test = np.load(r'bearing numpy data\dataset_val_0HP_80.npz')
dataset_t_train = np.load(r'bearing numpy data\dataset_train_3HP_100.npz')
dataset_t_test = np.load(r'bearing numpy data\dataset_val_3HP_80.npz')
data_s_train_val = dataset_s_train['data']
data_s_test = dataset_s_test['data'].reshape(-1, 1024)
data_t_train_val = dataset_t_train['data']
data_t_test = dataset_t_test['data'].reshape(-1, 1024)
label_s_train_val = dataset_s_train['label']
label_s_test = dataset_s_test['label'].reshape(1, -1)
label_t_train_val = dataset_t_train['label']
label_t_test = dataset_t_test['label'].reshape(1, -1)
iteration_acc = []
test_acc_s = []
# repeat several times for an average result
for iteration in range(1):
# load model
model = WDCNN1(C_in=1, class_num=10).to(device)
model.apply(weight_init)
model.apply(batch_norm_init)
# train/val
data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)
data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)
# transfer ndarray to tensor
data_s_train = torch.from_numpy(data_s_train)
data_s_val = torch.from_numpy(data_s_val)
data_t_val = torch.from_numpy(data_t_val) #加的验证
data_s_test = torch.from_numpy(data_s_test)
data_t_train = torch.from_numpy(data_t_train)
data_t_test = torch.from_numpy(data_t_test)
label_s_train = torch.from_numpy(label_s_train)
label_s_val = torch.from_numpy(label_s_val)
label_t_val = torch.from_numpy(label_t_val) #加的验证
label_s_test = torch.from_numpy(label_s_test)
#label_t_train = torch.from_numpy(label_t_train)
label_t_test = torch.from_numpy(label_t_test)
# seal to data-set
train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)
train_dataset_t = Data.TensorDataset(data_t_train)
val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)
val_dataset_t = Data.TensorDataset(data_t_val, label_t_val) #加的验证
test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())
test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())
# print(train_dataset_s, val_dataset_s)
criterion = nn.NLLLoss()
train(train_dataset_s, val_dataset_s, val_dataset_t,train_dataset_t)
s_test_acc = test(test_dataset_s)
t_test_acc = test(test_dataset_t)
print('\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))
wandb.finish()
| m.reset_running_stats()
# split train and split data
def data_split_train(data_set, label_set):
da | identifier_body |
WDCNN-DANN(p).py | # -*- coding: utf-8 -*-
# @Time : 2022-03-09 21:51
# @Author : 袁肖瀚
# @FileName: WDCNN-DANN.py
# @Software: PyCharm
import torch
import numpy as np
import torch.nn as nn
import argparse
from model import WDCNN1
from torch.nn.init import xavier_uniform_
import torch.utils.data as Data
import matplotlib.pylab as plt
import wandb
import os
from matplotlib.ticker import FuncFormatter
#定义wandb参数
hyperparameter_defaults = dict(
epochs=70,
batch_train=40,
batch_val=50,
batch_test=40,
lr=0.0002,
weight_decay=0.0005,
|
plt.rcParams['font.family'] = ['Times New Roman']
def to_percent(temp, position):
return '%1.0f' % (temp) + '%'
# model initialization 参数初始化
def weight_init(m):
class_name = m.__class__.__name__ #得到网络层的名字
if class_name.find('Conv') != -1: # 使用了find函数,如果不存在返回值为-1,所以让其不等于-1
xavier_uniform_(m.weight.data)
if class_name.find('Linear') != -1:
xavier_uniform_(m.weight.data)
def batch_norm_init(m):
class_name = m.__class__.__name__
if class_name.find('BatchNorm') != -1:
m.reset_running_stats()
# split train and split data
def data_split_train(data_set, label_set):
data_set_train = []
data_set_val = []
label_set_train = []
label_set_val = []
for i in range(data_set.shape[0]): #行数 shape[2]通道数
index = np.arange(data_set.shape[1]) #列数矩阵[0 1 2 ''']
np.random.shuffle(index) #随机打乱数据 每次shuffle后数据都被打乱,这个方法可以在机器学习训练的时候在每个epoch结束后将数据重新洗牌进入下一个epoch的学习
a = index[:int((data_set.shape[1]) * 0.8)]
data = data_set[i] #第i行
data_train = data[a]
data_val = np.delete(data, a, 0)
data_set_train.append(data_train)
data_set_val.append(data_val)
label_set_train.extend(label_set[i][:len(data_train)])
label_set_val.extend(label_set[i][:len(data_val)])
data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])
data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])
label_set_train = np.array(label_set_train)
label_set_val = np.array(label_set_val)
return data_set_train, data_set_val, label_set_train, label_set_val
# training process
def train(train_dataset, val_dataset_s, val_dataset_t,train_dataset_t):
global alpha
#torch.cuda.empty_cache()
length = len(train_dataset.tensors[0])
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)
val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)
val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)
t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
# t_loader_iter = iter(t_loader)
val_loss_s = []
val_loss_t = []
val_acc_s = []
val_acc_t = []
cross_loss = [] #暂时不知道作用
Source_Train_Acc=[]
for epoch in range(config.epochs):
# t_loader = Data.DataLoader(train_dataset_t, batch_size=int(args.batch_train),shuffle=True) # 修改这里,保证两个训练集的迭代次数一致
t_loader_iter = iter(t_loader)
model.train()
for index, (s_data_train, s_label_train) in enumerate(train_dataloader):
p = float(index) / 20
alpha = 2. / (1. + np.exp(-10 * p)) - 1
t_data_train = t_loader_iter.next()
s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)
t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)
s_label_train = s_label_train.long().to(device)
s_domain_label = torch.zeros(config.batch_train).long().cuda()
t_domain_label = torch.ones(config.batch_train).long().cuda()
s_out_train, s_domain_out = model(s_data_train, alpha)
t_out_train, t_domain_out = model(t_data_train, alpha)
loss_domain_s = criterion(s_domain_out, s_domain_label) #源域域分类损失
loss_domain_t = criterion(t_domain_out, t_domain_label) #目标域域分类损失
loss_c = criterion(s_out_train, s_label_train) #分类器损失
loss = loss_c + (loss_domain_s + loss_domain_t)*0.02
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred_s = torch.argmax(s_out_train.data, 1) # 返回指定维度最大值的序号 dim=1
correct_s = pred_s.eq(s_label_train).cpu().sum() #源域正确率
acc = 100. * correct_s.item() / len(s_data_train)
Source_Train_Acc.append(acc)
wandb.log({"Source Train Acc": acc})
if index % 2 == 0:
print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format
(epoch, config.epochs, (index + 1) * len(s_data_train), length,
100. * (config.batch_train * (index + 1) / length), loss_c.item(),
loss_domain_s.item() + loss_domain_t.item()
, acc))
#validation
model.eval()
#源域验证
correct_val_s = 0
sum_loss_s = 0
length_val_s = len(val_dataset_s)
for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):
with torch.no_grad():
s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)
s_label_val = s_label_val.long().to(device)
output_val_s, _ = model(s_data_val, alpha)
loss_s = criterion(output_val_s, s_label_val)
pred_val_s = torch.argmax(output_val_s.data, 1)
correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()
sum_loss_s += loss_s
acc_s = 100. * correct_val_s.item() / length_val_s #源域正确率
average_loss_s = sum_loss_s.item() / length_val_s #源域损失
#目标域验证
correct_val_t = 0
sum_loss_t = 0
length_val_t = len(val_dataset_t)
for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):
with torch.no_grad():
t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)
t_label_val = t_label_val.long().to(device)
output_val_t, _ = model(t_data_val, alpha)
loss_t = criterion(output_val_t, t_label_val)
pred_val_t = torch.argmax(output_val_t.data, 1)
correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()
sum_loss_t += loss_t
acc_t = 100. * correct_val_t.item() / length_val_t #目标域正确率
average_loss_t = sum_loss_t.item() / length_val_t #目标域损失
metrics = {"Acc_val_t": acc_t, 'epoch':epoch}
wandb.log(metrics)
print('\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(
epoch, config.epochs, average_loss_s, acc_s,average_loss_t, acc_t))
val_loss_s.append(loss_s.item())
val_loss_t.append(loss_t.item())
val_acc_t.append(acc_t)
val_acc_s.append(acc_s)
torch.save(model.state_dict(), os.path.join(wandb.run.dir, "model.pth"))
#画出验证集正确率曲线
plt.plot(val_acc_s, 'r-',marker='s')
plt.plot(val_acc_t, 'g-',marker='*')
plt.legend(["Source domain validation accuracy", "Target domain validation accuracy"])
plt.xlabel('Epochs')
plt.ylabel('validation accuracy')
plt.title('Source doamin & Target domain Validation Accuracy Rate')
plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
plt.savefig("Source doamin & Target domain Validation Accuracy Rate.png")
plt.show()
#画出验证集损失
plt.plot(val_loss_s, 'r-',marker='o')
plt.plot(val_loss_t, 'g-',marker='x')
plt.legend(["Source domain validation Loss", "Target domain validation Loss"])
plt.xlabel('Epochs')
plt.ylabel('val_loss')
plt.title('Source domain & Target domain Validation Loss')
plt.savefig("Source domain & Target domain Validation Loss")
plt.show()
# testing
def test(test_dataset):
model.eval()
length = len(test_dataset)
correct = 0
test_loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)
y_test = []
y_pred = []
for index, (data, label) in enumerate(test_loader):
with torch.no_grad():
data = data.float().to(device)
label = label.long().to(device)
y_test.append(label)
output, _ = model(data.unsqueeze(dim=1), alpha)
pred = torch.argmax(output.data, 1)
y_pred.append(pred)
correct += pred.eq(label).cpu().sum()
acc = 100. * correct / length
return acc
if __name__ == '__main__':
torch.cuda.empty_cache()
# use cpu or gpu
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
device = torch.device(device)
# CWRU
dataset_s_train = np.load(r'bearing numpy data\dataset_train_0HP_100.npz')
dataset_s_test = np.load(r'bearing numpy data\dataset_val_0HP_80.npz')
dataset_t_train = np.load(r'bearing numpy data\dataset_train_3HP_100.npz')
dataset_t_test = np.load(r'bearing numpy data\dataset_val_3HP_80.npz')
data_s_train_val = dataset_s_train['data']
data_s_test = dataset_s_test['data'].reshape(-1, 1024)
data_t_train_val = dataset_t_train['data']
data_t_test = dataset_t_test['data'].reshape(-1, 1024)
label_s_train_val = dataset_s_train['label']
label_s_test = dataset_s_test['label'].reshape(1, -1)
label_t_train_val = dataset_t_train['label']
label_t_test = dataset_t_test['label'].reshape(1, -1)
iteration_acc = []
test_acc_s = []
# repeat several times for an average result
for iteration in range(1):
# load model
model = WDCNN1(C_in=1, class_num=10).to(device)
model.apply(weight_init)
model.apply(batch_norm_init)
# train/val
data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)
data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)
# transfer ndarray to tensor
data_s_train = torch.from_numpy(data_s_train)
data_s_val = torch.from_numpy(data_s_val)
data_t_val = torch.from_numpy(data_t_val) #加的验证
data_s_test = torch.from_numpy(data_s_test)
data_t_train = torch.from_numpy(data_t_train)
data_t_test = torch.from_numpy(data_t_test)
label_s_train = torch.from_numpy(label_s_train)
label_s_val = torch.from_numpy(label_s_val)
label_t_val = torch.from_numpy(label_t_val) #加的验证
label_s_test = torch.from_numpy(label_s_test)
#label_t_train = torch.from_numpy(label_t_train)
label_t_test = torch.from_numpy(label_t_test)
# seal to data-set
train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)
train_dataset_t = Data.TensorDataset(data_t_train)
val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)
val_dataset_t = Data.TensorDataset(data_t_val, label_t_val) #加的验证
test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())
test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())
# print(train_dataset_s, val_dataset_s)
criterion = nn.NLLLoss()
train(train_dataset_s, val_dataset_s, val_dataset_t,train_dataset_t)
s_test_acc = test(test_dataset_s)
t_test_acc = test(test_dataset_t)
print('\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))
wandb.finish() | r=0.02
)
wandb.init(config=hyperparameter_defaults, project="WDCNN-DANN")
config = wandb.config
| random_line_split |
hls_live.rs | // Copyright (C) 2022 Mathieu Duponchelle <mathieu@centricular.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
// If a copy of the MPL was not distributed with this file, You can obtain one at
// <https://mozilla.org/MPL/2.0/>.
//
// SPDX-License-Identifier: MPL-2.0
// This creates a live HLS stream with one video playlist and two video playlists.
// Basic trimming is implemented
use gst::prelude::*;
use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::Error;
use chrono::{DateTime, Duration, Utc};
use m3u8_rs::{
AlternativeMedia, AlternativeMediaType, MasterPlaylist, MediaPlaylist, MediaSegment,
VariantStream,
};
struct State {
video_streams: Vec<VideoStream>,
audio_streams: Vec<AudioStream>,
all_mimes: Vec<String>,
path: PathBuf,
wrote_manifest: bool,
}
impl State {
fn maybe_write_manifest(&mut self) {
if self.wrote_manifest {
return;
}
if self.all_mimes.len() < self.video_streams.len() + self.audio_streams.len() {
return;
}
let mut all_mimes = self.all_mimes.clone();
all_mimes.sort();
all_mimes.dedup();
let playlist = MasterPlaylist {
version: Some(7),
variants: self
.video_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
VariantStream {
uri: path.as_path().display().to_string(),
bandwidth: stream.bitrate,
codecs: Some(all_mimes.join(",")),
resolution: Some(m3u8_rs::Resolution {
width: stream.width,
height: stream.height,
}),
audio: Some("audio".to_string()),
..Default::default()
}
})
.collect(),
alternatives: self
.audio_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
AlternativeMedia {
media_type: AlternativeMediaType::Audio,
uri: Some(path.as_path().display().to_string()),
group_id: "audio".to_string(),
language: Some(stream.lang.clone()),
name: stream.name.clone(),
default: stream.default,
autoselect: stream.default,
channels: Some("2".to_string()),
..Default::default()
}
})
.collect(),
independent_segments: true,
..Default::default()
};
println!("Writing master manifest to {}", self.path.display());
let mut file = std::fs::File::create(&self.path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write master playlist");
self.wrote_manifest = true;
}
}
struct Segment {
date_time: DateTime<Utc>,
duration: gst::ClockTime,
path: String,
}
struct UnreffedSegment {
removal_time: DateTime<Utc>,
path: String,
}
struct StreamState {
path: PathBuf,
segments: VecDeque<Segment>,
trimmed_segments: VecDeque<UnreffedSegment>,
start_date_time: Option<DateTime<Utc>>,
start_time: Option<gst::ClockTime>,
media_sequence: u64,
segment_index: u32,
}
struct VideoStream {
name: String,
bitrate: u64,
width: u64,
height: u64,
}
struct AudioStream {
name: String,
lang: String,
default: bool,
wave: String,
}
fn trim_segments(state: &mut StreamState) {
// Arbitrary 5 segments window
while state.segments.len() > 5 {
let segment = state.segments.pop_front().unwrap();
state.media_sequence += 1;
state.trimmed_segments.push_back(UnreffedSegment {
// HLS spec mandates that segments are removed from the filesystem no sooner
// than the duration of the longest playlist + duration of the segment.
// This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the
// safe side
removal_time: segment
.date_time
.checked_add_signed(Duration::seconds(20))
.unwrap(),
path: segment.path.clone(),
});
}
while let Some(segment) = state.trimmed_segments.front() {
if segment.removal_time < state.segments.front().unwrap().date_time {
let segment = state.trimmed_segments.pop_front().unwrap();
let mut path = state.path.clone();
path.push(segment.path);
println!("Removing {}", path.display());
std::fs::remove_file(path).expect("Failed to remove old segment");
} else { | fn update_manifest(state: &mut StreamState) {
// Now write the manifest
let mut path = state.path.clone();
path.push("manifest.m3u8");
println!("writing manifest to {}", path.display());
trim_segments(state);
let playlist = MediaPlaylist {
version: Some(7),
target_duration: 2.5,
media_sequence: state.media_sequence,
segments: state
.segments
.iter()
.enumerate()
.map(|(idx, segment)| MediaSegment {
uri: segment.path.to_string(),
duration: (segment.duration.nseconds() as f64
/ gst::ClockTime::SECOND.nseconds() as f64) as f32,
map: Some(m3u8_rs::Map {
uri: "init.cmfi".into(),
..Default::default()
}),
program_date_time: if idx == 0 {
Some(segment.date_time.into())
} else {
None
},
..Default::default()
})
.collect(),
end_list: false,
playlist_type: None,
i_frames_only: false,
start: None,
independent_segments: true,
..Default::default()
};
let mut file = std::fs::File::create(path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write media playlist");
}
fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool) {
let mut path: PathBuf = path.into();
path.push(name);
let state = Arc::new(Mutex::new(StreamState {
segments: VecDeque::new(),
trimmed_segments: VecDeque::new(),
path,
start_date_time: None,
start_time: gst::ClockTime::NONE,
media_sequence: 0,
segment_index: 0,
}));
appsink.set_callbacks(
gst_app::AppSinkCallbacks::builder()
.new_sample(move |sink| {
let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?;
let mut state = state.lock().unwrap();
// The muxer only outputs non-empty buffer lists
let mut buffer_list = sample.buffer_list_owned().expect("no buffer list");
assert!(!buffer_list.is_empty());
let mut first = buffer_list.get(0).unwrap();
// Each list contains a full segment, i.e. does not start with a DELTA_UNIT
assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT));
// If the buffer has the DISCONT and HEADER flag set then it contains the media
// header, i.e. the `ftyp`, `moov` and other media boxes.
//
// This might be the initial header or the updated header at the end of the stream.
if first
.flags()
.contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER)
{
let mut path = state.path.clone();
std::fs::create_dir_all(&path).expect("failed to create directory");
path.push("init.cmfi");
println!("writing header to {}", path.display());
let map = first.map_readable().unwrap();
std::fs::write(path, &map).expect("failed to write header");
drop(map);
// Remove the header from the buffer list
buffer_list.make_mut().remove(0, 1);
// If the list is now empty then it only contained the media header and nothing
// else.
if buffer_list.is_empty() {
return Ok(gst::FlowSuccess::Ok);
}
// Otherwise get the next buffer and continue working with that.
first = buffer_list.get(0).unwrap();
}
// If the buffer only has the HEADER flag set then this is a segment header that is
// followed by one or more actual media buffers.
assert!(first.flags().contains(gst::BufferFlags::HEADER));
let mut path = state.path.clone();
let basename = format!(
"segment_{}.{}",
state.segment_index,
if is_video { "cmfv" } else { "cmfa" }
);
state.segment_index += 1;
path.push(&basename);
let segment = sample
.segment()
.expect("no segment")
.downcast_ref::<gst::ClockTime>()
.expect("no time segment");
let pts = segment
.to_running_time(first.pts().unwrap())
.expect("can't get running time");
if state.start_time.is_none() {
state.start_time = Some(pts);
}
if state.start_date_time.is_none() {
let now_utc = Utc::now();
let now_gst = sink.clock().unwrap().time().unwrap();
let pts_clock_time = pts + sink.base_time().unwrap();
let diff = now_gst.checked_sub(pts_clock_time).unwrap();
let pts_utc = now_utc
.checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64))
.unwrap();
state.start_date_time = Some(pts_utc);
}
let duration = first.duration().unwrap();
let mut file = std::fs::File::create(&path).expect("failed to open fragment");
for buffer in &*buffer_list {
use std::io::prelude::*;
let map = buffer.map_readable().unwrap();
file.write_all(&map).expect("failed to write fragment");
}
let date_time = state
.start_date_time
.unwrap()
.checked_add_signed(Duration::nanoseconds(
pts.opt_checked_sub(state.start_time)
.unwrap()
.unwrap()
.nseconds() as i64,
))
.unwrap();
println!(
"wrote segment with date time {} to {}",
date_time,
path.display()
);
state.segments.push_back(Segment {
duration,
path: basename.to_string(),
date_time,
});
update_manifest(&mut state);
Ok(gst::FlowSuccess::Ok)
})
.eos(move |_sink| {
unreachable!();
})
.build(),
);
}
fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) {
enc.static_pad("src").unwrap().add_probe(
gst::PadProbeType::EVENT_DOWNSTREAM,
move |_pad, info| match info.data {
Some(gst::PadProbeData::Event(ref ev)) => match ev.view() {
gst::EventView::Caps(e) => {
let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps());
let mut state = state.lock().unwrap();
state.all_mimes.push(mime.unwrap().into());
state.maybe_write_manifest();
gst::PadProbeReturn::Remove
}
_ => gst::PadProbeReturn::Ok,
},
_ => gst::PadProbeReturn::Ok,
},
);
}
impl VideoStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("videotestsrc")
.property("is-live", true)
.build()?;
let raw_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst_video::VideoCapsBuilder::new()
.format(gst_video::VideoFormat::I420)
.width(self.width as i32)
.height(self.height as i32)
.framerate(30.into())
.build(),
)
.build()?;
let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?;
let enc = gst::ElementFactory::make("x264enc")
.property("bframes", 0u32)
.property("bitrate", self.bitrate as u32 / 1000u32)
.property_from_str("tune", "zerolatency")
.build()?;
let h264_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst::Caps::builder("video/x-h264")
.field("profile", "main")
.build(),
)
.build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property("fragment-duration", 2500.mseconds())
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
gst::Element::link_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, true);
Ok(())
}
}
impl AudioStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("audiotestsrc")
.property("is-live", true)
.property_from_str("wave", &self.wave)
.build()?;
let enc = gst::ElementFactory::make("avenc_aac").build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.property("fragment-duration", 2500.mseconds())
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?;
gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, false);
Ok(())
}
}
fn main() -> Result<(), Error> {
gst::init()?;
gstfmp4::plugin_register_static()?;
let path = PathBuf::from("hls_live_stream");
let pipeline = gst::Pipeline::default();
std::fs::create_dir_all(&path).expect("failed to create directory");
let mut manifest_path = path.clone();
manifest_path.push("manifest.m3u8");
let state = Arc::new(Mutex::new(State {
video_streams: vec![VideoStream {
name: "video_0".to_string(),
bitrate: 2_048_000,
width: 1280,
height: 720,
}],
audio_streams: vec![
AudioStream {
name: "audio_0".to_string(),
lang: "eng".to_string(),
default: true,
wave: "sine".to_string(),
},
AudioStream {
name: "audio_1".to_string(),
lang: "fre".to_string(),
default: false,
wave: "white-noise".to_string(),
},
],
all_mimes: vec![],
path: manifest_path.clone(),
wrote_manifest: false,
}));
{
let state_lock = state.lock().unwrap();
for stream in &state_lock.video_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
for stream in &state_lock.audio_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
}
pipeline.set_state(gst::State::Playing)?;
let bus = pipeline
.bus()
.expect("Pipeline without bus. Shouldn't happen!");
for msg in bus.iter_timed(gst::ClockTime::NONE) {
use gst::MessageView;
match msg.view() {
MessageView::Eos(..) => {
println!("EOS");
break;
}
MessageView::Error(err) => {
pipeline.set_state(gst::State::Null)?;
eprintln!(
"Got error from {}: {} ({})",
msg.src()
.map(|s| String::from(s.path_string()))
.unwrap_or_else(|| "None".into()),
err.error(),
err.debug().unwrap_or_else(|| "".into()),
);
break;
}
_ => (),
}
}
pipeline.set_state(gst::State::Null)?;
Ok(())
} | break;
}
}
}
| random_line_split |
hls_live.rs | // Copyright (C) 2022 Mathieu Duponchelle <mathieu@centricular.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
// If a copy of the MPL was not distributed with this file, You can obtain one at
// <https://mozilla.org/MPL/2.0/>.
//
// SPDX-License-Identifier: MPL-2.0
// This creates a live HLS stream with one video playlist and two video playlists.
// Basic trimming is implemented
use gst::prelude::*;
use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::Error;
use chrono::{DateTime, Duration, Utc};
use m3u8_rs::{
AlternativeMedia, AlternativeMediaType, MasterPlaylist, MediaPlaylist, MediaSegment,
VariantStream,
};
struct State {
video_streams: Vec<VideoStream>,
audio_streams: Vec<AudioStream>,
all_mimes: Vec<String>,
path: PathBuf,
wrote_manifest: bool,
}
impl State {
fn maybe_write_manifest(&mut self) {
if self.wrote_manifest {
return;
}
if self.all_mimes.len() < self.video_streams.len() + self.audio_streams.len() {
return;
}
let mut all_mimes = self.all_mimes.clone();
all_mimes.sort();
all_mimes.dedup();
let playlist = MasterPlaylist {
version: Some(7),
variants: self
.video_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
VariantStream {
uri: path.as_path().display().to_string(),
bandwidth: stream.bitrate,
codecs: Some(all_mimes.join(",")),
resolution: Some(m3u8_rs::Resolution {
width: stream.width,
height: stream.height,
}),
audio: Some("audio".to_string()),
..Default::default()
}
})
.collect(),
alternatives: self
.audio_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
AlternativeMedia {
media_type: AlternativeMediaType::Audio,
uri: Some(path.as_path().display().to_string()),
group_id: "audio".to_string(),
language: Some(stream.lang.clone()),
name: stream.name.clone(),
default: stream.default,
autoselect: stream.default,
channels: Some("2".to_string()),
..Default::default()
}
})
.collect(),
independent_segments: true,
..Default::default()
};
println!("Writing master manifest to {}", self.path.display());
let mut file = std::fs::File::create(&self.path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write master playlist");
self.wrote_manifest = true;
}
}
struct Segment {
date_time: DateTime<Utc>,
duration: gst::ClockTime,
path: String,
}
struct UnreffedSegment {
removal_time: DateTime<Utc>,
path: String,
}
struct StreamState {
path: PathBuf,
segments: VecDeque<Segment>,
trimmed_segments: VecDeque<UnreffedSegment>,
start_date_time: Option<DateTime<Utc>>,
start_time: Option<gst::ClockTime>,
media_sequence: u64,
segment_index: u32,
}
struct VideoStream {
name: String,
bitrate: u64,
width: u64,
height: u64,
}
struct AudioStream {
name: String,
lang: String,
default: bool,
wave: String,
}
fn trim_segments(state: &mut StreamState) {
// Arbitrary 5 segments window
while state.segments.len() > 5 {
let segment = state.segments.pop_front().unwrap();
state.media_sequence += 1;
state.trimmed_segments.push_back(UnreffedSegment {
// HLS spec mandates that segments are removed from the filesystem no sooner
// than the duration of the longest playlist + duration of the segment.
// This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the
// safe side
removal_time: segment
.date_time
.checked_add_signed(Duration::seconds(20))
.unwrap(),
path: segment.path.clone(),
});
}
while let Some(segment) = state.trimmed_segments.front() {
if segment.removal_time < state.segments.front().unwrap().date_time {
let segment = state.trimmed_segments.pop_front().unwrap();
let mut path = state.path.clone();
path.push(segment.path);
println!("Removing {}", path.display());
std::fs::remove_file(path).expect("Failed to remove old segment");
} else {
break;
}
}
}
fn update_manifest(state: &mut StreamState) {
// Now write the manifest
let mut path = state.path.clone();
path.push("manifest.m3u8");
println!("writing manifest to {}", path.display());
trim_segments(state);
let playlist = MediaPlaylist {
version: Some(7),
target_duration: 2.5,
media_sequence: state.media_sequence,
segments: state
.segments
.iter()
.enumerate()
.map(|(idx, segment)| MediaSegment {
uri: segment.path.to_string(),
duration: (segment.duration.nseconds() as f64
/ gst::ClockTime::SECOND.nseconds() as f64) as f32,
map: Some(m3u8_rs::Map {
uri: "init.cmfi".into(),
..Default::default()
}),
program_date_time: if idx == 0 {
Some(segment.date_time.into())
} else {
None
},
..Default::default()
})
.collect(),
end_list: false,
playlist_type: None,
i_frames_only: false,
start: None,
independent_segments: true,
..Default::default()
};
let mut file = std::fs::File::create(path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write media playlist");
}
fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool) {
let mut path: PathBuf = path.into();
path.push(name);
let state = Arc::new(Mutex::new(StreamState {
segments: VecDeque::new(),
trimmed_segments: VecDeque::new(),
path,
start_date_time: None,
start_time: gst::ClockTime::NONE,
media_sequence: 0,
segment_index: 0,
}));
appsink.set_callbacks(
gst_app::AppSinkCallbacks::builder()
.new_sample(move |sink| {
let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?;
let mut state = state.lock().unwrap();
// The muxer only outputs non-empty buffer lists
let mut buffer_list = sample.buffer_list_owned().expect("no buffer list");
assert!(!buffer_list.is_empty());
let mut first = buffer_list.get(0).unwrap();
// Each list contains a full segment, i.e. does not start with a DELTA_UNIT
assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT));
// If the buffer has the DISCONT and HEADER flag set then it contains the media
// header, i.e. the `ftyp`, `moov` and other media boxes.
//
// This might be the initial header or the updated header at the end of the stream.
if first
.flags()
.contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER)
|
// If the buffer only has the HEADER flag set then this is a segment header that is
// followed by one or more actual media buffers.
assert!(first.flags().contains(gst::BufferFlags::HEADER));
let mut path = state.path.clone();
let basename = format!(
"segment_{}.{}",
state.segment_index,
if is_video { "cmfv" } else { "cmfa" }
);
state.segment_index += 1;
path.push(&basename);
let segment = sample
.segment()
.expect("no segment")
.downcast_ref::<gst::ClockTime>()
.expect("no time segment");
let pts = segment
.to_running_time(first.pts().unwrap())
.expect("can't get running time");
if state.start_time.is_none() {
state.start_time = Some(pts);
}
if state.start_date_time.is_none() {
let now_utc = Utc::now();
let now_gst = sink.clock().unwrap().time().unwrap();
let pts_clock_time = pts + sink.base_time().unwrap();
let diff = now_gst.checked_sub(pts_clock_time).unwrap();
let pts_utc = now_utc
.checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64))
.unwrap();
state.start_date_time = Some(pts_utc);
}
let duration = first.duration().unwrap();
let mut file = std::fs::File::create(&path).expect("failed to open fragment");
for buffer in &*buffer_list {
use std::io::prelude::*;
let map = buffer.map_readable().unwrap();
file.write_all(&map).expect("failed to write fragment");
}
let date_time = state
.start_date_time
.unwrap()
.checked_add_signed(Duration::nanoseconds(
pts.opt_checked_sub(state.start_time)
.unwrap()
.unwrap()
.nseconds() as i64,
))
.unwrap();
println!(
"wrote segment with date time {} to {}",
date_time,
path.display()
);
state.segments.push_back(Segment {
duration,
path: basename.to_string(),
date_time,
});
update_manifest(&mut state);
Ok(gst::FlowSuccess::Ok)
})
.eos(move |_sink| {
unreachable!();
})
.build(),
);
}
fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) {
enc.static_pad("src").unwrap().add_probe(
gst::PadProbeType::EVENT_DOWNSTREAM,
move |_pad, info| match info.data {
Some(gst::PadProbeData::Event(ref ev)) => match ev.view() {
gst::EventView::Caps(e) => {
let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps());
let mut state = state.lock().unwrap();
state.all_mimes.push(mime.unwrap().into());
state.maybe_write_manifest();
gst::PadProbeReturn::Remove
}
_ => gst::PadProbeReturn::Ok,
},
_ => gst::PadProbeReturn::Ok,
},
);
}
impl VideoStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("videotestsrc")
.property("is-live", true)
.build()?;
let raw_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst_video::VideoCapsBuilder::new()
.format(gst_video::VideoFormat::I420)
.width(self.width as i32)
.height(self.height as i32)
.framerate(30.into())
.build(),
)
.build()?;
let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?;
let enc = gst::ElementFactory::make("x264enc")
.property("bframes", 0u32)
.property("bitrate", self.bitrate as u32 / 1000u32)
.property_from_str("tune", "zerolatency")
.build()?;
let h264_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst::Caps::builder("video/x-h264")
.field("profile", "main")
.build(),
)
.build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property("fragment-duration", 2500.mseconds())
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
gst::Element::link_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, true);
Ok(())
}
}
impl AudioStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("audiotestsrc")
.property("is-live", true)
.property_from_str("wave", &self.wave)
.build()?;
let enc = gst::ElementFactory::make("avenc_aac").build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.property("fragment-duration", 2500.mseconds())
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?;
gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, false);
Ok(())
}
}
fn main() -> Result<(), Error> {
gst::init()?;
gstfmp4::plugin_register_static()?;
let path = PathBuf::from("hls_live_stream");
let pipeline = gst::Pipeline::default();
std::fs::create_dir_all(&path).expect("failed to create directory");
let mut manifest_path = path.clone();
manifest_path.push("manifest.m3u8");
let state = Arc::new(Mutex::new(State {
video_streams: vec![VideoStream {
name: "video_0".to_string(),
bitrate: 2_048_000,
width: 1280,
height: 720,
}],
audio_streams: vec![
AudioStream {
name: "audio_0".to_string(),
lang: "eng".to_string(),
default: true,
wave: "sine".to_string(),
},
AudioStream {
name: "audio_1".to_string(),
lang: "fre".to_string(),
default: false,
wave: "white-noise".to_string(),
},
],
all_mimes: vec![],
path: manifest_path.clone(),
wrote_manifest: false,
}));
{
let state_lock = state.lock().unwrap();
for stream in &state_lock.video_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
for stream in &state_lock.audio_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
}
pipeline.set_state(gst::State::Playing)?;
let bus = pipeline
.bus()
.expect("Pipeline without bus. Shouldn't happen!");
for msg in bus.iter_timed(gst::ClockTime::NONE) {
use gst::MessageView;
match msg.view() {
MessageView::Eos(..) => {
println!("EOS");
break;
}
MessageView::Error(err) => {
pipeline.set_state(gst::State::Null)?;
eprintln!(
"Got error from {}: {} ({})",
msg.src()
.map(|s| String::from(s.path_string()))
.unwrap_or_else(|| "None".into()),
err.error(),
err.debug().unwrap_or_else(|| "".into()),
);
break;
}
_ => (),
}
}
pipeline.set_state(gst::State::Null)?;
Ok(())
}
| {
let mut path = state.path.clone();
std::fs::create_dir_all(&path).expect("failed to create directory");
path.push("init.cmfi");
println!("writing header to {}", path.display());
let map = first.map_readable().unwrap();
std::fs::write(path, &map).expect("failed to write header");
drop(map);
// Remove the header from the buffer list
buffer_list.make_mut().remove(0, 1);
// If the list is now empty then it only contained the media header and nothing
// else.
if buffer_list.is_empty() {
return Ok(gst::FlowSuccess::Ok);
}
// Otherwise get the next buffer and continue working with that.
first = buffer_list.get(0).unwrap();
} | conditional_block |
hls_live.rs | // Copyright (C) 2022 Mathieu Duponchelle <mathieu@centricular.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
// If a copy of the MPL was not distributed with this file, You can obtain one at
// <https://mozilla.org/MPL/2.0/>.
//
// SPDX-License-Identifier: MPL-2.0
// This creates a live HLS stream with one video playlist and two video playlists.
// Basic trimming is implemented
use gst::prelude::*;
use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::Error;
use chrono::{DateTime, Duration, Utc};
use m3u8_rs::{
AlternativeMedia, AlternativeMediaType, MasterPlaylist, MediaPlaylist, MediaSegment,
VariantStream,
};
struct State {
video_streams: Vec<VideoStream>,
audio_streams: Vec<AudioStream>,
all_mimes: Vec<String>,
path: PathBuf,
wrote_manifest: bool,
}
impl State {
fn maybe_write_manifest(&mut self) {
if self.wrote_manifest {
return;
}
if self.all_mimes.len() < self.video_streams.len() + self.audio_streams.len() {
return;
}
let mut all_mimes = self.all_mimes.clone();
all_mimes.sort();
all_mimes.dedup();
let playlist = MasterPlaylist {
version: Some(7),
variants: self
.video_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
VariantStream {
uri: path.as_path().display().to_string(),
bandwidth: stream.bitrate,
codecs: Some(all_mimes.join(",")),
resolution: Some(m3u8_rs::Resolution {
width: stream.width,
height: stream.height,
}),
audio: Some("audio".to_string()),
..Default::default()
}
})
.collect(),
alternatives: self
.audio_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
AlternativeMedia {
media_type: AlternativeMediaType::Audio,
uri: Some(path.as_path().display().to_string()),
group_id: "audio".to_string(),
language: Some(stream.lang.clone()),
name: stream.name.clone(),
default: stream.default,
autoselect: stream.default,
channels: Some("2".to_string()),
..Default::default()
}
})
.collect(),
independent_segments: true,
..Default::default()
};
println!("Writing master manifest to {}", self.path.display());
let mut file = std::fs::File::create(&self.path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write master playlist");
self.wrote_manifest = true;
}
}
struct Segment {
date_time: DateTime<Utc>,
duration: gst::ClockTime,
path: String,
}
struct UnreffedSegment {
removal_time: DateTime<Utc>,
path: String,
}
struct StreamState {
path: PathBuf,
segments: VecDeque<Segment>,
trimmed_segments: VecDeque<UnreffedSegment>,
start_date_time: Option<DateTime<Utc>>,
start_time: Option<gst::ClockTime>,
media_sequence: u64,
segment_index: u32,
}
struct VideoStream {
name: String,
bitrate: u64,
width: u64,
height: u64,
}
struct AudioStream {
name: String,
lang: String,
default: bool,
wave: String,
}
fn trim_segments(state: &mut StreamState) {
// Arbitrary 5 segments window
while state.segments.len() > 5 {
let segment = state.segments.pop_front().unwrap();
state.media_sequence += 1;
state.trimmed_segments.push_back(UnreffedSegment {
// HLS spec mandates that segments are removed from the filesystem no sooner
// than the duration of the longest playlist + duration of the segment.
// This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the
// safe side
removal_time: segment
.date_time
.checked_add_signed(Duration::seconds(20))
.unwrap(),
path: segment.path.clone(),
});
}
while let Some(segment) = state.trimmed_segments.front() {
if segment.removal_time < state.segments.front().unwrap().date_time {
let segment = state.trimmed_segments.pop_front().unwrap();
let mut path = state.path.clone();
path.push(segment.path);
println!("Removing {}", path.display());
std::fs::remove_file(path).expect("Failed to remove old segment");
} else {
break;
}
}
}
fn update_manifest(state: &mut StreamState) {
// Now write the manifest
let mut path = state.path.clone();
path.push("manifest.m3u8");
println!("writing manifest to {}", path.display());
trim_segments(state);
let playlist = MediaPlaylist {
version: Some(7),
target_duration: 2.5,
media_sequence: state.media_sequence,
segments: state
.segments
.iter()
.enumerate()
.map(|(idx, segment)| MediaSegment {
uri: segment.path.to_string(),
duration: (segment.duration.nseconds() as f64
/ gst::ClockTime::SECOND.nseconds() as f64) as f32,
map: Some(m3u8_rs::Map {
uri: "init.cmfi".into(),
..Default::default()
}),
program_date_time: if idx == 0 {
Some(segment.date_time.into())
} else {
None
},
..Default::default()
})
.collect(),
end_list: false,
playlist_type: None,
i_frames_only: false,
start: None,
independent_segments: true,
..Default::default()
};
let mut file = std::fs::File::create(path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write media playlist");
}
fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool) |
fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) {
enc.static_pad("src").unwrap().add_probe(
gst::PadProbeType::EVENT_DOWNSTREAM,
move |_pad, info| match info.data {
Some(gst::PadProbeData::Event(ref ev)) => match ev.view() {
gst::EventView::Caps(e) => {
let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps());
let mut state = state.lock().unwrap();
state.all_mimes.push(mime.unwrap().into());
state.maybe_write_manifest();
gst::PadProbeReturn::Remove
}
_ => gst::PadProbeReturn::Ok,
},
_ => gst::PadProbeReturn::Ok,
},
);
}
impl VideoStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("videotestsrc")
.property("is-live", true)
.build()?;
let raw_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst_video::VideoCapsBuilder::new()
.format(gst_video::VideoFormat::I420)
.width(self.width as i32)
.height(self.height as i32)
.framerate(30.into())
.build(),
)
.build()?;
let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?;
let enc = gst::ElementFactory::make("x264enc")
.property("bframes", 0u32)
.property("bitrate", self.bitrate as u32 / 1000u32)
.property_from_str("tune", "zerolatency")
.build()?;
let h264_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst::Caps::builder("video/x-h264")
.field("profile", "main")
.build(),
)
.build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property("fragment-duration", 2500.mseconds())
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
gst::Element::link_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, true);
Ok(())
}
}
impl AudioStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("audiotestsrc")
.property("is-live", true)
.property_from_str("wave", &self.wave)
.build()?;
let enc = gst::ElementFactory::make("avenc_aac").build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.property("fragment-duration", 2500.mseconds())
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?;
gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, false);
Ok(())
}
}
fn main() -> Result<(), Error> {
gst::init()?;
gstfmp4::plugin_register_static()?;
let path = PathBuf::from("hls_live_stream");
let pipeline = gst::Pipeline::default();
std::fs::create_dir_all(&path).expect("failed to create directory");
let mut manifest_path = path.clone();
manifest_path.push("manifest.m3u8");
let state = Arc::new(Mutex::new(State {
video_streams: vec![VideoStream {
name: "video_0".to_string(),
bitrate: 2_048_000,
width: 1280,
height: 720,
}],
audio_streams: vec![
AudioStream {
name: "audio_0".to_string(),
lang: "eng".to_string(),
default: true,
wave: "sine".to_string(),
},
AudioStream {
name: "audio_1".to_string(),
lang: "fre".to_string(),
default: false,
wave: "white-noise".to_string(),
},
],
all_mimes: vec![],
path: manifest_path.clone(),
wrote_manifest: false,
}));
{
let state_lock = state.lock().unwrap();
for stream in &state_lock.video_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
for stream in &state_lock.audio_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
}
pipeline.set_state(gst::State::Playing)?;
let bus = pipeline
.bus()
.expect("Pipeline without bus. Shouldn't happen!");
for msg in bus.iter_timed(gst::ClockTime::NONE) {
use gst::MessageView;
match msg.view() {
MessageView::Eos(..) => {
println!("EOS");
break;
}
MessageView::Error(err) => {
pipeline.set_state(gst::State::Null)?;
eprintln!(
"Got error from {}: {} ({})",
msg.src()
.map(|s| String::from(s.path_string()))
.unwrap_or_else(|| "None".into()),
err.error(),
err.debug().unwrap_or_else(|| "".into()),
);
break;
}
_ => (),
}
}
pipeline.set_state(gst::State::Null)?;
Ok(())
}
| {
let mut path: PathBuf = path.into();
path.push(name);
let state = Arc::new(Mutex::new(StreamState {
segments: VecDeque::new(),
trimmed_segments: VecDeque::new(),
path,
start_date_time: None,
start_time: gst::ClockTime::NONE,
media_sequence: 0,
segment_index: 0,
}));
appsink.set_callbacks(
gst_app::AppSinkCallbacks::builder()
.new_sample(move |sink| {
let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?;
let mut state = state.lock().unwrap();
// The muxer only outputs non-empty buffer lists
let mut buffer_list = sample.buffer_list_owned().expect("no buffer list");
assert!(!buffer_list.is_empty());
let mut first = buffer_list.get(0).unwrap();
// Each list contains a full segment, i.e. does not start with a DELTA_UNIT
assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT));
// If the buffer has the DISCONT and HEADER flag set then it contains the media
// header, i.e. the `ftyp`, `moov` and other media boxes.
//
// This might be the initial header or the updated header at the end of the stream.
if first
.flags()
.contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER)
{
let mut path = state.path.clone();
std::fs::create_dir_all(&path).expect("failed to create directory");
path.push("init.cmfi");
println!("writing header to {}", path.display());
let map = first.map_readable().unwrap();
std::fs::write(path, &map).expect("failed to write header");
drop(map);
// Remove the header from the buffer list
buffer_list.make_mut().remove(0, 1);
// If the list is now empty then it only contained the media header and nothing
// else.
if buffer_list.is_empty() {
return Ok(gst::FlowSuccess::Ok);
}
// Otherwise get the next buffer and continue working with that.
first = buffer_list.get(0).unwrap();
}
// If the buffer only has the HEADER flag set then this is a segment header that is
// followed by one or more actual media buffers.
assert!(first.flags().contains(gst::BufferFlags::HEADER));
let mut path = state.path.clone();
let basename = format!(
"segment_{}.{}",
state.segment_index,
if is_video { "cmfv" } else { "cmfa" }
);
state.segment_index += 1;
path.push(&basename);
let segment = sample
.segment()
.expect("no segment")
.downcast_ref::<gst::ClockTime>()
.expect("no time segment");
let pts = segment
.to_running_time(first.pts().unwrap())
.expect("can't get running time");
if state.start_time.is_none() {
state.start_time = Some(pts);
}
if state.start_date_time.is_none() {
let now_utc = Utc::now();
let now_gst = sink.clock().unwrap().time().unwrap();
let pts_clock_time = pts + sink.base_time().unwrap();
let diff = now_gst.checked_sub(pts_clock_time).unwrap();
let pts_utc = now_utc
.checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64))
.unwrap();
state.start_date_time = Some(pts_utc);
}
let duration = first.duration().unwrap();
let mut file = std::fs::File::create(&path).expect("failed to open fragment");
for buffer in &*buffer_list {
use std::io::prelude::*;
let map = buffer.map_readable().unwrap();
file.write_all(&map).expect("failed to write fragment");
}
let date_time = state
.start_date_time
.unwrap()
.checked_add_signed(Duration::nanoseconds(
pts.opt_checked_sub(state.start_time)
.unwrap()
.unwrap()
.nseconds() as i64,
))
.unwrap();
println!(
"wrote segment with date time {} to {}",
date_time,
path.display()
);
state.segments.push_back(Segment {
duration,
path: basename.to_string(),
date_time,
});
update_manifest(&mut state);
Ok(gst::FlowSuccess::Ok)
})
.eos(move |_sink| {
unreachable!();
})
.build(),
);
} | identifier_body |
hls_live.rs | // Copyright (C) 2022 Mathieu Duponchelle <mathieu@centricular.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
// If a copy of the MPL was not distributed with this file, You can obtain one at
// <https://mozilla.org/MPL/2.0/>.
//
// SPDX-License-Identifier: MPL-2.0
// This creates a live HLS stream with one video playlist and two video playlists.
// Basic trimming is implemented
use gst::prelude::*;
use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::Error;
use chrono::{DateTime, Duration, Utc};
use m3u8_rs::{
AlternativeMedia, AlternativeMediaType, MasterPlaylist, MediaPlaylist, MediaSegment,
VariantStream,
};
struct State {
video_streams: Vec<VideoStream>,
audio_streams: Vec<AudioStream>,
all_mimes: Vec<String>,
path: PathBuf,
wrote_manifest: bool,
}
impl State {
fn maybe_write_manifest(&mut self) {
if self.wrote_manifest {
return;
}
if self.all_mimes.len() < self.video_streams.len() + self.audio_streams.len() {
return;
}
let mut all_mimes = self.all_mimes.clone();
all_mimes.sort();
all_mimes.dedup();
let playlist = MasterPlaylist {
version: Some(7),
variants: self
.video_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
VariantStream {
uri: path.as_path().display().to_string(),
bandwidth: stream.bitrate,
codecs: Some(all_mimes.join(",")),
resolution: Some(m3u8_rs::Resolution {
width: stream.width,
height: stream.height,
}),
audio: Some("audio".to_string()),
..Default::default()
}
})
.collect(),
alternatives: self
.audio_streams
.iter()
.map(|stream| {
let mut path = PathBuf::new();
path.push(&stream.name);
path.push("manifest.m3u8");
AlternativeMedia {
media_type: AlternativeMediaType::Audio,
uri: Some(path.as_path().display().to_string()),
group_id: "audio".to_string(),
language: Some(stream.lang.clone()),
name: stream.name.clone(),
default: stream.default,
autoselect: stream.default,
channels: Some("2".to_string()),
..Default::default()
}
})
.collect(),
independent_segments: true,
..Default::default()
};
println!("Writing master manifest to {}", self.path.display());
let mut file = std::fs::File::create(&self.path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write master playlist");
self.wrote_manifest = true;
}
}
struct Segment {
date_time: DateTime<Utc>,
duration: gst::ClockTime,
path: String,
}
struct UnreffedSegment {
removal_time: DateTime<Utc>,
path: String,
}
struct StreamState {
path: PathBuf,
segments: VecDeque<Segment>,
trimmed_segments: VecDeque<UnreffedSegment>,
start_date_time: Option<DateTime<Utc>>,
start_time: Option<gst::ClockTime>,
media_sequence: u64,
segment_index: u32,
}
struct | {
name: String,
bitrate: u64,
width: u64,
height: u64,
}
struct AudioStream {
name: String,
lang: String,
default: bool,
wave: String,
}
fn trim_segments(state: &mut StreamState) {
// Arbitrary 5 segments window
while state.segments.len() > 5 {
let segment = state.segments.pop_front().unwrap();
state.media_sequence += 1;
state.trimmed_segments.push_back(UnreffedSegment {
// HLS spec mandates that segments are removed from the filesystem no sooner
// than the duration of the longest playlist + duration of the segment.
// This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the
// safe side
removal_time: segment
.date_time
.checked_add_signed(Duration::seconds(20))
.unwrap(),
path: segment.path.clone(),
});
}
while let Some(segment) = state.trimmed_segments.front() {
if segment.removal_time < state.segments.front().unwrap().date_time {
let segment = state.trimmed_segments.pop_front().unwrap();
let mut path = state.path.clone();
path.push(segment.path);
println!("Removing {}", path.display());
std::fs::remove_file(path).expect("Failed to remove old segment");
} else {
break;
}
}
}
fn update_manifest(state: &mut StreamState) {
// Now write the manifest
let mut path = state.path.clone();
path.push("manifest.m3u8");
println!("writing manifest to {}", path.display());
trim_segments(state);
let playlist = MediaPlaylist {
version: Some(7),
target_duration: 2.5,
media_sequence: state.media_sequence,
segments: state
.segments
.iter()
.enumerate()
.map(|(idx, segment)| MediaSegment {
uri: segment.path.to_string(),
duration: (segment.duration.nseconds() as f64
/ gst::ClockTime::SECOND.nseconds() as f64) as f32,
map: Some(m3u8_rs::Map {
uri: "init.cmfi".into(),
..Default::default()
}),
program_date_time: if idx == 0 {
Some(segment.date_time.into())
} else {
None
},
..Default::default()
})
.collect(),
end_list: false,
playlist_type: None,
i_frames_only: false,
start: None,
independent_segments: true,
..Default::default()
};
let mut file = std::fs::File::create(path).unwrap();
playlist
.write_to(&mut file)
.expect("Failed to write media playlist");
}
fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool) {
let mut path: PathBuf = path.into();
path.push(name);
let state = Arc::new(Mutex::new(StreamState {
segments: VecDeque::new(),
trimmed_segments: VecDeque::new(),
path,
start_date_time: None,
start_time: gst::ClockTime::NONE,
media_sequence: 0,
segment_index: 0,
}));
appsink.set_callbacks(
gst_app::AppSinkCallbacks::builder()
.new_sample(move |sink| {
let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?;
let mut state = state.lock().unwrap();
// The muxer only outputs non-empty buffer lists
let mut buffer_list = sample.buffer_list_owned().expect("no buffer list");
assert!(!buffer_list.is_empty());
let mut first = buffer_list.get(0).unwrap();
// Each list contains a full segment, i.e. does not start with a DELTA_UNIT
assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT));
// If the buffer has the DISCONT and HEADER flag set then it contains the media
// header, i.e. the `ftyp`, `moov` and other media boxes.
//
// This might be the initial header or the updated header at the end of the stream.
if first
.flags()
.contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER)
{
let mut path = state.path.clone();
std::fs::create_dir_all(&path).expect("failed to create directory");
path.push("init.cmfi");
println!("writing header to {}", path.display());
let map = first.map_readable().unwrap();
std::fs::write(path, &map).expect("failed to write header");
drop(map);
// Remove the header from the buffer list
buffer_list.make_mut().remove(0, 1);
// If the list is now empty then it only contained the media header and nothing
// else.
if buffer_list.is_empty() {
return Ok(gst::FlowSuccess::Ok);
}
// Otherwise get the next buffer and continue working with that.
first = buffer_list.get(0).unwrap();
}
// If the buffer only has the HEADER flag set then this is a segment header that is
// followed by one or more actual media buffers.
assert!(first.flags().contains(gst::BufferFlags::HEADER));
let mut path = state.path.clone();
let basename = format!(
"segment_{}.{}",
state.segment_index,
if is_video { "cmfv" } else { "cmfa" }
);
state.segment_index += 1;
path.push(&basename);
let segment = sample
.segment()
.expect("no segment")
.downcast_ref::<gst::ClockTime>()
.expect("no time segment");
let pts = segment
.to_running_time(first.pts().unwrap())
.expect("can't get running time");
if state.start_time.is_none() {
state.start_time = Some(pts);
}
if state.start_date_time.is_none() {
let now_utc = Utc::now();
let now_gst = sink.clock().unwrap().time().unwrap();
let pts_clock_time = pts + sink.base_time().unwrap();
let diff = now_gst.checked_sub(pts_clock_time).unwrap();
let pts_utc = now_utc
.checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64))
.unwrap();
state.start_date_time = Some(pts_utc);
}
let duration = first.duration().unwrap();
let mut file = std::fs::File::create(&path).expect("failed to open fragment");
for buffer in &*buffer_list {
use std::io::prelude::*;
let map = buffer.map_readable().unwrap();
file.write_all(&map).expect("failed to write fragment");
}
let date_time = state
.start_date_time
.unwrap()
.checked_add_signed(Duration::nanoseconds(
pts.opt_checked_sub(state.start_time)
.unwrap()
.unwrap()
.nseconds() as i64,
))
.unwrap();
println!(
"wrote segment with date time {} to {}",
date_time,
path.display()
);
state.segments.push_back(Segment {
duration,
path: basename.to_string(),
date_time,
});
update_manifest(&mut state);
Ok(gst::FlowSuccess::Ok)
})
.eos(move |_sink| {
unreachable!();
})
.build(),
);
}
fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) {
enc.static_pad("src").unwrap().add_probe(
gst::PadProbeType::EVENT_DOWNSTREAM,
move |_pad, info| match info.data {
Some(gst::PadProbeData::Event(ref ev)) => match ev.view() {
gst::EventView::Caps(e) => {
let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps());
let mut state = state.lock().unwrap();
state.all_mimes.push(mime.unwrap().into());
state.maybe_write_manifest();
gst::PadProbeReturn::Remove
}
_ => gst::PadProbeReturn::Ok,
},
_ => gst::PadProbeReturn::Ok,
},
);
}
impl VideoStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("videotestsrc")
.property("is-live", true)
.build()?;
let raw_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst_video::VideoCapsBuilder::new()
.format(gst_video::VideoFormat::I420)
.width(self.width as i32)
.height(self.height as i32)
.framerate(30.into())
.build(),
)
.build()?;
let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?;
let enc = gst::ElementFactory::make("x264enc")
.property("bframes", 0u32)
.property("bitrate", self.bitrate as u32 / 1000u32)
.property_from_str("tune", "zerolatency")
.build()?;
let h264_capsfilter = gst::ElementFactory::make("capsfilter")
.property(
"caps",
gst::Caps::builder("video/x-h264")
.field("profile", "main")
.build(),
)
.build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property("fragment-duration", 2500.mseconds())
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
gst::Element::link_many([
&src,
&raw_capsfilter,
&timeoverlay,
&enc,
&h264_capsfilter,
&mux,
appsink.upcast_ref(),
])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, true);
Ok(())
}
}
impl AudioStream {
fn setup(
&self,
state: Arc<Mutex<State>>,
pipeline: &gst::Pipeline,
path: &Path,
) -> Result<(), Error> {
let src = gst::ElementFactory::make("audiotestsrc")
.property("is-live", true)
.property_from_str("wave", &self.wave)
.build()?;
let enc = gst::ElementFactory::make("avenc_aac").build()?;
let mux = gst::ElementFactory::make("cmafmux")
.property_from_str("header-update-mode", "update")
.property("write-mehd", true)
.property("fragment-duration", 2500.mseconds())
.build()?;
let appsink = gst_app::AppSink::builder().buffer_list(true).build();
pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?;
gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?;
probe_encoder(state, enc);
setup_appsink(&appsink, &self.name, path, false);
Ok(())
}
}
fn main() -> Result<(), Error> {
gst::init()?;
gstfmp4::plugin_register_static()?;
let path = PathBuf::from("hls_live_stream");
let pipeline = gst::Pipeline::default();
std::fs::create_dir_all(&path).expect("failed to create directory");
let mut manifest_path = path.clone();
manifest_path.push("manifest.m3u8");
let state = Arc::new(Mutex::new(State {
video_streams: vec![VideoStream {
name: "video_0".to_string(),
bitrate: 2_048_000,
width: 1280,
height: 720,
}],
audio_streams: vec![
AudioStream {
name: "audio_0".to_string(),
lang: "eng".to_string(),
default: true,
wave: "sine".to_string(),
},
AudioStream {
name: "audio_1".to_string(),
lang: "fre".to_string(),
default: false,
wave: "white-noise".to_string(),
},
],
all_mimes: vec![],
path: manifest_path.clone(),
wrote_manifest: false,
}));
{
let state_lock = state.lock().unwrap();
for stream in &state_lock.video_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
for stream in &state_lock.audio_streams {
stream.setup(state.clone(), &pipeline, &path)?;
}
}
pipeline.set_state(gst::State::Playing)?;
let bus = pipeline
.bus()
.expect("Pipeline without bus. Shouldn't happen!");
for msg in bus.iter_timed(gst::ClockTime::NONE) {
use gst::MessageView;
match msg.view() {
MessageView::Eos(..) => {
println!("EOS");
break;
}
MessageView::Error(err) => {
pipeline.set_state(gst::State::Null)?;
eprintln!(
"Got error from {}: {} ({})",
msg.src()
.map(|s| String::from(s.path_string()))
.unwrap_or_else(|| "None".into()),
err.error(),
err.debug().unwrap_or_else(|| "".into()),
);
break;
}
_ => (),
}
}
pipeline.set_state(gst::State::Null)?;
Ok(())
}
| VideoStream | identifier_name |
tasks.py | from __future__ import absolute_import, unicode_literals
import json
import pkg_resources
from celery.utils.log import get_task_logger
from celery.worker.control import Panel
from reviewbot.celery import celery
from rbtools.api.client import RBClient
from reviewbot.processing.review import Review
from reviewbot.repositories import repositories
from reviewbot.utils.filesystem import cleanup_tempfiles
# TODO: Make the cookie file configurable.
COOKIE_FILE = 'reviewbot-cookies.txt'
# TODO: Include version information in the agent.
AGENT = 'ReviewBot'
# Status Update states
PENDING = 'pending'
DONE_SUCCESS = 'done-success'
DONE_FAILURE = 'done-failure'
ERROR = 'error'
logger = get_task_logger(__name__)
@celery.task(ignore_result=True)
def RunTool(server_url='',
session='',
username='',
review_request_id=-1,
diff_revision=-1,
status_update_id=-1,
review_settings={},
tool_options={},
repository_name='',
base_commit_id='',
*args, **kwargs):
|
@Panel.register
def update_tools_list(panel, payload):
"""Update the list of installed tools.
This will detect the installed analysis tool plugins
and inform Review Board of them.
Args:
panel (celery.worker.control.Panel):
The worker control panel.
payload (dict):
The payload as assembled by the extension.
Returns:
bool:
Whether the task completed successfully.
"""
logger.info('Request to refresh installed tools from "%s"',
payload['url'])
logger.info('Iterating Tools')
tools = []
for ep in pkg_resources.iter_entry_points(group='reviewbot.tools'):
entry_point = ep.name
tool_class = ep.load()
tool = tool_class()
logger.info('Tool: %s' % entry_point)
if tool.check_dependencies():
tools.append({
'name': tool_class.name,
'entry_point': entry_point,
'version': tool_class.version,
'description': tool_class.description,
'tool_options': json.dumps(tool_class.options),
'timeout': tool_class.timeout,
'working_directory_required':
tool_class.working_directory_required,
})
else:
logger.warning('%s dependency check failed.', ep.name)
logger.info('Done iterating Tools')
hostname = panel.hostname
try:
api_client = RBClient(
payload['url'],
cookie_file=COOKIE_FILE,
agent=AGENT,
session=payload['session'])
api_root = api_client.get_root()
except Exception as e:
logger.exception('Could not reach RB server: %s', e)
return {
'status': 'error',
'error': 'Could not reach Review Board server: %s' % e,
}
try:
api_tools = _get_extension_resource(api_root).get_tools()
api_tools.create(hostname=hostname, tools=json.dumps(tools))
except Exception as e:
logger.exception('Problem POSTing tools: %s', e)
return {
'status': 'error',
'error': 'Problem uploading tools: %s' % e,
}
return {
'status': 'ok',
'tools': tools,
}
def _get_extension_resource(api_root):
"""Return the Review Bot extension resource.
Args:
api_root (rbtools.api.resource.Resource):
The server API root.
Returns:
rbtools.api.resource.Resource:
The extension's API resource.
"""
# TODO: Cache this. We only use this resource as a link to sub-resources.
return api_root.get_extension(
extension_name='reviewbotext.extension.ReviewBotExtension')
| """Execute an automated review on a review request.
Args:
server_url (unicode):
The URL of the Review Board server.
session (unicode):
The encoded session identifier.
username (unicode):
The name of the user who owns the ``session``.
review_request_id (int):
The ID of the review request being reviewed (ID for use in the
API, which is the "display_id" field).
diff_revision (int):
The ID of the diff revision being reviewed.
status_update_id (int):
The ID of the status update for this invocation of the tool.
review_settings (dict):
Settings for how the review should be created.
tool_options (dict):
The tool-specific settings.
repository_name (unicode):
The name of the repository to clone to run the tool, if the tool
requires full working directory access.
base_commit_id (unicode):
The ID of the commit that the patch should be applied to.
args (tuple):
Any additional positional arguments (perhaps used by a newer
version of the Review Bot extension).
kwargs (dict):
Any additional keyword arguments (perhaps used by a newer version
of the Review Bot extension).
Returns:
bool:
Whether the task completed successfully.
"""
try:
routing_key = RunTool.request.delivery_info['routing_key']
route_parts = routing_key.partition('.')
tool_name = route_parts[0]
log_detail = ('(server=%s, review_request_id=%s, diff_revision=%s)'
% (server_url, review_request_id, diff_revision))
logger.info('Running tool "%s" %s', tool_name, log_detail)
try:
logger.info('Initializing RB API %s', log_detail)
api_client = RBClient(server_url,
cookie_file=COOKIE_FILE,
agent=AGENT,
session=session)
api_root = api_client.get_root()
except Exception as e:
logger.error('Could not contact Review Board server: %s %s',
e, log_detail)
return False
logger.info('Loading requested tool "%s" %s', tool_name, log_detail)
tools = [
entrypoint.load()
for entrypoint in pkg_resources.iter_entry_points(
group='reviewbot.tools', name=tool_name)
]
if len(tools) == 0:
logger.error('Tool "%s" not found %s', tool_name, log_detail)
return False
elif len(tools) > 1:
logger.error('Tool "%s" is ambiguous (found %s) %s',
tool_name, ', '.join(tool.name for tool in tools),
log_detail)
return False
else:
tool = tools[0]
repository = None
try:
logger.info('Creating status update %s', log_detail)
status_update = api_root.get_status_update(
review_request_id=review_request_id,
status_update_id=status_update_id)
except Exception as e:
logger.exception('Unable to create status update: %s %s',
e, log_detail)
return False
if tool.working_directory_required:
if not base_commit_id:
logger.error('Working directory is required but the diffset '
'has no base_commit_id %s', log_detail)
status_update.update(
state=ERROR,
description='Diff does not include parent commit '
'information.')
return False
try:
repository = repositories[repository_name]
except KeyError:
logger.error('Unable to find configured repository "%s" %s',
repository_name, log_detail)
return False
try:
logger.info('Initializing review %s', log_detail)
review = Review(api_root, review_request_id, diff_revision,
review_settings)
status_update.update(description='running...')
except Exception as e:
logger.exception('Failed to initialize review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Initializing tool "%s %s" %s',
tool.name, tool.version, log_detail)
t = tool()
except Exception as e:
logger.exception('Error initializing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Executing tool "%s" %s', tool.name, log_detail)
t.execute(review, settings=tool_options, repository=repository,
base_commit_id=base_commit_id)
logger.info('Tool "%s" completed successfully %s',
tool.name, log_detail)
except Exception as e:
logger.exception('Error executing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
if t.output:
file_attachments = \
api_root.get_user_file_attachments(username=username)
attachment = \
file_attachments.upload_attachment('tool-output', t.output)
status_update.update(url=attachment.absolute_url,
url_text='Tool console output')
try:
if len(review.comments) == 0:
status_update.update(state=DONE_SUCCESS,
description='passed.')
else:
logger.info('Publishing review %s', log_detail)
review_id = review.publish().id
status_update.update(state=DONE_FAILURE,
description='failed.',
review_id=review_id)
except Exception as e:
logger.exception('Error when publishing review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
logger.info('Review completed successfully %s', log_detail)
return True
finally:
cleanup_tempfiles() | identifier_body |
tasks.py | from __future__ import absolute_import, unicode_literals
import json
import pkg_resources
from celery.utils.log import get_task_logger
from celery.worker.control import Panel
from reviewbot.celery import celery
from rbtools.api.client import RBClient
from reviewbot.processing.review import Review
from reviewbot.repositories import repositories
from reviewbot.utils.filesystem import cleanup_tempfiles
# TODO: Make the cookie file configurable.
COOKIE_FILE = 'reviewbot-cookies.txt'
# TODO: Include version information in the agent.
AGENT = 'ReviewBot'
# Status Update states
PENDING = 'pending'
DONE_SUCCESS = 'done-success'
DONE_FAILURE = 'done-failure'
ERROR = 'error'
logger = get_task_logger(__name__)
@celery.task(ignore_result=True)
def RunTool(server_url='',
session='',
username='',
review_request_id=-1,
diff_revision=-1,
status_update_id=-1,
review_settings={},
tool_options={},
repository_name='',
base_commit_id='',
*args, **kwargs):
"""Execute an automated review on a review request.
Args:
server_url (unicode):
The URL of the Review Board server.
session (unicode):
The encoded session identifier.
username (unicode):
The name of the user who owns the ``session``.
review_request_id (int):
The ID of the review request being reviewed (ID for use in the
API, which is the "display_id" field).
diff_revision (int):
The ID of the diff revision being reviewed.
status_update_id (int):
The ID of the status update for this invocation of the tool.
review_settings (dict):
Settings for how the review should be created.
tool_options (dict):
The tool-specific settings.
repository_name (unicode):
The name of the repository to clone to run the tool, if the tool
requires full working directory access.
base_commit_id (unicode):
The ID of the commit that the patch should be applied to.
args (tuple):
Any additional positional arguments (perhaps used by a newer
version of the Review Bot extension).
kwargs (dict):
Any additional keyword arguments (perhaps used by a newer version
of the Review Bot extension).
Returns:
bool:
Whether the task completed successfully.
"""
try:
routing_key = RunTool.request.delivery_info['routing_key']
route_parts = routing_key.partition('.')
tool_name = route_parts[0]
log_detail = ('(server=%s, review_request_id=%s, diff_revision=%s)'
% (server_url, review_request_id, diff_revision))
logger.info('Running tool "%s" %s', tool_name, log_detail)
try:
logger.info('Initializing RB API %s', log_detail)
api_client = RBClient(server_url,
cookie_file=COOKIE_FILE,
agent=AGENT,
session=session)
api_root = api_client.get_root()
except Exception as e:
logger.error('Could not contact Review Board server: %s %s',
e, log_detail)
return False
logger.info('Loading requested tool "%s" %s', tool_name, log_detail)
tools = [
entrypoint.load()
for entrypoint in pkg_resources.iter_entry_points(
group='reviewbot.tools', name=tool_name)
]
if len(tools) == 0:
logger.error('Tool "%s" not found %s', tool_name, log_detail)
return False
elif len(tools) > 1:
logger.error('Tool "%s" is ambiguous (found %s) %s',
tool_name, ', '.join(tool.name for tool in tools),
log_detail)
return False
else:
tool = tools[0]
repository = None
try:
logger.info('Creating status update %s', log_detail)
status_update = api_root.get_status_update(
review_request_id=review_request_id,
status_update_id=status_update_id)
except Exception as e:
logger.exception('Unable to create status update: %s %s',
e, log_detail)
return False
if tool.working_directory_required:
if not base_commit_id:
logger.error('Working directory is required but the diffset '
'has no base_commit_id %s', log_detail)
status_update.update(
state=ERROR,
description='Diff does not include parent commit '
'information.')
return False
try:
repository = repositories[repository_name]
except KeyError:
logger.error('Unable to find configured repository "%s" %s',
repository_name, log_detail)
return False
try:
logger.info('Initializing review %s', log_detail)
review = Review(api_root, review_request_id, diff_revision,
review_settings)
status_update.update(description='running...')
except Exception as e:
logger.exception('Failed to initialize review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Initializing tool "%s %s" %s',
tool.name, tool.version, log_detail)
t = tool()
except Exception as e:
logger.exception('Error initializing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Executing tool "%s" %s', tool.name, log_detail)
t.execute(review, settings=tool_options, repository=repository,
base_commit_id=base_commit_id)
logger.info('Tool "%s" completed successfully %s',
tool.name, log_detail)
except Exception as e:
logger.exception('Error executing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
if t.output:
file_attachments = \
api_root.get_user_file_attachments(username=username)
attachment = \
file_attachments.upload_attachment('tool-output', t.output)
status_update.update(url=attachment.absolute_url,
url_text='Tool console output')
try:
if len(review.comments) == 0:
status_update.update(state=DONE_SUCCESS,
description='passed.')
else:
logger.info('Publishing review %s', log_detail)
review_id = review.publish().id
status_update.update(state=DONE_FAILURE,
description='failed.',
review_id=review_id)
except Exception as e:
logger.exception('Error when publishing review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
logger.info('Review completed successfully %s', log_detail)
return True
finally:
cleanup_tempfiles()
@Panel.register
def update_tools_list(panel, payload):
"""Update the list of installed tools.
This will detect the installed analysis tool plugins
and inform Review Board of them.
Args:
panel (celery.worker.control.Panel):
The worker control panel.
payload (dict):
The payload as assembled by the extension.
Returns:
bool:
Whether the task completed successfully.
"""
logger.info('Request to refresh installed tools from "%s"',
payload['url'])
logger.info('Iterating Tools')
tools = []
for ep in pkg_resources.iter_entry_points(group='reviewbot.tools'):
entry_point = ep.name
tool_class = ep.load()
tool = tool_class()
logger.info('Tool: %s' % entry_point)
if tool.check_dependencies():
tools.append({
'name': tool_class.name,
'entry_point': entry_point,
'version': tool_class.version,
'description': tool_class.description,
'tool_options': json.dumps(tool_class.options),
'timeout': tool_class.timeout,
'working_directory_required':
tool_class.working_directory_required,
})
else:
logger.warning('%s dependency check failed.', ep.name)
logger.info('Done iterating Tools')
hostname = panel.hostname
try:
api_client = RBClient(
payload['url'],
cookie_file=COOKIE_FILE,
agent=AGENT,
session=payload['session'])
api_root = api_client.get_root()
except Exception as e:
logger.exception('Could not reach RB server: %s', e)
return {
'status': 'error',
'error': 'Could not reach Review Board server: %s' % e,
}
try:
api_tools = _get_extension_resource(api_root).get_tools()
api_tools.create(hostname=hostname, tools=json.dumps(tools))
except Exception as e:
logger.exception('Problem POSTing tools: %s', e)
return {
'status': 'error',
'error': 'Problem uploading tools: %s' % e,
}
return {
'status': 'ok',
'tools': tools,
}
def | (api_root):
"""Return the Review Bot extension resource.
Args:
api_root (rbtools.api.resource.Resource):
The server API root.
Returns:
rbtools.api.resource.Resource:
The extension's API resource.
"""
# TODO: Cache this. We only use this resource as a link to sub-resources.
return api_root.get_extension(
extension_name='reviewbotext.extension.ReviewBotExtension')
| _get_extension_resource | identifier_name |
tasks.py | from __future__ import absolute_import, unicode_literals
import json
import pkg_resources
from celery.utils.log import get_task_logger
from celery.worker.control import Panel
from reviewbot.celery import celery
from rbtools.api.client import RBClient
from reviewbot.processing.review import Review
from reviewbot.repositories import repositories
from reviewbot.utils.filesystem import cleanup_tempfiles
# TODO: Make the cookie file configurable.
COOKIE_FILE = 'reviewbot-cookies.txt'
# TODO: Include version information in the agent.
AGENT = 'ReviewBot'
# Status Update states
PENDING = 'pending'
DONE_SUCCESS = 'done-success'
DONE_FAILURE = 'done-failure'
ERROR = 'error'
logger = get_task_logger(__name__)
@celery.task(ignore_result=True)
def RunTool(server_url='',
session='',
username='',
review_request_id=-1,
diff_revision=-1,
status_update_id=-1,
review_settings={},
tool_options={},
repository_name='',
base_commit_id='',
*args, **kwargs):
"""Execute an automated review on a review request.
Args:
server_url (unicode):
The URL of the Review Board server.
session (unicode):
The encoded session identifier.
username (unicode):
The name of the user who owns the ``session``.
review_request_id (int):
The ID of the review request being reviewed (ID for use in the
API, which is the "display_id" field).
diff_revision (int):
The ID of the diff revision being reviewed.
status_update_id (int):
The ID of the status update for this invocation of the tool.
review_settings (dict):
Settings for how the review should be created.
tool_options (dict):
The tool-specific settings.
repository_name (unicode):
The name of the repository to clone to run the tool, if the tool
requires full working directory access.
base_commit_id (unicode):
The ID of the commit that the patch should be applied to.
args (tuple):
Any additional positional arguments (perhaps used by a newer
version of the Review Bot extension).
kwargs (dict):
Any additional keyword arguments (perhaps used by a newer version
of the Review Bot extension).
Returns:
bool:
Whether the task completed successfully.
"""
try:
routing_key = RunTool.request.delivery_info['routing_key']
route_parts = routing_key.partition('.')
tool_name = route_parts[0]
log_detail = ('(server=%s, review_request_id=%s, diff_revision=%s)'
% (server_url, review_request_id, diff_revision))
logger.info('Running tool "%s" %s', tool_name, log_detail)
try:
logger.info('Initializing RB API %s', log_detail)
api_client = RBClient(server_url,
cookie_file=COOKIE_FILE,
agent=AGENT,
session=session)
api_root = api_client.get_root()
except Exception as e:
logger.error('Could not contact Review Board server: %s %s',
e, log_detail)
return False
logger.info('Loading requested tool "%s" %s', tool_name, log_detail)
tools = [
entrypoint.load()
for entrypoint in pkg_resources.iter_entry_points(
group='reviewbot.tools', name=tool_name)
]
if len(tools) == 0:
logger.error('Tool "%s" not found %s', tool_name, log_detail)
return False
elif len(tools) > 1:
logger.error('Tool "%s" is ambiguous (found %s) %s',
tool_name, ', '.join(tool.name for tool in tools),
log_detail)
return False
else:
tool = tools[0]
repository = None
try:
logger.info('Creating status update %s', log_detail)
status_update = api_root.get_status_update(
review_request_id=review_request_id,
status_update_id=status_update_id)
except Exception as e:
logger.exception('Unable to create status update: %s %s',
e, log_detail)
return False
if tool.working_directory_required:
if not base_commit_id:
logger.error('Working directory is required but the diffset '
'has no base_commit_id %s', log_detail)
status_update.update(
state=ERROR,
description='Diff does not include parent commit '
'information.')
return False
try:
repository = repositories[repository_name]
except KeyError:
logger.error('Unable to find configured repository "%s" %s',
repository_name, log_detail)
return False
try:
logger.info('Initializing review %s', log_detail)
review = Review(api_root, review_request_id, diff_revision,
review_settings)
status_update.update(description='running...')
except Exception as e:
logger.exception('Failed to initialize review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Initializing tool "%s %s" %s',
tool.name, tool.version, log_detail)
t = tool()
except Exception as e:
logger.exception('Error initializing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Executing tool "%s" %s', tool.name, log_detail)
t.execute(review, settings=tool_options, repository=repository,
base_commit_id=base_commit_id)
logger.info('Tool "%s" completed successfully %s',
tool.name, log_detail)
except Exception as e:
logger.exception('Error executing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
if t.output:
file_attachments = \
api_root.get_user_file_attachments(username=username)
attachment = \
file_attachments.upload_attachment('tool-output', t.output)
status_update.update(url=attachment.absolute_url,
url_text='Tool console output')
try:
if len(review.comments) == 0:
|
else:
logger.info('Publishing review %s', log_detail)
review_id = review.publish().id
status_update.update(state=DONE_FAILURE,
description='failed.',
review_id=review_id)
except Exception as e:
logger.exception('Error when publishing review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
logger.info('Review completed successfully %s', log_detail)
return True
finally:
cleanup_tempfiles()
@Panel.register
def update_tools_list(panel, payload):
"""Update the list of installed tools.
This will detect the installed analysis tool plugins
and inform Review Board of them.
Args:
panel (celery.worker.control.Panel):
The worker control panel.
payload (dict):
The payload as assembled by the extension.
Returns:
bool:
Whether the task completed successfully.
"""
logger.info('Request to refresh installed tools from "%s"',
payload['url'])
logger.info('Iterating Tools')
tools = []
for ep in pkg_resources.iter_entry_points(group='reviewbot.tools'):
entry_point = ep.name
tool_class = ep.load()
tool = tool_class()
logger.info('Tool: %s' % entry_point)
if tool.check_dependencies():
tools.append({
'name': tool_class.name,
'entry_point': entry_point,
'version': tool_class.version,
'description': tool_class.description,
'tool_options': json.dumps(tool_class.options),
'timeout': tool_class.timeout,
'working_directory_required':
tool_class.working_directory_required,
})
else:
logger.warning('%s dependency check failed.', ep.name)
logger.info('Done iterating Tools')
hostname = panel.hostname
try:
api_client = RBClient(
payload['url'],
cookie_file=COOKIE_FILE,
agent=AGENT,
session=payload['session'])
api_root = api_client.get_root()
except Exception as e:
logger.exception('Could not reach RB server: %s', e)
return {
'status': 'error',
'error': 'Could not reach Review Board server: %s' % e,
}
try:
api_tools = _get_extension_resource(api_root).get_tools()
api_tools.create(hostname=hostname, tools=json.dumps(tools))
except Exception as e:
logger.exception('Problem POSTing tools: %s', e)
return {
'status': 'error',
'error': 'Problem uploading tools: %s' % e,
}
return {
'status': 'ok',
'tools': tools,
}
def _get_extension_resource(api_root):
"""Return the Review Bot extension resource.
Args:
api_root (rbtools.api.resource.Resource):
The server API root.
Returns:
rbtools.api.resource.Resource:
The extension's API resource.
"""
# TODO: Cache this. We only use this resource as a link to sub-resources.
return api_root.get_extension(
extension_name='reviewbotext.extension.ReviewBotExtension')
| status_update.update(state=DONE_SUCCESS,
description='passed.') | conditional_block |
tasks.py | from __future__ import absolute_import, unicode_literals
import json
import pkg_resources
from celery.utils.log import get_task_logger
from celery.worker.control import Panel
from reviewbot.celery import celery |
from reviewbot.processing.review import Review
from reviewbot.repositories import repositories
from reviewbot.utils.filesystem import cleanup_tempfiles
# TODO: Make the cookie file configurable.
COOKIE_FILE = 'reviewbot-cookies.txt'
# TODO: Include version information in the agent.
AGENT = 'ReviewBot'
# Status Update states
PENDING = 'pending'
DONE_SUCCESS = 'done-success'
DONE_FAILURE = 'done-failure'
ERROR = 'error'
logger = get_task_logger(__name__)
@celery.task(ignore_result=True)
def RunTool(server_url='',
session='',
username='',
review_request_id=-1,
diff_revision=-1,
status_update_id=-1,
review_settings={},
tool_options={},
repository_name='',
base_commit_id='',
*args, **kwargs):
"""Execute an automated review on a review request.
Args:
server_url (unicode):
The URL of the Review Board server.
session (unicode):
The encoded session identifier.
username (unicode):
The name of the user who owns the ``session``.
review_request_id (int):
The ID of the review request being reviewed (ID for use in the
API, which is the "display_id" field).
diff_revision (int):
The ID of the diff revision being reviewed.
status_update_id (int):
The ID of the status update for this invocation of the tool.
review_settings (dict):
Settings for how the review should be created.
tool_options (dict):
The tool-specific settings.
repository_name (unicode):
The name of the repository to clone to run the tool, if the tool
requires full working directory access.
base_commit_id (unicode):
The ID of the commit that the patch should be applied to.
args (tuple):
Any additional positional arguments (perhaps used by a newer
version of the Review Bot extension).
kwargs (dict):
Any additional keyword arguments (perhaps used by a newer version
of the Review Bot extension).
Returns:
bool:
Whether the task completed successfully.
"""
try:
routing_key = RunTool.request.delivery_info['routing_key']
route_parts = routing_key.partition('.')
tool_name = route_parts[0]
log_detail = ('(server=%s, review_request_id=%s, diff_revision=%s)'
% (server_url, review_request_id, diff_revision))
logger.info('Running tool "%s" %s', tool_name, log_detail)
try:
logger.info('Initializing RB API %s', log_detail)
api_client = RBClient(server_url,
cookie_file=COOKIE_FILE,
agent=AGENT,
session=session)
api_root = api_client.get_root()
except Exception as e:
logger.error('Could not contact Review Board server: %s %s',
e, log_detail)
return False
logger.info('Loading requested tool "%s" %s', tool_name, log_detail)
tools = [
entrypoint.load()
for entrypoint in pkg_resources.iter_entry_points(
group='reviewbot.tools', name=tool_name)
]
if len(tools) == 0:
logger.error('Tool "%s" not found %s', tool_name, log_detail)
return False
elif len(tools) > 1:
logger.error('Tool "%s" is ambiguous (found %s) %s',
tool_name, ', '.join(tool.name for tool in tools),
log_detail)
return False
else:
tool = tools[0]
repository = None
try:
logger.info('Creating status update %s', log_detail)
status_update = api_root.get_status_update(
review_request_id=review_request_id,
status_update_id=status_update_id)
except Exception as e:
logger.exception('Unable to create status update: %s %s',
e, log_detail)
return False
if tool.working_directory_required:
if not base_commit_id:
logger.error('Working directory is required but the diffset '
'has no base_commit_id %s', log_detail)
status_update.update(
state=ERROR,
description='Diff does not include parent commit '
'information.')
return False
try:
repository = repositories[repository_name]
except KeyError:
logger.error('Unable to find configured repository "%s" %s',
repository_name, log_detail)
return False
try:
logger.info('Initializing review %s', log_detail)
review = Review(api_root, review_request_id, diff_revision,
review_settings)
status_update.update(description='running...')
except Exception as e:
logger.exception('Failed to initialize review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Initializing tool "%s %s" %s',
tool.name, tool.version, log_detail)
t = tool()
except Exception as e:
logger.exception('Error initializing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
try:
logger.info('Executing tool "%s" %s', tool.name, log_detail)
t.execute(review, settings=tool_options, repository=repository,
base_commit_id=base_commit_id)
logger.info('Tool "%s" completed successfully %s',
tool.name, log_detail)
except Exception as e:
logger.exception('Error executing tool "%s": %s %s',
tool.name, e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
if t.output:
file_attachments = \
api_root.get_user_file_attachments(username=username)
attachment = \
file_attachments.upload_attachment('tool-output', t.output)
status_update.update(url=attachment.absolute_url,
url_text='Tool console output')
try:
if len(review.comments) == 0:
status_update.update(state=DONE_SUCCESS,
description='passed.')
else:
logger.info('Publishing review %s', log_detail)
review_id = review.publish().id
status_update.update(state=DONE_FAILURE,
description='failed.',
review_id=review_id)
except Exception as e:
logger.exception('Error when publishing review: %s %s', e, log_detail)
status_update.update(state=ERROR, description='internal error.')
return False
logger.info('Review completed successfully %s', log_detail)
return True
finally:
cleanup_tempfiles()
@Panel.register
def update_tools_list(panel, payload):
"""Update the list of installed tools.
This will detect the installed analysis tool plugins
and inform Review Board of them.
Args:
panel (celery.worker.control.Panel):
The worker control panel.
payload (dict):
The payload as assembled by the extension.
Returns:
bool:
Whether the task completed successfully.
"""
logger.info('Request to refresh installed tools from "%s"',
payload['url'])
logger.info('Iterating Tools')
tools = []
for ep in pkg_resources.iter_entry_points(group='reviewbot.tools'):
entry_point = ep.name
tool_class = ep.load()
tool = tool_class()
logger.info('Tool: %s' % entry_point)
if tool.check_dependencies():
tools.append({
'name': tool_class.name,
'entry_point': entry_point,
'version': tool_class.version,
'description': tool_class.description,
'tool_options': json.dumps(tool_class.options),
'timeout': tool_class.timeout,
'working_directory_required':
tool_class.working_directory_required,
})
else:
logger.warning('%s dependency check failed.', ep.name)
logger.info('Done iterating Tools')
hostname = panel.hostname
try:
api_client = RBClient(
payload['url'],
cookie_file=COOKIE_FILE,
agent=AGENT,
session=payload['session'])
api_root = api_client.get_root()
except Exception as e:
logger.exception('Could not reach RB server: %s', e)
return {
'status': 'error',
'error': 'Could not reach Review Board server: %s' % e,
}
try:
api_tools = _get_extension_resource(api_root).get_tools()
api_tools.create(hostname=hostname, tools=json.dumps(tools))
except Exception as e:
logger.exception('Problem POSTing tools: %s', e)
return {
'status': 'error',
'error': 'Problem uploading tools: %s' % e,
}
return {
'status': 'ok',
'tools': tools,
}
def _get_extension_resource(api_root):
"""Return the Review Bot extension resource.
Args:
api_root (rbtools.api.resource.Resource):
The server API root.
Returns:
rbtools.api.resource.Resource:
The extension's API resource.
"""
# TODO: Cache this. We only use this resource as a link to sub-resources.
return api_root.get_extension(
extension_name='reviewbotext.extension.ReviewBotExtension') | from rbtools.api.client import RBClient | random_line_split |
mod.rs | use std::collections::HashMap;
use std::borrow::Cow;
use chrono::{DateTime, Utc, NaiveDateTime};
use log;
pub use self::chunked_message::{ChunkSize, ChunkedMessage};
pub use self::compression::MessageCompression;
pub use self::wire_message::WireMessage;
use crate::{Level, util, Error};
use crate::errors::Result;
use serde::de;
use serde::de::Deserialize;
use serde_with::with_prefix;
mod chunked_message;
mod compression;
mod wire_message;
/// Message is thre representation of a GELF message.
///
/// `Message` provides a fluid setter and getter interface to all of GELF's
/// features. Only the `host`-field is not available. It is managed the
/// `Logger`.
///
/// A `Message` can also be constructed from a `log::LogRecord`. All
/// available metadata is transferred over to the message object.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct Message<'a> {
short_message: Cow<'a, str>,
full_message: Option<Cow<'a, str>>,
#[serde(deserialize_with = "parse_unix_seconds")]
timestamp: Option<DateTime<Utc>>,
level: Level,
#[serde(flatten, with = "prefix_metadata")]
metadata: HashMap<Cow<'a, str>, Cow<'a, str>>,
}
impl<'a> Message<'a> {
/// Construct a new log message.
///
/// All fields will use their defaults. This means usually Option::None.
/// A notable exception is `level`. The GELF spec requires this field to
/// default to Level::Alert.
pub fn new<S>(
short_message: S,
) -> Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
Self::new_with_level(short_message, Level::Alert)
}
/// Construct a new log message with a defined level
///
/// All fields will use their defaults. This means usually Option::None.
pub fn new_with_level<S>(
short_message: S,
level: Level,
) -> Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
Message {
short_message: short_message.into(),
level,
full_message: None,
timestamp: None,
metadata: HashMap::new(),
}
}
/// Return the `short_message`
pub fn short_message(&self) -> &Cow<'a, str> {
&self.short_message
}
/// Set the `short_message`
pub fn set_short_message<S>(
&mut self,
msg: S
) -> &mut Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
self.short_message = msg.into();
self
}
/// Return the `full_message`
pub fn full_message(&self) -> &Option<Cow<'a, str>> {
&self.full_message
}
/// Set the `full_message`
pub fn set_full_message<S>(
&mut self,
msg: S
) -> &mut Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
self.full_message = Some(msg.into());
self
}
// Clear the `full_message`
pub fn clear_full_message(&mut self) -> &mut Self {
self.full_message = None;
self
}
/// Return the `timestamp`
pub fn timestamp(&self) -> &Option<DateTime<Utc>> {
&self.timestamp
}
/// Set the `timestamp`
pub fn set_timestamp(&mut self, ts: DateTime<Utc>) -> &mut Self {
self.timestamp = Some(ts);
self
}
/// Clear the `timestamp`
pub fn clear_timestamp(&mut self) -> &mut Self {
self.timestamp = None;
self
}
/// Return the `level`
pub fn level(&self) -> Level {
self.level
}
/// Set the `level`
pub fn set_level(&mut self, level: Level) -> &mut Self |
/// Return a metadata field with given key
pub fn metadata(&self, key: &'a str) -> Option<&Cow<'a, str>> {
self.metadata.get(key)
}
/// Return all metadata
pub fn all_metadata(&self) -> &HashMap<Cow<'a, str>, Cow<'a, str>> {
&self.metadata
}
/// Set a metadata field with given key to value
pub fn set_metadata<S, T>(
&mut self,
key: S,
value: T,
) -> Result<&mut Self>
where
S: Into<Cow<'a, str>> + AsRef<str>,
T: Into<Cow<'a, str>> + AsRef<str>,
{
let key = key.into();
if key == "id" {
return Err(Error::IllegalNameForAdditional { name: key.into() }.into());
}
self.metadata.insert(key, value.into());
Ok(self)
}
}
impl<'a> From<&'a log::Record<'a>> for Message<'a> {
/// Create a `Message` from given `log::LogRecord` including all metadata
fn from(record: &'a log::Record) -> Message<'a> {
// Create message with given text and level
let short_message = format!("{}", record.args());
let mut msg = Message::new_with_level(
short_message,
record.level().into(),
);
msg.set_timestamp(Utc::now());
// Add default metadata, and ignore the results (`let _ = ...`) as all keys are valid
// and set_metadata only fails on invalid keys
let _ = msg.set_metadata("file", record.file().unwrap_or("(none)").to_string());
let _ = msg.set_metadata("line", record.line().map(|v| v.to_string()).unwrap_or_else(|| "(none)".into()));
let _ = msg.set_metadata("module_path", record.module_path().unwrap_or("(none)").to_string());
let _ = msg.set_metadata("process_id", util::pid().to_string());
msg
}
}
with_prefix!(prefix_metadata "_");
fn parse_unix_seconds<'de, D>(d: D) -> std::result::Result<Option<DateTime<Utc>>, D::Error>
where D: de::Deserializer<'de>
{
let value: Option<f64> = Deserialize::deserialize(d)?;
let value = match value {
Some(v) => v,
None => return Ok(None)
};
let seconds = value.trunc() as i64;
let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32;
let ndt = NaiveDateTime::from_timestamp_opt(seconds, nsecs);
if let Some(ndt) = ndt {
Ok(Some(DateTime::<Utc>::from_utc(ndt, Utc)))
} else {
Err(de::Error::custom(format!(
"Invalid or out of range value '{}' for DateTime",
value
)))
}
}
#[cfg(test)]
mod test {
use super::*;
use rand::{thread_rng, Rng};
use rand::distributions::{Alphanumeric, Uniform};
use serde_json::de::SliceRead;
use serde_json::StreamDeserializer;
use chrono::Timelike;
fn random_message() -> Message<'static> {
let short_message: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(100)
.collect();
let full_message: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(200)
.collect();
let mut rng = thread_rng();
let int = rng.sample::<i64, _>(Uniform::new_inclusive(0, 7));
let mut message = Message::new(short_message);
message.set_full_message(full_message);
message.set_level(Level::from(int));
random_metadata().into_iter().for_each(|pair| {
message.set_metadata(pair.0, pair.1).unwrap();
});
message
}
fn random_metadata() -> HashMap<String, String> {
let mut rng = thread_rng();
let int = rng.sample::<usize, _>(Uniform::new_inclusive(5, 30));
std::iter::repeat_with(|| {
let value: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(200)
.collect();
let key: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(10)
.collect();
(key, value)
}).take(int)
.fold(HashMap::new(), |mut acc, m| {
acc.insert(m.0, m.1);
acc
})
}
fn random_messages(amount: usize) -> impl Iterator<Item=Message<'static>> {
std::iter::repeat_with(random_message).take(amount)
}
#[test]
fn test_deserialize_valid_json() {
let message = random_message();
let input = serde_json::to_string(&message).unwrap();
let actual_message: Message = serde_json::from_str(input.as_str()).expect("No erro parsing");
assert_eq!(actual_message.short_message, message.short_message);
assert_eq!(actual_message.full_message, message.full_message);
assert_eq!(actual_message.timestamp, message.timestamp);
assert_eq!(actual_message.metadata, message.metadata);
assert_eq!(actual_message.level, message.level);
}
#[test]
fn test_deserialize_multiple_valid_jsons() {
let messages = random_messages(10).collect::<Vec<Message>>();
let input = messages.clone().into_iter()
.map(|m| serde_json::to_string(&m).unwrap())
.fold(String::new(), |mut acc, v| {
acc.push_str(v.as_str());
acc
});
let read = SliceRead::new(input.as_bytes());
let mut stream: StreamDeserializer<SliceRead, Message> = serde_json::StreamDeserializer::new(read);
let mut actual_parsed: Vec<Message> = vec![];
while let Some(m) = stream.next() {
actual_parsed.push(m.unwrap());
}
assert_eq!(actual_parsed, messages);
assert_eq!(stream.byte_offset(), input.len());
}
#[test]
fn test_parse_timestamp_json() {
let raw_message = r#"
{"version": "1.1",
"short_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel",
"full_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n",
"timestamp": 1578669969.108120000,
"level": 6,
"_thread_name": "Thread-11",
"_logger_name": "org.springframework.integration.endpoint.EventDrivenConsumer"}
"#;
let actual_message: Message = serde_json::from_str(raw_message).expect("Parse with success");
let actual_timestamp = actual_message.timestamp().as_ref().expect("Timestamp");
assert_eq!(actual_timestamp.timestamp(), 1_578_669_969);
assert!(actual_timestamp.nanosecond() < 108_120_000);
assert_eq!(actual_message.full_message().as_ref().expect("Full Message"), "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n");
assert_eq!(actual_message.level(), Level::Informational);
assert_eq!(actual_message.metadata("thread_name").expect("thread name"), "Thread-11");
assert_eq!(actual_message.metadata("logger_name").expect("logger name"), "org.springframework.integration.endpoint.EventDrivenConsumer");
}
}
| {
self.level = level;
self
} | identifier_body |
mod.rs | use std::collections::HashMap;
use std::borrow::Cow;
use chrono::{DateTime, Utc, NaiveDateTime};
use log;
pub use self::chunked_message::{ChunkSize, ChunkedMessage};
pub use self::compression::MessageCompression;
pub use self::wire_message::WireMessage;
use crate::{Level, util, Error};
use crate::errors::Result;
use serde::de;
use serde::de::Deserialize;
use serde_with::with_prefix;
mod chunked_message;
mod compression;
mod wire_message;
/// Message is thre representation of a GELF message.
///
/// `Message` provides a fluid setter and getter interface to all of GELF's
/// features. Only the `host`-field is not available. It is managed the
/// `Logger`.
///
/// A `Message` can also be constructed from a `log::LogRecord`. All
/// available metadata is transferred over to the message object.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct Message<'a> {
short_message: Cow<'a, str>,
full_message: Option<Cow<'a, str>>,
#[serde(deserialize_with = "parse_unix_seconds")]
timestamp: Option<DateTime<Utc>>,
level: Level,
#[serde(flatten, with = "prefix_metadata")]
metadata: HashMap<Cow<'a, str>, Cow<'a, str>>,
}
impl<'a> Message<'a> {
/// Construct a new log message.
///
/// All fields will use their defaults. This means usually Option::None.
/// A notable exception is `level`. The GELF spec requires this field to
/// default to Level::Alert.
pub fn new<S>(
short_message: S,
) -> Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
Self::new_with_level(short_message, Level::Alert)
}
/// Construct a new log message with a defined level
///
/// All fields will use their defaults. This means usually Option::None.
pub fn | <S>(
short_message: S,
level: Level,
) -> Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
Message {
short_message: short_message.into(),
level,
full_message: None,
timestamp: None,
metadata: HashMap::new(),
}
}
/// Return the `short_message`
pub fn short_message(&self) -> &Cow<'a, str> {
&self.short_message
}
/// Set the `short_message`
pub fn set_short_message<S>(
&mut self,
msg: S
) -> &mut Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
self.short_message = msg.into();
self
}
/// Return the `full_message`
pub fn full_message(&self) -> &Option<Cow<'a, str>> {
&self.full_message
}
/// Set the `full_message`
pub fn set_full_message<S>(
&mut self,
msg: S
) -> &mut Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
self.full_message = Some(msg.into());
self
}
// Clear the `full_message`
pub fn clear_full_message(&mut self) -> &mut Self {
self.full_message = None;
self
}
/// Return the `timestamp`
pub fn timestamp(&self) -> &Option<DateTime<Utc>> {
&self.timestamp
}
/// Set the `timestamp`
pub fn set_timestamp(&mut self, ts: DateTime<Utc>) -> &mut Self {
self.timestamp = Some(ts);
self
}
/// Clear the `timestamp`
pub fn clear_timestamp(&mut self) -> &mut Self {
self.timestamp = None;
self
}
/// Return the `level`
pub fn level(&self) -> Level {
self.level
}
/// Set the `level`
pub fn set_level(&mut self, level: Level) -> &mut Self {
self.level = level;
self
}
/// Return a metadata field with given key
pub fn metadata(&self, key: &'a str) -> Option<&Cow<'a, str>> {
self.metadata.get(key)
}
/// Return all metadata
pub fn all_metadata(&self) -> &HashMap<Cow<'a, str>, Cow<'a, str>> {
&self.metadata
}
/// Set a metadata field with given key to value
pub fn set_metadata<S, T>(
&mut self,
key: S,
value: T,
) -> Result<&mut Self>
where
S: Into<Cow<'a, str>> + AsRef<str>,
T: Into<Cow<'a, str>> + AsRef<str>,
{
let key = key.into();
if key == "id" {
return Err(Error::IllegalNameForAdditional { name: key.into() }.into());
}
self.metadata.insert(key, value.into());
Ok(self)
}
}
impl<'a> From<&'a log::Record<'a>> for Message<'a> {
/// Create a `Message` from given `log::LogRecord` including all metadata
fn from(record: &'a log::Record) -> Message<'a> {
// Create message with given text and level
let short_message = format!("{}", record.args());
let mut msg = Message::new_with_level(
short_message,
record.level().into(),
);
msg.set_timestamp(Utc::now());
// Add default metadata, and ignore the results (`let _ = ...`) as all keys are valid
// and set_metadata only fails on invalid keys
let _ = msg.set_metadata("file", record.file().unwrap_or("(none)").to_string());
let _ = msg.set_metadata("line", record.line().map(|v| v.to_string()).unwrap_or_else(|| "(none)".into()));
let _ = msg.set_metadata("module_path", record.module_path().unwrap_or("(none)").to_string());
let _ = msg.set_metadata("process_id", util::pid().to_string());
msg
}
}
with_prefix!(prefix_metadata "_");
fn parse_unix_seconds<'de, D>(d: D) -> std::result::Result<Option<DateTime<Utc>>, D::Error>
where D: de::Deserializer<'de>
{
let value: Option<f64> = Deserialize::deserialize(d)?;
let value = match value {
Some(v) => v,
None => return Ok(None)
};
let seconds = value.trunc() as i64;
let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32;
let ndt = NaiveDateTime::from_timestamp_opt(seconds, nsecs);
if let Some(ndt) = ndt {
Ok(Some(DateTime::<Utc>::from_utc(ndt, Utc)))
} else {
Err(de::Error::custom(format!(
"Invalid or out of range value '{}' for DateTime",
value
)))
}
}
#[cfg(test)]
mod test {
use super::*;
use rand::{thread_rng, Rng};
use rand::distributions::{Alphanumeric, Uniform};
use serde_json::de::SliceRead;
use serde_json::StreamDeserializer;
use chrono::Timelike;
fn random_message() -> Message<'static> {
let short_message: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(100)
.collect();
let full_message: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(200)
.collect();
let mut rng = thread_rng();
let int = rng.sample::<i64, _>(Uniform::new_inclusive(0, 7));
let mut message = Message::new(short_message);
message.set_full_message(full_message);
message.set_level(Level::from(int));
random_metadata().into_iter().for_each(|pair| {
message.set_metadata(pair.0, pair.1).unwrap();
});
message
}
fn random_metadata() -> HashMap<String, String> {
let mut rng = thread_rng();
let int = rng.sample::<usize, _>(Uniform::new_inclusive(5, 30));
std::iter::repeat_with(|| {
let value: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(200)
.collect();
let key: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(10)
.collect();
(key, value)
}).take(int)
.fold(HashMap::new(), |mut acc, m| {
acc.insert(m.0, m.1);
acc
})
}
fn random_messages(amount: usize) -> impl Iterator<Item=Message<'static>> {
std::iter::repeat_with(random_message).take(amount)
}
#[test]
fn test_deserialize_valid_json() {
let message = random_message();
let input = serde_json::to_string(&message).unwrap();
let actual_message: Message = serde_json::from_str(input.as_str()).expect("No erro parsing");
assert_eq!(actual_message.short_message, message.short_message);
assert_eq!(actual_message.full_message, message.full_message);
assert_eq!(actual_message.timestamp, message.timestamp);
assert_eq!(actual_message.metadata, message.metadata);
assert_eq!(actual_message.level, message.level);
}
#[test]
fn test_deserialize_multiple_valid_jsons() {
let messages = random_messages(10).collect::<Vec<Message>>();
let input = messages.clone().into_iter()
.map(|m| serde_json::to_string(&m).unwrap())
.fold(String::new(), |mut acc, v| {
acc.push_str(v.as_str());
acc
});
let read = SliceRead::new(input.as_bytes());
let mut stream: StreamDeserializer<SliceRead, Message> = serde_json::StreamDeserializer::new(read);
let mut actual_parsed: Vec<Message> = vec![];
while let Some(m) = stream.next() {
actual_parsed.push(m.unwrap());
}
assert_eq!(actual_parsed, messages);
assert_eq!(stream.byte_offset(), input.len());
}
#[test]
fn test_parse_timestamp_json() {
let raw_message = r#"
{"version": "1.1",
"short_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel",
"full_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n",
"timestamp": 1578669969.108120000,
"level": 6,
"_thread_name": "Thread-11",
"_logger_name": "org.springframework.integration.endpoint.EventDrivenConsumer"}
"#;
let actual_message: Message = serde_json::from_str(raw_message).expect("Parse with success");
let actual_timestamp = actual_message.timestamp().as_ref().expect("Timestamp");
assert_eq!(actual_timestamp.timestamp(), 1_578_669_969);
assert!(actual_timestamp.nanosecond() < 108_120_000);
assert_eq!(actual_message.full_message().as_ref().expect("Full Message"), "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n");
assert_eq!(actual_message.level(), Level::Informational);
assert_eq!(actual_message.metadata("thread_name").expect("thread name"), "Thread-11");
assert_eq!(actual_message.metadata("logger_name").expect("logger name"), "org.springframework.integration.endpoint.EventDrivenConsumer");
}
}
| new_with_level | identifier_name |
mod.rs | use std::collections::HashMap;
use std::borrow::Cow;
use chrono::{DateTime, Utc, NaiveDateTime};
use log;
pub use self::chunked_message::{ChunkSize, ChunkedMessage};
pub use self::compression::MessageCompression;
pub use self::wire_message::WireMessage;
use crate::{Level, util, Error};
use crate::errors::Result;
use serde::de;
use serde::de::Deserialize;
use serde_with::with_prefix;
mod chunked_message;
mod compression;
mod wire_message;
/// Message is thre representation of a GELF message.
///
/// `Message` provides a fluid setter and getter interface to all of GELF's
/// features. Only the `host`-field is not available. It is managed the
/// `Logger`.
///
/// A `Message` can also be constructed from a `log::LogRecord`. All
/// available metadata is transferred over to the message object.
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct Message<'a> {
short_message: Cow<'a, str>,
full_message: Option<Cow<'a, str>>,
#[serde(deserialize_with = "parse_unix_seconds")]
timestamp: Option<DateTime<Utc>>,
level: Level,
#[serde(flatten, with = "prefix_metadata")]
metadata: HashMap<Cow<'a, str>, Cow<'a, str>>,
}
impl<'a> Message<'a> {
/// Construct a new log message.
///
/// All fields will use their defaults. This means usually Option::None.
/// A notable exception is `level`. The GELF spec requires this field to
/// default to Level::Alert.
pub fn new<S>(
short_message: S,
) -> Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
Self::new_with_level(short_message, Level::Alert)
}
/// Construct a new log message with a defined level
///
/// All fields will use their defaults. This means usually Option::None.
pub fn new_with_level<S>(
short_message: S,
level: Level,
) -> Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
Message {
short_message: short_message.into(),
level,
full_message: None,
timestamp: None,
metadata: HashMap::new(),
}
}
/// Return the `short_message`
pub fn short_message(&self) -> &Cow<'a, str> {
&self.short_message
}
/// Set the `short_message`
pub fn set_short_message<S>(
&mut self,
msg: S
) -> &mut Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
self.short_message = msg.into();
self
}
/// Return the `full_message`
pub fn full_message(&self) -> &Option<Cow<'a, str>> {
&self.full_message
}
/// Set the `full_message`
pub fn set_full_message<S>(
&mut self,
msg: S
) -> &mut Self
where
S: Into<Cow<'a, str>> + AsRef<str>
{
self.full_message = Some(msg.into());
self
}
// Clear the `full_message`
pub fn clear_full_message(&mut self) -> &mut Self {
self.full_message = None;
self
}
/// Return the `timestamp`
pub fn timestamp(&self) -> &Option<DateTime<Utc>> {
&self.timestamp
}
/// Set the `timestamp`
pub fn set_timestamp(&mut self, ts: DateTime<Utc>) -> &mut Self {
self.timestamp = Some(ts);
self
} | self
}
/// Return the `level`
pub fn level(&self) -> Level {
self.level
}
/// Set the `level`
pub fn set_level(&mut self, level: Level) -> &mut Self {
self.level = level;
self
}
/// Return a metadata field with given key
pub fn metadata(&self, key: &'a str) -> Option<&Cow<'a, str>> {
self.metadata.get(key)
}
/// Return all metadata
pub fn all_metadata(&self) -> &HashMap<Cow<'a, str>, Cow<'a, str>> {
&self.metadata
}
/// Set a metadata field with given key to value
pub fn set_metadata<S, T>(
&mut self,
key: S,
value: T,
) -> Result<&mut Self>
where
S: Into<Cow<'a, str>> + AsRef<str>,
T: Into<Cow<'a, str>> + AsRef<str>,
{
let key = key.into();
if key == "id" {
return Err(Error::IllegalNameForAdditional { name: key.into() }.into());
}
self.metadata.insert(key, value.into());
Ok(self)
}
}
impl<'a> From<&'a log::Record<'a>> for Message<'a> {
/// Create a `Message` from given `log::LogRecord` including all metadata
fn from(record: &'a log::Record) -> Message<'a> {
// Create message with given text and level
let short_message = format!("{}", record.args());
let mut msg = Message::new_with_level(
short_message,
record.level().into(),
);
msg.set_timestamp(Utc::now());
// Add default metadata, and ignore the results (`let _ = ...`) as all keys are valid
// and set_metadata only fails on invalid keys
let _ = msg.set_metadata("file", record.file().unwrap_or("(none)").to_string());
let _ = msg.set_metadata("line", record.line().map(|v| v.to_string()).unwrap_or_else(|| "(none)".into()));
let _ = msg.set_metadata("module_path", record.module_path().unwrap_or("(none)").to_string());
let _ = msg.set_metadata("process_id", util::pid().to_string());
msg
}
}
with_prefix!(prefix_metadata "_");
fn parse_unix_seconds<'de, D>(d: D) -> std::result::Result<Option<DateTime<Utc>>, D::Error>
where D: de::Deserializer<'de>
{
let value: Option<f64> = Deserialize::deserialize(d)?;
let value = match value {
Some(v) => v,
None => return Ok(None)
};
let seconds = value.trunc() as i64;
let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32;
let ndt = NaiveDateTime::from_timestamp_opt(seconds, nsecs);
if let Some(ndt) = ndt {
Ok(Some(DateTime::<Utc>::from_utc(ndt, Utc)))
} else {
Err(de::Error::custom(format!(
"Invalid or out of range value '{}' for DateTime",
value
)))
}
}
#[cfg(test)]
mod test {
use super::*;
use rand::{thread_rng, Rng};
use rand::distributions::{Alphanumeric, Uniform};
use serde_json::de::SliceRead;
use serde_json::StreamDeserializer;
use chrono::Timelike;
fn random_message() -> Message<'static> {
let short_message: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(100)
.collect();
let full_message: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(200)
.collect();
let mut rng = thread_rng();
let int = rng.sample::<i64, _>(Uniform::new_inclusive(0, 7));
let mut message = Message::new(short_message);
message.set_full_message(full_message);
message.set_level(Level::from(int));
random_metadata().into_iter().for_each(|pair| {
message.set_metadata(pair.0, pair.1).unwrap();
});
message
}
fn random_metadata() -> HashMap<String, String> {
let mut rng = thread_rng();
let int = rng.sample::<usize, _>(Uniform::new_inclusive(5, 30));
std::iter::repeat_with(|| {
let value: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(200)
.collect();
let key: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(10)
.collect();
(key, value)
}).take(int)
.fold(HashMap::new(), |mut acc, m| {
acc.insert(m.0, m.1);
acc
})
}
fn random_messages(amount: usize) -> impl Iterator<Item=Message<'static>> {
std::iter::repeat_with(random_message).take(amount)
}
#[test]
fn test_deserialize_valid_json() {
let message = random_message();
let input = serde_json::to_string(&message).unwrap();
let actual_message: Message = serde_json::from_str(input.as_str()).expect("No erro parsing");
assert_eq!(actual_message.short_message, message.short_message);
assert_eq!(actual_message.full_message, message.full_message);
assert_eq!(actual_message.timestamp, message.timestamp);
assert_eq!(actual_message.metadata, message.metadata);
assert_eq!(actual_message.level, message.level);
}
#[test]
fn test_deserialize_multiple_valid_jsons() {
let messages = random_messages(10).collect::<Vec<Message>>();
let input = messages.clone().into_iter()
.map(|m| serde_json::to_string(&m).unwrap())
.fold(String::new(), |mut acc, v| {
acc.push_str(v.as_str());
acc
});
let read = SliceRead::new(input.as_bytes());
let mut stream: StreamDeserializer<SliceRead, Message> = serde_json::StreamDeserializer::new(read);
let mut actual_parsed: Vec<Message> = vec![];
while let Some(m) = stream.next() {
actual_parsed.push(m.unwrap());
}
assert_eq!(actual_parsed, messages);
assert_eq!(stream.byte_offset(), input.len());
}
#[test]
fn test_parse_timestamp_json() {
let raw_message = r#"
{"version": "1.1",
"short_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel",
"full_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n",
"timestamp": 1578669969.108120000,
"level": 6,
"_thread_name": "Thread-11",
"_logger_name": "org.springframework.integration.endpoint.EventDrivenConsumer"}
"#;
let actual_message: Message = serde_json::from_str(raw_message).expect("Parse with success");
let actual_timestamp = actual_message.timestamp().as_ref().expect("Timestamp");
assert_eq!(actual_timestamp.timestamp(), 1_578_669_969);
assert!(actual_timestamp.nanosecond() < 108_120_000);
assert_eq!(actual_message.full_message().as_ref().expect("Full Message"), "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n");
assert_eq!(actual_message.level(), Level::Informational);
assert_eq!(actual_message.metadata("thread_name").expect("thread name"), "Thread-11");
assert_eq!(actual_message.metadata("logger_name").expect("logger name"), "org.springframework.integration.endpoint.EventDrivenConsumer");
}
} |
/// Clear the `timestamp`
pub fn clear_timestamp(&mut self) -> &mut Self {
self.timestamp = None; | random_line_split |
manifest.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Create and edit manifest for session contents"""
from future_builtins import filter
import re
from duplicity import globals
from duplicity import log
from duplicity import globals
from duplicity import util
|
class Manifest:
"""
List of volumes and information about each one
"""
def __init__(self, fh=None):
"""
Create blank Manifest
@param fh: fileobj for manifest
@type fh: DupPath
@rtype: Manifest
@return: manifest
"""
self.hostname = None
self.local_dirname = None
self.volume_info_dict = {} # dictionary vol numbers -> vol infos
self.fh = fh
self.files_changed = []
def set_dirinfo(self):
"""
Set information about directory from globals,
and write to manifest file.
@rtype: Manifest
@return: manifest
"""
self.hostname = globals.hostname
self.local_dirname = globals.local_path.name # @UndefinedVariable
if self.fh:
if self.hostname:
self.fh.write("Hostname %s\n" % self.hostname)
if self.local_dirname:
self.fh.write("Localdir %s\n" % Quote(self.local_dirname))
return self
def check_dirinfo(self):
"""
Return None if dirinfo is the same, otherwise error message
Does not raise an error message if hostname or local_dirname
are not available.
@rtype: string
@return: None or error message
"""
if globals.allow_source_mismatch:
return
if self.hostname and self.hostname != globals.hostname:
errmsg = _("Fatal Error: Backup source host has changed.\n"
"Current hostname: %s\n"
"Previous hostname: %s") % (globals.hostname, self.hostname)
code = log.ErrorCode.hostname_mismatch
code_extra = "%s %s" % (util.escape(globals.hostname), util.escape(self.hostname))
elif (self.local_dirname and self.local_dirname != globals.local_path.name): # @UndefinedVariable
errmsg = _("Fatal Error: Backup source directory has changed.\n"
"Current directory: %s\n"
"Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable
code = log.ErrorCode.source_dir_mismatch
code_extra = "%s %s" % (util.escape(globals.local_path.name),
util.escape(self.local_dirname)) # @UndefinedVariable
else:
return
log.FatalError(errmsg + "\n\n" +
_("Aborting because you may have accidentally tried to "
"backup two different data sets to the same remote "
"location, or using the same archive directory. If "
"this is not a mistake, use the "
"--allow-source-mismatch switch to avoid seeing this "
"message"), code, code_extra)
def set_files_changed_info(self, files_changed):
if files_changed:
self.files_changed = files_changed
if self.fh:
self.fh.write("Filelist %d\n" % len(self.files_changed))
for fileinfo in self.files_changed:
self.fh.write(" %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0])))
def add_volume_info(self, vi):
"""
Add volume info vi to manifest and write to manifest
@param vi: volume info to add
@type vi: VolumeInfo
@return: void
"""
vol_num = vi.volume_number
self.volume_info_dict[vol_num] = vi
if self.fh:
self.fh.write(vi.to_string() + "\n")
def del_volume_info(self, vol_num):
"""
Remove volume vol_num from the manifest
@param vol_num: volume number to delete
@type vi: int
@return: void
"""
try:
del self.volume_info_dict[vol_num]
except Exception:
raise ManifestError("Volume %d not present in manifest" % (vol_num,))
def to_string(self):
"""
Return string version of self (just concatenate vi strings)
@rtype: string
@return: self in string form
"""
result = ""
if self.hostname:
result += "Hostname %s\n" % self.hostname
if self.local_dirname:
result += "Localdir %s\n" % Quote(self.local_dirname)
result += "Filelist %d\n" % len(self.files_changed)
for fileinfo in self.files_changed:
result += " %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0]))
vol_num_list = self.volume_info_dict.keys()
vol_num_list.sort()
def vol_num_to_string(vol_num):
return self.volume_info_dict[vol_num].to_string()
result = "%s%s\n" % (result,
"\n".join(map(vol_num_to_string, vol_num_list)))
return result
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s, return self
"""
def get_field(fieldname):
"""
Return the value of a field by parsing s, or None if no field
"""
m = re.search("(^|\\n)%s\\s(.*?)\n" % fieldname, s, re.I)
if not m:
return None
else:
return Unquote(m.group(2))
self.hostname = get_field("hostname")
self.local_dirname = get_field("localdir")
highest_vol = 0
latest_vol = 0
vi_regexp = re.compile("(?:^|\\n)(volume\\s.*(?:\\n.*)*?)(?=\\nvolume\\s|$)", re.I)
vi_iterator = vi_regexp.finditer(s)
for match in vi_iterator:
vi = VolumeInfo().from_string(match.group(1))
self.add_volume_info(vi)
latest_vol = vi.volume_number
highest_vol = max(highest_vol, latest_vol)
log.Debug(_("Found manifest volume %s") % latest_vol)
# If we restarted after losing some remote volumes, the highest volume
# seen may be higher than the last volume recorded. That is, the
# manifest could contain "vol1, vol2, vol3, vol2." If so, we don't
# want to keep vol3's info.
for i in range(latest_vol + 1, highest_vol + 1):
self.del_volume_info(i)
log.Info(_("Found %s volumes in manifest") % latest_vol)
# Get file changed list - not needed if --file-changed not present
filecount = 0
if globals.file_changed is not None:
filelist_regexp = re.compile("(^|\\n)filelist\\s([0-9]+)\\n(.*?)(\\nvolume\\s|$)", re.I | re.S)
match = filelist_regexp.search(s)
if match:
filecount = int(match.group(2))
if filecount > 0:
def parse_fileinfo(line):
fileinfo = line.strip().split()
return (fileinfo[0], ''.join(fileinfo[1:]))
self.files_changed = list(map(parse_fileinfo, match.group(3).split('\n')))
if filecount != len(self.files_changed):
log.Error(_("Manifest file '%s' is corrupt: File count says %d, File list contains %d" %
(self.fh.base if self.fh else "", filecount, len(self.files_changed))))
self.corrupt_filelist = True
return self
def get_files_changed(self):
return self.files_changed
def __eq__(self, other):
"""
Two manifests are equal if they contain the same volume infos
"""
vi_list1 = self.volume_info_dict.keys()
vi_list1.sort()
vi_list2 = other.volume_info_dict.keys()
vi_list2.sort()
if vi_list1 != vi_list2:
log.Notice(_("Manifests not equal because different volume numbers"))
return False
for i in range(len(vi_list1)):
if not vi_list1[i] == vi_list2[i]:
log.Notice(_("Manifests not equal because volume lists differ"))
return False
if (self.hostname != other.hostname or
self.local_dirname != other.local_dirname):
log.Notice(_("Manifests not equal because hosts or directories differ"))
return False
return True
def __ne__(self, other):
"""
Defines !=. Not doing this always leads to annoying bugs...
"""
return not self.__eq__(other)
def write_to_path(self, path):
"""
Write string version of manifest to given path
"""
assert not path.exists()
fout = path.open("wb")
fout.write(self.to_string())
assert not fout.close()
path.setdata()
def get_containing_volumes(self, index_prefix):
"""
Return list of volume numbers that may contain index_prefix
"""
return filter(lambda vol_num:
self.volume_info_dict[vol_num].contains(index_prefix),
self.volume_info_dict.keys())
class VolumeInfoError(Exception):
"""
Raised when there is a problem initializing a VolumeInfo from string
"""
pass
class VolumeInfo:
"""
Information about a single volume
"""
def __init__(self):
"""VolumeInfo initializer"""
self.volume_number = None
self.start_index = None
self.start_block = None
self.end_index = None
self.end_block = None
self.hashes = {}
def set_info(self, vol_number,
start_index, start_block,
end_index, end_block):
"""
Set essential VolumeInfo information, return self
Call with starting and ending paths stored in the volume. If
a multivol diff gets split between volumes, count it as being
part of both volumes.
"""
self.volume_number = vol_number
self.start_index = start_index
self.start_block = start_block
self.end_index = end_index
self.end_block = end_block
return self
def set_hash(self, hash_name, data):
"""
Set the value of hash hash_name (e.g. "MD5") to data
"""
self.hashes[hash_name] = data
def get_best_hash(self):
"""
Return pair (hash_type, hash_data)
SHA1 is the best hash, and MD5 is the second best hash. None
is returned if no hash is available.
"""
if not self.hashes:
return None
try:
return ("SHA1", self.hashes['SHA1'])
except KeyError:
pass
try:
return ("MD5", self.hashes['MD5'])
except KeyError:
pass
return self.hashes.items()[0]
def to_string(self):
"""
Return nicely formatted string reporting all information
"""
def index_to_string(index):
"""Return printable version of index without any whitespace"""
if index:
s = "/".join(index)
return Quote(s)
else:
return "."
slist = ["Volume %d:" % self.volume_number]
whitespace = " "
slist.append("%sStartingPath %s %s" %
(whitespace, index_to_string(self.start_index), (self.start_block or " ")))
slist.append("%sEndingPath %s %s" %
(whitespace, index_to_string(self.end_index), (self.end_block or " ")))
for key in self.hashes:
slist.append("%sHash %s %s" %
(whitespace, key, self.hashes[key]))
return "\n".join(slist)
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s as created by to_string
"""
def string_to_index(s):
"""
Return tuple index from string
"""
s = Unquote(s)
if s == ".":
return ()
return tuple(s.split("/"))
linelist = s.strip().split("\n")
# Set volume number
m = re.search("^Volume ([0-9]+):", linelist[0], re.I)
if not m:
raise VolumeInfoError("Bad first line '%s'" % (linelist[0],))
self.volume_number = int(m.group(1))
# Set other fields
for line in linelist[1:]:
if not line:
continue
line_split = line.strip().split()
field_name = line_split[0].lower()
other_fields = line_split[1:]
if field_name == "Volume":
log.Warn(_("Warning, found extra Volume identifier"))
break
elif field_name == "startingpath":
self.start_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.start_block = int(other_fields[1])
else:
self.start_block = None
elif field_name == "endingpath":
self.end_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.end_block = int(other_fields[1])
else:
self.end_block = None
elif field_name == "hash":
self.set_hash(other_fields[0], other_fields[1])
if self.start_index is None or self.end_index is None:
raise VolumeInfoError("Start or end index not set")
return self
def __eq__(self, other):
"""
Used in test suite
"""
if not isinstance(other, VolumeInfo):
log.Notice(_("Other is not VolumeInfo"))
return None
if self.volume_number != other.volume_number:
log.Notice(_("Volume numbers don't match"))
return None
if self.start_index != other.start_index:
log.Notice(_("start_indicies don't match"))
return None
if self.end_index != other.end_index:
log.Notice(_("end_index don't match"))
return None
hash_list1 = self.hashes.items()
hash_list1.sort()
hash_list2 = other.hashes.items()
hash_list2.sort()
if hash_list1 != hash_list2:
log.Notice(_("Hashes don't match"))
return None
return 1
def __ne__(self, other):
"""
Defines !=
"""
return not self.__eq__(other)
def contains(self, index_prefix, recursive=1):
"""
Return true if volume might contain index
If recursive is true, then return true if any index starting
with index_prefix could be contained. Otherwise, just check
if index_prefix itself is between starting and ending
indicies.
"""
if recursive:
return (self.start_index[:len(index_prefix)] <=
index_prefix <= self.end_index)
else:
return self.start_index <= index_prefix <= self.end_index
nonnormal_char_re = re.compile("(\\s|[\\\\\"'])")
def Quote(s):
"""
Return quoted version of s safe to put in a manifest or volume info
"""
if not nonnormal_char_re.search(s):
return s # no quoting necessary
slist = []
for char in s:
if nonnormal_char_re.search(char):
slist.append("\\x%02x" % ord(char))
else:
slist.append(char)
return '"%s"' % "".join(slist)
def Unquote(quoted_string):
"""
Return original string from quoted_string produced by above
"""
if not quoted_string[0] == '"' or quoted_string[0] == "'":
return quoted_string
assert quoted_string[0] == quoted_string[-1]
return_list = []
i = 1 # skip initial char
while i < len(quoted_string) - 1:
char = quoted_string[i]
if char == "\\":
# quoted section
assert quoted_string[i + 1] == "x"
return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))
i += 4
else:
return_list.append(char)
i += 1
return "".join(return_list) | class ManifestError(Exception):
"""
Exception raised when problem with manifest
"""
pass | random_line_split |
manifest.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Create and edit manifest for session contents"""
from future_builtins import filter
import re
from duplicity import globals
from duplicity import log
from duplicity import globals
from duplicity import util
class ManifestError(Exception):
"""
Exception raised when problem with manifest
"""
pass
class Manifest:
"""
List of volumes and information about each one
"""
def __init__(self, fh=None):
"""
Create blank Manifest
@param fh: fileobj for manifest
@type fh: DupPath
@rtype: Manifest
@return: manifest
"""
self.hostname = None
self.local_dirname = None
self.volume_info_dict = {} # dictionary vol numbers -> vol infos
self.fh = fh
self.files_changed = []
def set_dirinfo(self):
"""
Set information about directory from globals,
and write to manifest file.
@rtype: Manifest
@return: manifest
"""
self.hostname = globals.hostname
self.local_dirname = globals.local_path.name # @UndefinedVariable
if self.fh:
if self.hostname:
self.fh.write("Hostname %s\n" % self.hostname)
if self.local_dirname:
self.fh.write("Localdir %s\n" % Quote(self.local_dirname))
return self
def check_dirinfo(self):
"""
Return None if dirinfo is the same, otherwise error message
Does not raise an error message if hostname or local_dirname
are not available.
@rtype: string
@return: None or error message
"""
if globals.allow_source_mismatch:
return
if self.hostname and self.hostname != globals.hostname:
errmsg = _("Fatal Error: Backup source host has changed.\n"
"Current hostname: %s\n"
"Previous hostname: %s") % (globals.hostname, self.hostname)
code = log.ErrorCode.hostname_mismatch
code_extra = "%s %s" % (util.escape(globals.hostname), util.escape(self.hostname))
elif (self.local_dirname and self.local_dirname != globals.local_path.name): # @UndefinedVariable
errmsg = _("Fatal Error: Backup source directory has changed.\n"
"Current directory: %s\n"
"Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable
code = log.ErrorCode.source_dir_mismatch
code_extra = "%s %s" % (util.escape(globals.local_path.name),
util.escape(self.local_dirname)) # @UndefinedVariable
else:
return
log.FatalError(errmsg + "\n\n" +
_("Aborting because you may have accidentally tried to "
"backup two different data sets to the same remote "
"location, or using the same archive directory. If "
"this is not a mistake, use the "
"--allow-source-mismatch switch to avoid seeing this "
"message"), code, code_extra)
def set_files_changed_info(self, files_changed):
if files_changed:
self.files_changed = files_changed
if self.fh:
self.fh.write("Filelist %d\n" % len(self.files_changed))
for fileinfo in self.files_changed:
self.fh.write(" %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0])))
def add_volume_info(self, vi):
"""
Add volume info vi to manifest and write to manifest
@param vi: volume info to add
@type vi: VolumeInfo
@return: void
"""
vol_num = vi.volume_number
self.volume_info_dict[vol_num] = vi
if self.fh:
self.fh.write(vi.to_string() + "\n")
def del_volume_info(self, vol_num):
"""
Remove volume vol_num from the manifest
@param vol_num: volume number to delete
@type vi: int
@return: void
"""
try:
del self.volume_info_dict[vol_num]
except Exception:
raise ManifestError("Volume %d not present in manifest" % (vol_num,))
def to_string(self):
"""
Return string version of self (just concatenate vi strings)
@rtype: string
@return: self in string form
"""
result = ""
if self.hostname:
result += "Hostname %s\n" % self.hostname
if self.local_dirname:
result += "Localdir %s\n" % Quote(self.local_dirname)
result += "Filelist %d\n" % len(self.files_changed)
for fileinfo in self.files_changed:
result += " %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0]))
vol_num_list = self.volume_info_dict.keys()
vol_num_list.sort()
def vol_num_to_string(vol_num):
return self.volume_info_dict[vol_num].to_string()
result = "%s%s\n" % (result,
"\n".join(map(vol_num_to_string, vol_num_list)))
return result
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s, return self
"""
def get_field(fieldname):
"""
Return the value of a field by parsing s, or None if no field
"""
m = re.search("(^|\\n)%s\\s(.*?)\n" % fieldname, s, re.I)
if not m:
return None
else:
return Unquote(m.group(2))
self.hostname = get_field("hostname")
self.local_dirname = get_field("localdir")
highest_vol = 0
latest_vol = 0
vi_regexp = re.compile("(?:^|\\n)(volume\\s.*(?:\\n.*)*?)(?=\\nvolume\\s|$)", re.I)
vi_iterator = vi_regexp.finditer(s)
for match in vi_iterator:
vi = VolumeInfo().from_string(match.group(1))
self.add_volume_info(vi)
latest_vol = vi.volume_number
highest_vol = max(highest_vol, latest_vol)
log.Debug(_("Found manifest volume %s") % latest_vol)
# If we restarted after losing some remote volumes, the highest volume
# seen may be higher than the last volume recorded. That is, the
# manifest could contain "vol1, vol2, vol3, vol2." If so, we don't
# want to keep vol3's info.
for i in range(latest_vol + 1, highest_vol + 1):
self.del_volume_info(i)
log.Info(_("Found %s volumes in manifest") % latest_vol)
# Get file changed list - not needed if --file-changed not present
filecount = 0
if globals.file_changed is not None:
filelist_regexp = re.compile("(^|\\n)filelist\\s([0-9]+)\\n(.*?)(\\nvolume\\s|$)", re.I | re.S)
match = filelist_regexp.search(s)
if match:
filecount = int(match.group(2))
if filecount > 0:
def parse_fileinfo(line):
fileinfo = line.strip().split()
return (fileinfo[0], ''.join(fileinfo[1:]))
self.files_changed = list(map(parse_fileinfo, match.group(3).split('\n')))
if filecount != len(self.files_changed):
log.Error(_("Manifest file '%s' is corrupt: File count says %d, File list contains %d" %
(self.fh.base if self.fh else "", filecount, len(self.files_changed))))
self.corrupt_filelist = True
return self
def get_files_changed(self):
return self.files_changed
def __eq__(self, other):
"""
Two manifests are equal if they contain the same volume infos
"""
vi_list1 = self.volume_info_dict.keys()
vi_list1.sort()
vi_list2 = other.volume_info_dict.keys()
vi_list2.sort()
if vi_list1 != vi_list2:
log.Notice(_("Manifests not equal because different volume numbers"))
return False
for i in range(len(vi_list1)):
if not vi_list1[i] == vi_list2[i]:
log.Notice(_("Manifests not equal because volume lists differ"))
return False
if (self.hostname != other.hostname or
self.local_dirname != other.local_dirname):
log.Notice(_("Manifests not equal because hosts or directories differ"))
return False
return True
def __ne__(self, other):
"""
Defines !=. Not doing this always leads to annoying bugs...
"""
return not self.__eq__(other)
def write_to_path(self, path):
|
def get_containing_volumes(self, index_prefix):
"""
Return list of volume numbers that may contain index_prefix
"""
return filter(lambda vol_num:
self.volume_info_dict[vol_num].contains(index_prefix),
self.volume_info_dict.keys())
class VolumeInfoError(Exception):
"""
Raised when there is a problem initializing a VolumeInfo from string
"""
pass
class VolumeInfo:
"""
Information about a single volume
"""
def __init__(self):
"""VolumeInfo initializer"""
self.volume_number = None
self.start_index = None
self.start_block = None
self.end_index = None
self.end_block = None
self.hashes = {}
def set_info(self, vol_number,
start_index, start_block,
end_index, end_block):
"""
Set essential VolumeInfo information, return self
Call with starting and ending paths stored in the volume. If
a multivol diff gets split between volumes, count it as being
part of both volumes.
"""
self.volume_number = vol_number
self.start_index = start_index
self.start_block = start_block
self.end_index = end_index
self.end_block = end_block
return self
def set_hash(self, hash_name, data):
"""
Set the value of hash hash_name (e.g. "MD5") to data
"""
self.hashes[hash_name] = data
def get_best_hash(self):
"""
Return pair (hash_type, hash_data)
SHA1 is the best hash, and MD5 is the second best hash. None
is returned if no hash is available.
"""
if not self.hashes:
return None
try:
return ("SHA1", self.hashes['SHA1'])
except KeyError:
pass
try:
return ("MD5", self.hashes['MD5'])
except KeyError:
pass
return self.hashes.items()[0]
def to_string(self):
"""
Return nicely formatted string reporting all information
"""
def index_to_string(index):
"""Return printable version of index without any whitespace"""
if index:
s = "/".join(index)
return Quote(s)
else:
return "."
slist = ["Volume %d:" % self.volume_number]
whitespace = " "
slist.append("%sStartingPath %s %s" %
(whitespace, index_to_string(self.start_index), (self.start_block or " ")))
slist.append("%sEndingPath %s %s" %
(whitespace, index_to_string(self.end_index), (self.end_block or " ")))
for key in self.hashes:
slist.append("%sHash %s %s" %
(whitespace, key, self.hashes[key]))
return "\n".join(slist)
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s as created by to_string
"""
def string_to_index(s):
"""
Return tuple index from string
"""
s = Unquote(s)
if s == ".":
return ()
return tuple(s.split("/"))
linelist = s.strip().split("\n")
# Set volume number
m = re.search("^Volume ([0-9]+):", linelist[0], re.I)
if not m:
raise VolumeInfoError("Bad first line '%s'" % (linelist[0],))
self.volume_number = int(m.group(1))
# Set other fields
for line in linelist[1:]:
if not line:
continue
line_split = line.strip().split()
field_name = line_split[0].lower()
other_fields = line_split[1:]
if field_name == "Volume":
log.Warn(_("Warning, found extra Volume identifier"))
break
elif field_name == "startingpath":
self.start_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.start_block = int(other_fields[1])
else:
self.start_block = None
elif field_name == "endingpath":
self.end_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.end_block = int(other_fields[1])
else:
self.end_block = None
elif field_name == "hash":
self.set_hash(other_fields[0], other_fields[1])
if self.start_index is None or self.end_index is None:
raise VolumeInfoError("Start or end index not set")
return self
def __eq__(self, other):
"""
Used in test suite
"""
if not isinstance(other, VolumeInfo):
log.Notice(_("Other is not VolumeInfo"))
return None
if self.volume_number != other.volume_number:
log.Notice(_("Volume numbers don't match"))
return None
if self.start_index != other.start_index:
log.Notice(_("start_indicies don't match"))
return None
if self.end_index != other.end_index:
log.Notice(_("end_index don't match"))
return None
hash_list1 = self.hashes.items()
hash_list1.sort()
hash_list2 = other.hashes.items()
hash_list2.sort()
if hash_list1 != hash_list2:
log.Notice(_("Hashes don't match"))
return None
return 1
def __ne__(self, other):
"""
Defines !=
"""
return not self.__eq__(other)
def contains(self, index_prefix, recursive=1):
"""
Return true if volume might contain index
If recursive is true, then return true if any index starting
with index_prefix could be contained. Otherwise, just check
if index_prefix itself is between starting and ending
indicies.
"""
if recursive:
return (self.start_index[:len(index_prefix)] <=
index_prefix <= self.end_index)
else:
return self.start_index <= index_prefix <= self.end_index
nonnormal_char_re = re.compile("(\\s|[\\\\\"'])")
def Quote(s):
"""
Return quoted version of s safe to put in a manifest or volume info
"""
if not nonnormal_char_re.search(s):
return s # no quoting necessary
slist = []
for char in s:
if nonnormal_char_re.search(char):
slist.append("\\x%02x" % ord(char))
else:
slist.append(char)
return '"%s"' % "".join(slist)
def Unquote(quoted_string):
"""
Return original string from quoted_string produced by above
"""
if not quoted_string[0] == '"' or quoted_string[0] == "'":
return quoted_string
assert quoted_string[0] == quoted_string[-1]
return_list = []
i = 1 # skip initial char
while i < len(quoted_string) - 1:
char = quoted_string[i]
if char == "\\":
# quoted section
assert quoted_string[i + 1] == "x"
return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))
i += 4
else:
return_list.append(char)
i += 1
return "".join(return_list)
| """
Write string version of manifest to given path
"""
assert not path.exists()
fout = path.open("wb")
fout.write(self.to_string())
assert not fout.close()
path.setdata() | identifier_body |
manifest.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Create and edit manifest for session contents"""
from future_builtins import filter
import re
from duplicity import globals
from duplicity import log
from duplicity import globals
from duplicity import util
class ManifestError(Exception):
"""
Exception raised when problem with manifest
"""
pass
class Manifest:
"""
List of volumes and information about each one
"""
def __init__(self, fh=None):
"""
Create blank Manifest
@param fh: fileobj for manifest
@type fh: DupPath
@rtype: Manifest
@return: manifest
"""
self.hostname = None
self.local_dirname = None
self.volume_info_dict = {} # dictionary vol numbers -> vol infos
self.fh = fh
self.files_changed = []
def set_dirinfo(self):
"""
Set information about directory from globals,
and write to manifest file.
@rtype: Manifest
@return: manifest
"""
self.hostname = globals.hostname
self.local_dirname = globals.local_path.name # @UndefinedVariable
if self.fh:
if self.hostname:
self.fh.write("Hostname %s\n" % self.hostname)
if self.local_dirname:
self.fh.write("Localdir %s\n" % Quote(self.local_dirname))
return self
def check_dirinfo(self):
"""
Return None if dirinfo is the same, otherwise error message
Does not raise an error message if hostname or local_dirname
are not available.
@rtype: string
@return: None or error message
"""
if globals.allow_source_mismatch:
return
if self.hostname and self.hostname != globals.hostname:
errmsg = _("Fatal Error: Backup source host has changed.\n"
"Current hostname: %s\n"
"Previous hostname: %s") % (globals.hostname, self.hostname)
code = log.ErrorCode.hostname_mismatch
code_extra = "%s %s" % (util.escape(globals.hostname), util.escape(self.hostname))
elif (self.local_dirname and self.local_dirname != globals.local_path.name): # @UndefinedVariable
errmsg = _("Fatal Error: Backup source directory has changed.\n"
"Current directory: %s\n"
"Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable
code = log.ErrorCode.source_dir_mismatch
code_extra = "%s %s" % (util.escape(globals.local_path.name),
util.escape(self.local_dirname)) # @UndefinedVariable
else:
return
log.FatalError(errmsg + "\n\n" +
_("Aborting because you may have accidentally tried to "
"backup two different data sets to the same remote "
"location, or using the same archive directory. If "
"this is not a mistake, use the "
"--allow-source-mismatch switch to avoid seeing this "
"message"), code, code_extra)
def set_files_changed_info(self, files_changed):
if files_changed:
self.files_changed = files_changed
if self.fh:
self.fh.write("Filelist %d\n" % len(self.files_changed))
for fileinfo in self.files_changed:
self.fh.write(" %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0])))
def add_volume_info(self, vi):
"""
Add volume info vi to manifest and write to manifest
@param vi: volume info to add
@type vi: VolumeInfo
@return: void
"""
vol_num = vi.volume_number
self.volume_info_dict[vol_num] = vi
if self.fh:
self.fh.write(vi.to_string() + "\n")
def del_volume_info(self, vol_num):
"""
Remove volume vol_num from the manifest
@param vol_num: volume number to delete
@type vi: int
@return: void
"""
try:
del self.volume_info_dict[vol_num]
except Exception:
raise ManifestError("Volume %d not present in manifest" % (vol_num,))
def | (self):
"""
Return string version of self (just concatenate vi strings)
@rtype: string
@return: self in string form
"""
result = ""
if self.hostname:
result += "Hostname %s\n" % self.hostname
if self.local_dirname:
result += "Localdir %s\n" % Quote(self.local_dirname)
result += "Filelist %d\n" % len(self.files_changed)
for fileinfo in self.files_changed:
result += " %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0]))
vol_num_list = self.volume_info_dict.keys()
vol_num_list.sort()
def vol_num_to_string(vol_num):
return self.volume_info_dict[vol_num].to_string()
result = "%s%s\n" % (result,
"\n".join(map(vol_num_to_string, vol_num_list)))
return result
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s, return self
"""
def get_field(fieldname):
"""
Return the value of a field by parsing s, or None if no field
"""
m = re.search("(^|\\n)%s\\s(.*?)\n" % fieldname, s, re.I)
if not m:
return None
else:
return Unquote(m.group(2))
self.hostname = get_field("hostname")
self.local_dirname = get_field("localdir")
highest_vol = 0
latest_vol = 0
vi_regexp = re.compile("(?:^|\\n)(volume\\s.*(?:\\n.*)*?)(?=\\nvolume\\s|$)", re.I)
vi_iterator = vi_regexp.finditer(s)
for match in vi_iterator:
vi = VolumeInfo().from_string(match.group(1))
self.add_volume_info(vi)
latest_vol = vi.volume_number
highest_vol = max(highest_vol, latest_vol)
log.Debug(_("Found manifest volume %s") % latest_vol)
# If we restarted after losing some remote volumes, the highest volume
# seen may be higher than the last volume recorded. That is, the
# manifest could contain "vol1, vol2, vol3, vol2." If so, we don't
# want to keep vol3's info.
for i in range(latest_vol + 1, highest_vol + 1):
self.del_volume_info(i)
log.Info(_("Found %s volumes in manifest") % latest_vol)
# Get file changed list - not needed if --file-changed not present
filecount = 0
if globals.file_changed is not None:
filelist_regexp = re.compile("(^|\\n)filelist\\s([0-9]+)\\n(.*?)(\\nvolume\\s|$)", re.I | re.S)
match = filelist_regexp.search(s)
if match:
filecount = int(match.group(2))
if filecount > 0:
def parse_fileinfo(line):
fileinfo = line.strip().split()
return (fileinfo[0], ''.join(fileinfo[1:]))
self.files_changed = list(map(parse_fileinfo, match.group(3).split('\n')))
if filecount != len(self.files_changed):
log.Error(_("Manifest file '%s' is corrupt: File count says %d, File list contains %d" %
(self.fh.base if self.fh else "", filecount, len(self.files_changed))))
self.corrupt_filelist = True
return self
def get_files_changed(self):
return self.files_changed
def __eq__(self, other):
"""
Two manifests are equal if they contain the same volume infos
"""
vi_list1 = self.volume_info_dict.keys()
vi_list1.sort()
vi_list2 = other.volume_info_dict.keys()
vi_list2.sort()
if vi_list1 != vi_list2:
log.Notice(_("Manifests not equal because different volume numbers"))
return False
for i in range(len(vi_list1)):
if not vi_list1[i] == vi_list2[i]:
log.Notice(_("Manifests not equal because volume lists differ"))
return False
if (self.hostname != other.hostname or
self.local_dirname != other.local_dirname):
log.Notice(_("Manifests not equal because hosts or directories differ"))
return False
return True
def __ne__(self, other):
"""
Defines !=. Not doing this always leads to annoying bugs...
"""
return not self.__eq__(other)
def write_to_path(self, path):
"""
Write string version of manifest to given path
"""
assert not path.exists()
fout = path.open("wb")
fout.write(self.to_string())
assert not fout.close()
path.setdata()
def get_containing_volumes(self, index_prefix):
"""
Return list of volume numbers that may contain index_prefix
"""
return filter(lambda vol_num:
self.volume_info_dict[vol_num].contains(index_prefix),
self.volume_info_dict.keys())
class VolumeInfoError(Exception):
"""
Raised when there is a problem initializing a VolumeInfo from string
"""
pass
class VolumeInfo:
"""
Information about a single volume
"""
def __init__(self):
"""VolumeInfo initializer"""
self.volume_number = None
self.start_index = None
self.start_block = None
self.end_index = None
self.end_block = None
self.hashes = {}
def set_info(self, vol_number,
start_index, start_block,
end_index, end_block):
"""
Set essential VolumeInfo information, return self
Call with starting and ending paths stored in the volume. If
a multivol diff gets split between volumes, count it as being
part of both volumes.
"""
self.volume_number = vol_number
self.start_index = start_index
self.start_block = start_block
self.end_index = end_index
self.end_block = end_block
return self
def set_hash(self, hash_name, data):
"""
Set the value of hash hash_name (e.g. "MD5") to data
"""
self.hashes[hash_name] = data
def get_best_hash(self):
"""
Return pair (hash_type, hash_data)
SHA1 is the best hash, and MD5 is the second best hash. None
is returned if no hash is available.
"""
if not self.hashes:
return None
try:
return ("SHA1", self.hashes['SHA1'])
except KeyError:
pass
try:
return ("MD5", self.hashes['MD5'])
except KeyError:
pass
return self.hashes.items()[0]
def to_string(self):
"""
Return nicely formatted string reporting all information
"""
def index_to_string(index):
"""Return printable version of index without any whitespace"""
if index:
s = "/".join(index)
return Quote(s)
else:
return "."
slist = ["Volume %d:" % self.volume_number]
whitespace = " "
slist.append("%sStartingPath %s %s" %
(whitespace, index_to_string(self.start_index), (self.start_block or " ")))
slist.append("%sEndingPath %s %s" %
(whitespace, index_to_string(self.end_index), (self.end_block or " ")))
for key in self.hashes:
slist.append("%sHash %s %s" %
(whitespace, key, self.hashes[key]))
return "\n".join(slist)
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s as created by to_string
"""
def string_to_index(s):
"""
Return tuple index from string
"""
s = Unquote(s)
if s == ".":
return ()
return tuple(s.split("/"))
linelist = s.strip().split("\n")
# Set volume number
m = re.search("^Volume ([0-9]+):", linelist[0], re.I)
if not m:
raise VolumeInfoError("Bad first line '%s'" % (linelist[0],))
self.volume_number = int(m.group(1))
# Set other fields
for line in linelist[1:]:
if not line:
continue
line_split = line.strip().split()
field_name = line_split[0].lower()
other_fields = line_split[1:]
if field_name == "Volume":
log.Warn(_("Warning, found extra Volume identifier"))
break
elif field_name == "startingpath":
self.start_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.start_block = int(other_fields[1])
else:
self.start_block = None
elif field_name == "endingpath":
self.end_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.end_block = int(other_fields[1])
else:
self.end_block = None
elif field_name == "hash":
self.set_hash(other_fields[0], other_fields[1])
if self.start_index is None or self.end_index is None:
raise VolumeInfoError("Start or end index not set")
return self
def __eq__(self, other):
"""
Used in test suite
"""
if not isinstance(other, VolumeInfo):
log.Notice(_("Other is not VolumeInfo"))
return None
if self.volume_number != other.volume_number:
log.Notice(_("Volume numbers don't match"))
return None
if self.start_index != other.start_index:
log.Notice(_("start_indicies don't match"))
return None
if self.end_index != other.end_index:
log.Notice(_("end_index don't match"))
return None
hash_list1 = self.hashes.items()
hash_list1.sort()
hash_list2 = other.hashes.items()
hash_list2.sort()
if hash_list1 != hash_list2:
log.Notice(_("Hashes don't match"))
return None
return 1
def __ne__(self, other):
"""
Defines !=
"""
return not self.__eq__(other)
def contains(self, index_prefix, recursive=1):
"""
Return true if volume might contain index
If recursive is true, then return true if any index starting
with index_prefix could be contained. Otherwise, just check
if index_prefix itself is between starting and ending
indicies.
"""
if recursive:
return (self.start_index[:len(index_prefix)] <=
index_prefix <= self.end_index)
else:
return self.start_index <= index_prefix <= self.end_index
nonnormal_char_re = re.compile("(\\s|[\\\\\"'])")
def Quote(s):
"""
Return quoted version of s safe to put in a manifest or volume info
"""
if not nonnormal_char_re.search(s):
return s # no quoting necessary
slist = []
for char in s:
if nonnormal_char_re.search(char):
slist.append("\\x%02x" % ord(char))
else:
slist.append(char)
return '"%s"' % "".join(slist)
def Unquote(quoted_string):
"""
Return original string from quoted_string produced by above
"""
if not quoted_string[0] == '"' or quoted_string[0] == "'":
return quoted_string
assert quoted_string[0] == quoted_string[-1]
return_list = []
i = 1 # skip initial char
while i < len(quoted_string) - 1:
char = quoted_string[i]
if char == "\\":
# quoted section
assert quoted_string[i + 1] == "x"
return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))
i += 4
else:
return_list.append(char)
i += 1
return "".join(return_list)
| to_string | identifier_name |
manifest.py | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Create and edit manifest for session contents"""
from future_builtins import filter
import re
from duplicity import globals
from duplicity import log
from duplicity import globals
from duplicity import util
class ManifestError(Exception):
"""
Exception raised when problem with manifest
"""
pass
class Manifest:
"""
List of volumes and information about each one
"""
def __init__(self, fh=None):
"""
Create blank Manifest
@param fh: fileobj for manifest
@type fh: DupPath
@rtype: Manifest
@return: manifest
"""
self.hostname = None
self.local_dirname = None
self.volume_info_dict = {} # dictionary vol numbers -> vol infos
self.fh = fh
self.files_changed = []
def set_dirinfo(self):
"""
Set information about directory from globals,
and write to manifest file.
@rtype: Manifest
@return: manifest
"""
self.hostname = globals.hostname
self.local_dirname = globals.local_path.name # @UndefinedVariable
if self.fh:
if self.hostname:
self.fh.write("Hostname %s\n" % self.hostname)
if self.local_dirname:
self.fh.write("Localdir %s\n" % Quote(self.local_dirname))
return self
def check_dirinfo(self):
"""
Return None if dirinfo is the same, otherwise error message
Does not raise an error message if hostname or local_dirname
are not available.
@rtype: string
@return: None or error message
"""
if globals.allow_source_mismatch:
return
if self.hostname and self.hostname != globals.hostname:
errmsg = _("Fatal Error: Backup source host has changed.\n"
"Current hostname: %s\n"
"Previous hostname: %s") % (globals.hostname, self.hostname)
code = log.ErrorCode.hostname_mismatch
code_extra = "%s %s" % (util.escape(globals.hostname), util.escape(self.hostname))
elif (self.local_dirname and self.local_dirname != globals.local_path.name): # @UndefinedVariable
errmsg = _("Fatal Error: Backup source directory has changed.\n"
"Current directory: %s\n"
"Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable
code = log.ErrorCode.source_dir_mismatch
code_extra = "%s %s" % (util.escape(globals.local_path.name),
util.escape(self.local_dirname)) # @UndefinedVariable
else:
return
log.FatalError(errmsg + "\n\n" +
_("Aborting because you may have accidentally tried to "
"backup two different data sets to the same remote "
"location, or using the same archive directory. If "
"this is not a mistake, use the "
"--allow-source-mismatch switch to avoid seeing this "
"message"), code, code_extra)
def set_files_changed_info(self, files_changed):
if files_changed:
self.files_changed = files_changed
if self.fh:
self.fh.write("Filelist %d\n" % len(self.files_changed))
for fileinfo in self.files_changed:
self.fh.write(" %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0])))
def add_volume_info(self, vi):
"""
Add volume info vi to manifest and write to manifest
@param vi: volume info to add
@type vi: VolumeInfo
@return: void
"""
vol_num = vi.volume_number
self.volume_info_dict[vol_num] = vi
if self.fh:
self.fh.write(vi.to_string() + "\n")
def del_volume_info(self, vol_num):
"""
Remove volume vol_num from the manifest
@param vol_num: volume number to delete
@type vi: int
@return: void
"""
try:
del self.volume_info_dict[vol_num]
except Exception:
raise ManifestError("Volume %d not present in manifest" % (vol_num,))
def to_string(self):
"""
Return string version of self (just concatenate vi strings)
@rtype: string
@return: self in string form
"""
result = ""
if self.hostname:
result += "Hostname %s\n" % self.hostname
if self.local_dirname:
result += "Localdir %s\n" % Quote(self.local_dirname)
result += "Filelist %d\n" % len(self.files_changed)
for fileinfo in self.files_changed:
result += " %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0]))
vol_num_list = self.volume_info_dict.keys()
vol_num_list.sort()
def vol_num_to_string(vol_num):
return self.volume_info_dict[vol_num].to_string()
result = "%s%s\n" % (result,
"\n".join(map(vol_num_to_string, vol_num_list)))
return result
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s, return self
"""
def get_field(fieldname):
"""
Return the value of a field by parsing s, or None if no field
"""
m = re.search("(^|\\n)%s\\s(.*?)\n" % fieldname, s, re.I)
if not m:
return None
else:
return Unquote(m.group(2))
self.hostname = get_field("hostname")
self.local_dirname = get_field("localdir")
highest_vol = 0
latest_vol = 0
vi_regexp = re.compile("(?:^|\\n)(volume\\s.*(?:\\n.*)*?)(?=\\nvolume\\s|$)", re.I)
vi_iterator = vi_regexp.finditer(s)
for match in vi_iterator:
vi = VolumeInfo().from_string(match.group(1))
self.add_volume_info(vi)
latest_vol = vi.volume_number
highest_vol = max(highest_vol, latest_vol)
log.Debug(_("Found manifest volume %s") % latest_vol)
# If we restarted after losing some remote volumes, the highest volume
# seen may be higher than the last volume recorded. That is, the
# manifest could contain "vol1, vol2, vol3, vol2." If so, we don't
# want to keep vol3's info.
for i in range(latest_vol + 1, highest_vol + 1):
self.del_volume_info(i)
log.Info(_("Found %s volumes in manifest") % latest_vol)
# Get file changed list - not needed if --file-changed not present
filecount = 0
if globals.file_changed is not None:
filelist_regexp = re.compile("(^|\\n)filelist\\s([0-9]+)\\n(.*?)(\\nvolume\\s|$)", re.I | re.S)
match = filelist_regexp.search(s)
if match:
filecount = int(match.group(2))
if filecount > 0:
def parse_fileinfo(line):
fileinfo = line.strip().split()
return (fileinfo[0], ''.join(fileinfo[1:]))
self.files_changed = list(map(parse_fileinfo, match.group(3).split('\n')))
if filecount != len(self.files_changed):
log.Error(_("Manifest file '%s' is corrupt: File count says %d, File list contains %d" %
(self.fh.base if self.fh else "", filecount, len(self.files_changed))))
self.corrupt_filelist = True
return self
def get_files_changed(self):
return self.files_changed
def __eq__(self, other):
"""
Two manifests are equal if they contain the same volume infos
"""
vi_list1 = self.volume_info_dict.keys()
vi_list1.sort()
vi_list2 = other.volume_info_dict.keys()
vi_list2.sort()
if vi_list1 != vi_list2:
log.Notice(_("Manifests not equal because different volume numbers"))
return False
for i in range(len(vi_list1)):
if not vi_list1[i] == vi_list2[i]:
|
if (self.hostname != other.hostname or
self.local_dirname != other.local_dirname):
log.Notice(_("Manifests not equal because hosts or directories differ"))
return False
return True
def __ne__(self, other):
"""
Defines !=. Not doing this always leads to annoying bugs...
"""
return not self.__eq__(other)
def write_to_path(self, path):
"""
Write string version of manifest to given path
"""
assert not path.exists()
fout = path.open("wb")
fout.write(self.to_string())
assert not fout.close()
path.setdata()
def get_containing_volumes(self, index_prefix):
"""
Return list of volume numbers that may contain index_prefix
"""
return filter(lambda vol_num:
self.volume_info_dict[vol_num].contains(index_prefix),
self.volume_info_dict.keys())
class VolumeInfoError(Exception):
"""
Raised when there is a problem initializing a VolumeInfo from string
"""
pass
class VolumeInfo:
"""
Information about a single volume
"""
def __init__(self):
"""VolumeInfo initializer"""
self.volume_number = None
self.start_index = None
self.start_block = None
self.end_index = None
self.end_block = None
self.hashes = {}
def set_info(self, vol_number,
start_index, start_block,
end_index, end_block):
"""
Set essential VolumeInfo information, return self
Call with starting and ending paths stored in the volume. If
a multivol diff gets split between volumes, count it as being
part of both volumes.
"""
self.volume_number = vol_number
self.start_index = start_index
self.start_block = start_block
self.end_index = end_index
self.end_block = end_block
return self
def set_hash(self, hash_name, data):
"""
Set the value of hash hash_name (e.g. "MD5") to data
"""
self.hashes[hash_name] = data
def get_best_hash(self):
"""
Return pair (hash_type, hash_data)
SHA1 is the best hash, and MD5 is the second best hash. None
is returned if no hash is available.
"""
if not self.hashes:
return None
try:
return ("SHA1", self.hashes['SHA1'])
except KeyError:
pass
try:
return ("MD5", self.hashes['MD5'])
except KeyError:
pass
return self.hashes.items()[0]
def to_string(self):
"""
Return nicely formatted string reporting all information
"""
def index_to_string(index):
"""Return printable version of index without any whitespace"""
if index:
s = "/".join(index)
return Quote(s)
else:
return "."
slist = ["Volume %d:" % self.volume_number]
whitespace = " "
slist.append("%sStartingPath %s %s" %
(whitespace, index_to_string(self.start_index), (self.start_block or " ")))
slist.append("%sEndingPath %s %s" %
(whitespace, index_to_string(self.end_index), (self.end_block or " ")))
for key in self.hashes:
slist.append("%sHash %s %s" %
(whitespace, key, self.hashes[key]))
return "\n".join(slist)
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s as created by to_string
"""
def string_to_index(s):
"""
Return tuple index from string
"""
s = Unquote(s)
if s == ".":
return ()
return tuple(s.split("/"))
linelist = s.strip().split("\n")
# Set volume number
m = re.search("^Volume ([0-9]+):", linelist[0], re.I)
if not m:
raise VolumeInfoError("Bad first line '%s'" % (linelist[0],))
self.volume_number = int(m.group(1))
# Set other fields
for line in linelist[1:]:
if not line:
continue
line_split = line.strip().split()
field_name = line_split[0].lower()
other_fields = line_split[1:]
if field_name == "Volume":
log.Warn(_("Warning, found extra Volume identifier"))
break
elif field_name == "startingpath":
self.start_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.start_block = int(other_fields[1])
else:
self.start_block = None
elif field_name == "endingpath":
self.end_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.end_block = int(other_fields[1])
else:
self.end_block = None
elif field_name == "hash":
self.set_hash(other_fields[0], other_fields[1])
if self.start_index is None or self.end_index is None:
raise VolumeInfoError("Start or end index not set")
return self
def __eq__(self, other):
"""
Used in test suite
"""
if not isinstance(other, VolumeInfo):
log.Notice(_("Other is not VolumeInfo"))
return None
if self.volume_number != other.volume_number:
log.Notice(_("Volume numbers don't match"))
return None
if self.start_index != other.start_index:
log.Notice(_("start_indicies don't match"))
return None
if self.end_index != other.end_index:
log.Notice(_("end_index don't match"))
return None
hash_list1 = self.hashes.items()
hash_list1.sort()
hash_list2 = other.hashes.items()
hash_list2.sort()
if hash_list1 != hash_list2:
log.Notice(_("Hashes don't match"))
return None
return 1
def __ne__(self, other):
"""
Defines !=
"""
return not self.__eq__(other)
def contains(self, index_prefix, recursive=1):
"""
Return true if volume might contain index
If recursive is true, then return true if any index starting
with index_prefix could be contained. Otherwise, just check
if index_prefix itself is between starting and ending
indicies.
"""
if recursive:
return (self.start_index[:len(index_prefix)] <=
index_prefix <= self.end_index)
else:
return self.start_index <= index_prefix <= self.end_index
nonnormal_char_re = re.compile("(\\s|[\\\\\"'])")
def Quote(s):
"""
Return quoted version of s safe to put in a manifest or volume info
"""
if not nonnormal_char_re.search(s):
return s # no quoting necessary
slist = []
for char in s:
if nonnormal_char_re.search(char):
slist.append("\\x%02x" % ord(char))
else:
slist.append(char)
return '"%s"' % "".join(slist)
def Unquote(quoted_string):
"""
Return original string from quoted_string produced by above
"""
if not quoted_string[0] == '"' or quoted_string[0] == "'":
return quoted_string
assert quoted_string[0] == quoted_string[-1]
return_list = []
i = 1 # skip initial char
while i < len(quoted_string) - 1:
char = quoted_string[i]
if char == "\\":
# quoted section
assert quoted_string[i + 1] == "x"
return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))
i += 4
else:
return_list.append(char)
i += 1
return "".join(return_list)
| log.Notice(_("Manifests not equal because volume lists differ"))
return False | conditional_block |
ship.py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:37:25 2018
@author: hoog
"""
from functions import *
import os
# Ship constants
sensor_colours = [(100,100,240),(240,100,100)] # NO WALL, WALL
class ship:
"""Class for holding an indivudual racer and all the variables it needs. """
############### INITIALIZATION #########################
# Functions that are run once at the start of each generation
########################################################
def __init__(self, startpos = (75,75), angle = 0, colour = (240,100,100),
maxSpeed = 20, maxAccel = 1, maxAngle = 0.1,
width = 1600, height = 900, maze = None,
intermediates = (8,), inputdistance = [50,100,150], inputangle = [1.2,0.6,0,-0.6,-1.2],
parentname = "", parentcolour = (240,100,100), name = None,orders = [1,2,3,4,5,6,7,8]):
""" Creates the ship with randomly assigned weights """
self.startpos, self.startangle, self.colour = startpos, angle, colour
self.maxSpeed, self.maxAccel, self.maxAngle = maxSpeed, maxAccel, maxAngle
self.maze = maze
self.width, self.height = width, height
self.parentname, self.parentcolour = parentname, parentcolour
# Create dimensions array based on input, intermediate dimensions and output (4)
self.inputType = 1 # 0: point, 1: linear
self.setDimension(inputdistance,inputangle,intermediates,orders)
self.drag = 0.99
self.initWeights()
self.sightLength = 200
if name is not None:
self.name = name
else:
self.name = self.getName()
self.reset()
def setDimension(self,inputdistance,inputangle,intermediates, orders):
""" Sets parameters needed for decision making """
if self.inputType == 0: # Matrix of angles and distances
self.dimensions = [len(inputdistance)*len(inputangle)]
elif self.inputType == 1: # Only angles, each one getting one input
self.dimensions = [len(inputangle)*len(orders)*2] # times 2 for having the orders work in reverse test
inputdistance = 1
self.dimensions.extend(intermediates)
self.dimensions.append(4)
self.inputdistance, self.inputangle, self.intermediates, self.orders = inputdistance, inputangle, intermediates, orders
def reset(self):
""" Returns the ship to its starting location and reinitializes """
self.resetPos()
self.vx, self.vy = 0, 0
self.accel, self.dangle = 0, 0
self.crashed = False
self.timeDriving, self.score, self.checkpoint, self.laps = 0, 0, 0, 0
self.targetCheckpointPos = self.maze.checkpoints[0].getMidInt()
self.inputColour = [sensor_colours[0] for i in range(self.dimensions[0])]
self.scan = np.array([0 for i in range(self.dimensions[0])])
self.cost = [0 for i in range(6)]
#Extrapos for CTS LOS
self.extrapos = []
def resetPos(self):
""" Go back to start location """
self.angle = self.startangle
self.pos = []
self.pos.extend(self.startpos)
def newSpawn(self, colour = (100,100,240)):
self.initWeights()
self.name = self.getName()
self.parentname = ""
self.parentcolour = colour
def initWeights(self):
""" Initializes weights to randomly selected ones."""
self.weights = []
self.bias = []
for i, dim in enumerate(self.dimensions[1:]):
self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))
self.bias.append(np.random.uniform(-1,1,dim))
def copyWeights(self, shp, stray = 0, colour = (240,100,100)):
""" Changes weights to be around the ones provided by shp.
This is used to generate offspring from the shp provided.
"""
self.weights = []
self.bias = []
if(stray == 0): # straight copy
for i, wt in enumerate(shp.weights):
self.weights.append(wt.copy())
for i,bs in enumerate(shp.bias):
self.bias.append(bs.copy())
else: # Copy with some random added in
for i, wt in enumerate(shp.weights):
self.weights.append(np.add(wt.copy(), np.random.normal(0,stray,(shp.dimensions[i],shp.dimensions[i+1]))))
for i,bs in enumerate(shp.bias):
self.bias.append(np.add(bs.copy(), np.random.normal(0,stray,shp.dimensions[i+1])))
self.normalizeWeights()
self.colour = colour
self.parentname = shp.name
self.parentcolour = shp.colour
self.setDimension(shp.inputdistance,shp.inputangle,shp.intermediates,shp.orders)
def saveWeights(self, basename, generation):
""" Saves the np array of weights for easy loading later"""
for i,wt in enumerate(self.weights):
np.save("./data/"+basename+"/"+basename + "_W"+str(i)+"_G" + str(generation),wt)
for i,bs in enumerate(self.bias):
np.save("./data/"+basename+"/"+basename + "_B"+str(i)+"_G" + str(generation),bs)
def loadWeights(self,basename,generation,colour = None):
temp = "./data/"+basename+"/"+basename
self.weights = []
done = False
i = 0
while(not done):
wn = temp + "_W"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(wn)):
self.weights.append(np.load(wn))
else: done = True
i += 1
self.bias = []
done = False
i = 0
while(not done):
bn = temp + "_B"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(bn)):
self.bias.append(np.load(bn))
else: done = True
i += 1
if(colour is not None):
self.colour = colour
def normalizeWeights(self):
""" Make sure the weights and biases stay inside (-1,1) """
for wt in self.weights:
wt[wt>1] = 1
wt[wt<-1] = -1
for bs in self.bias:
bs[bs>1] = 1
bs[bs<-1] = -1
def copyWeightsExper(self, shp, stray = 0, colour = (240,100,100)):
""" version of copyWeights() that only take 1 element of each weight matrix
and changes it absolutely to a new value, regardless of the input value.
"""
self.copyWeights(shp, stray = stray, colour = colour)
for wt in self.weights:
i = np.random.randint(wt.shape[0])
j = np.random.randint(wt.shape[1])
wt[i,j] = np.random.uniform(-1,1,1)
for bs in self.bias:
i = np.random.randint(bs.shape[0])
bs[i] = np.random.uniform(-1,1,1)
############### UPDATE #################################
# Functions that may be used at each timestep of the race
########################################################
def moveShip(self,screen,maze):
""" Based on the ship's brain and inputs, get a decision for this
timestep and apply it to the acceleration, braking and turning
"""
self.checkCheckpoint()
angle = 0
accel = 0
controlInputs = self.getDecision()
angle -= logis(controlInputs[0]) * self.maxAngle
angle += logis(controlInputs[1]) * self.maxAngle
accel += logis(controlInputs[2]) * self.maxAccel
brake = logis(controlInputs[3])
self.updateSpeed(accel,angle,brake)
self.updatePos()
self.getInputs(maze)
def checkCheckpoint(self):
"""Determines if we have passed a checkpoint this timestep"""
if self.maze.checkpoints[self.checkpoint].checkCollision(self.pos):
self.checkpoint +=1
if(self.checkpoint >= self.maze.checkpointsPerLap):
if(self.maze.mazeType == "circular"):
self.checkpoint = 0
self.laps +=1
elif(self.maze.mazeType == "linear"):
self.checkpoint = 0
self.laps +=1
self.resetPos()
self.targetCheckpointPos = self.maze.checkpoints[self.checkpoint].getMidInt()
def checkFuel(self):
""" Returns the score received based on checkpoint progress minus the time driving.
If this is below 0 the sihp is said to be out of fuel and crashes
"""
return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving
def updateSpeed(self,accel,dangle,brake):
""" Get new vx and vy to update position"""
self.dangle += dangle
self.dangle = self.dangle * self.drag*(1-brake/3)*0.6
self.angle += self.dangle
self.accel = accel
self.vx += accel * np.cos(self.angle)
self.vy += accel * np.sin(self.angle)
# flat cap on speed
if(self.vx > self.maxSpeed): self.vx = self.maxSpeed
if(self.vy > self.maxSpeed): self.vy = self.maxSpeed
if(self.vx < -1*self.maxSpeed): self.vx = -1*self.maxSpeed
if(self.vy < -1*self.maxSpeed): self.vy = -1*self.maxSpeed
# apply drag and braking to slow down
self.vx = self.vx * self.drag*(1-brake/3)
self.vy = self.vy * self.drag*(1-brake/3)
def updatePos(self):
""" Update where the ship is each timestep based on calculated velocity."""
self.timeDriving +=1
self.pos[0] += self.vx
self.pos[1] += self.vy
    def getInputs(self,maze):
        """Refresh the sensor vector `scan` from the maze.

        inputType 0: probe a grid of points (one per angle x distance pair)
        ahead of the ship; a point scores 1 when clear and 0 when blocked,
        and any point behind a blocked one on the same ray is also marked
        blocked. inputType 1 ("continuous LOS"): cast one sight line per
        angle and write several powers of the normalized sight distance
        (and of its complement) into `scan`.
        """
        self.inputPos = []
        #Extrapos for CTS LOS
        self.extrapos = []
        # i indexes rays (and probe points in mode 0); j indexes the
        # per-order slots of `scan` in mode 1.
        i,j=0,0
        # array of front views
        for ang in self.inputangle:
            blocked = False
            if self.inputType == 0:
                for dis in self.inputdistance:
                    # World-space probe point `dis` ahead along this ray.
                    self.inputPos.append([int(self.pos[0] + dis*np.cos(self.angle+ang)),
                                      int(self.pos[1] + dis*np.sin(self.angle+ang))])
                    # Once one point on the ray is blocked, all farther
                    # points on the same ray count as blocked too.
                    if(maze.checkCollisions(self.inputPos[i]) or blocked):
                        blocked = True
                        self.inputColour[i] = sensor_colours[1]
                        self.scan[i] = 0
                    else:
                        self.inputColour[i] = sensor_colours[0]
                        self.scan[i] = 1
                    i +=1
            elif self.inputType == 1:
                #eXPERIMENTAL STUFF FOR continuous LOS
                # extrapos entry presumably (hit, endpoint, distance) from the
                # maze's ray cast -- TODO confirm against getMaximumSightDistance.
                self.extrapos.append(maze.getMaximumSightDistance(self.pos, self.angle+ang, self.sightLength))
                temp_length = self.extrapos[i][2]
                # NOTE(review): `ord` shadows the builtin ord() in this loop.
                # NOTE(review): these values are fractions in [0, 1]; if `scan`
                # was created with an integer dtype they truncate to 0 --
                # verify how reset() allocates `scan`.
                for ord in self.orders:
                    self.scan[j] = (temp_length/self.sightLength)**ord
                    self.scan[j+1] = ((self.sightLength-temp_length)/self.sightLength)**ord # add opposite way, gets stronger closer instead of stronger further. might make a difference.
                    j +=2
                i+=1
def getDecision(self):
""" Use the input vector and all the weights to decide how to control
the ship this timestep.
"""
temp = []
temp.append( np.array(self.scan) )
for i,wt in enumerate(self.weights):
#print(self.bias[i],temp[i].dot(wt))
temp.append(np.add(temp[i].dot(wt),self.bias[i]))
#print(str(self.bias) + " " + str(wt))
return temp[len(self.weights)].tolist() # np.add(np.add(np.add(self.scan.dot(self.weights[0]), self.bias[0]).dot(self.weights[1]),self.bias[1]).dot(self.weights[2]),self.bias[2]).T
def getScore(self):
""" determine the current score of the ship """
tempscore = 1000 - 0.01*self.timeDriving
tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)
tempscore += self.checkpoint *1000
tempscore += self.laps * 1000 * len(self.maze.checkpoints)
return tempscore
def crash(self):
""" Once the ship's run has expired it crashes. Here its score is
tallied and it is stopped until it is reset The cost increases as
weights tend away from 0, resulting in fewer extreme weights
"""
self.cost = 0
for wt in self.weights:
self.cost += np.abs(wt).sum()
for bs in self.bias:
self.cost += np.abs(bs).sum()
self.score -= 0.00001*self.cost
# Score improves with distance and time driving
self.score += self.getScore()
# Stop the ship from going further
self.crashed = True
self.vx = 0
self.vy = 0
self.accel = 0
self.dangle = 0
#print(self.getName() + " has crashed at: " + str(self.pos[0])+ " " + str(self.pos[1]))
def getIntPos(self):
"""Returns the current ship position as a tuple of integers """
return (int(self.pos[0]),int(self.pos[1]))
############### VISUAL #################################
# Functions related to creating various visual effects on screen
########################################################
    def drawShip(self,screen,maze,frame,midpos = (450,800),zoom = 1,fancyShip = False, drawThrusters = True):
        """Draw the ship and (while alive) its sensors and thruster flames.

        Renders the active sensor overlay for the current input mode, then
        optional thruster polygons whose size scales with the current
        acceleration and turn rate, the quadrilateral hull in the ship's
        colour, and finally the cockpit circle.
        NOTE(review): `zoom` is unused and `fancyShip` only affects the
        commented-out block below.
        """
        # Screen-space base position of the ship.
        bp = self.getIntPos()
        bp = getOffsetPos(bp,midpos)
        # Draw Inputs
        if not self.crashed:
            if self.inputType == 0:
                self.drawPointInputs(screen,maze,midpos=midpos)
            elif self.inputType == 1:
                self.drawVariableLOS(screen,frame,midpos=midpos)
        # if(fancyShip): pygame.draw.polygon(screen, self.parentcolour,
        #                                 [[int(bp[0]+ 10 *np.cos(self.angle+3.14)),
        #                                  int(bp[1]+ 10 *np.sin(self.angle+3.14))],
        #                                 [int(bp[0]+ 10 *np.cos(self.angle+1)),
        #                                  int(bp[1]+ 10 *np.sin(self.angle+1))],
        #                                 [int(bp[0]),
        #                                  int(bp[1])],
        #                                 [int(bp[0]+ 10 *np.cos(self.angle-1)),
        #                                  int(bp[1]+ 10 *np.sin(self.angle-1))]])
        # draw thrusters
        if not self.crashed:
            if(drawThrusters):
                # Main thruster flame behind the ship; length scales with accel.
                pygame.draw.polygon(screen, (140,140,40),
                                    [[int(bp[0]+ self.accel*22 *np.cos(self.angle+3.14)),
                                      int(bp[1]+ self.accel*22 *np.sin(self.angle+3.14))],
                                    [int(bp[0]+ 7 *np.cos(self.angle + 2.64)),
                                     int(bp[1]+ 7 *np.sin(self.angle + 2.64))],
                                    [int(bp[0]+ 7 *np.cos(self.angle + 3.64)),
                                     int(bp[1]+ 7 *np.sin(self.angle + 3.64))]])
                # Side steering jet; offset scales with angular velocity.
                pygame.draw.polygon(screen, (140,140,40),
                                    [[int(bp[0]+ self.dangle*60 *np.cos(self.angle-1.57) + 7*np.cos(self.angle)),
                                      int(bp[1]+ self.dangle*60 *np.sin(self.angle-1.57) + 7*np.sin(self.angle))],
                                    [int(bp[0]+ 5 *np.cos(self.angle)),
                                     int(bp[1]+ 5 *np.sin(self.angle))],
                                    [int(bp[0]+ 9 *np.cos(self.angle)),
                                     int(bp[1]+ 9 *np.sin(self.angle))]])
        # draw ship: four hull vertices at fixed angular offsets from the heading
        pygame.draw.polygon(screen, self.colour,
                            [[int(bp[0]+ 10 *np.cos(self.angle-0.15)),
                              int(bp[1]+ 10 *np.sin(self.angle-0.15))],
                            [int(bp[0]+ 10 *np.cos(self.angle+0.15)),
                             int(bp[1]+ 10 *np.sin(self.angle+0.15))],
                            [int(bp[0]+ 10 *np.cos(self.angle + 2.64)),
                             int(bp[1]+ 10 *np.sin(self.angle + 2.64))],
                            [int(bp[0]+ 10 *np.cos(self.angle + 3.64)),
                             int(bp[1]+ 10 *np.sin(self.angle + 3.64))]])
        # Draw the cockpit
        pygame.draw.circle(screen, (140,160,240), bp, 5,2)
    def drawMatrix(self,screen,pos):
        """Visualize the network state as a grid of red/green squares.

        Shows the parent name and own name as text, then one square per
        input (green = clear, red = blocked), then one column of squares
        per layer showing the intermediate activation values, clamped to
        the displayable 0..240 colour range.
        NOTE(review): `tempOffset` is computed but never used.
        """
        bp = pos # base position bp
        namesurface = myfont.render(self.parentname, False, self.parentcolour)
        screen.blit(namesurface,(bp[0]-50,bp[1] -60),)
        tempOffset = namesurface.get_width()
        namesurface = myfont.render(self.name, False, self.colour)
        screen.blit(namesurface,(bp[0]-40 ,bp[1]-30),)
        # Square size and grid spacing in pixels.
        size = 10
        separationx = 12
        separationy = 20
        # Cycle through array of inputs
        for i in range(self.dimensions[0]):
            # Create red - green colour based on array
            temp_colour = (int((1-self.scan[i])*240),int(self.scan[i]*240),0)
            # Draw square that is slightly offset of previous square
            pygame.draw.rect(screen,temp_colour ,(bp[0] - separationx *int(i / len(self.inputdistance)),
                             bp[1] - separationx*(i%len(self.inputdistance)) + 3*separationx,size,size))
        # Calculate intermediate decision array
        temp_vector = self.scan
        # Repeat the forward pass layer by layer, drawing each activation.
        for j, bs in enumerate(self.bias):
            temp_vector = np.add(temp_vector.dot(self.weights[j]), bs)
            for i in range(temp_vector.shape[0]):
                # Clamp to 0..240 so out-of-range activations stay drawable.
                temp_colour = (int(max(min((1-temp_vector[i])*240,240),0)),int(max(min(temp_vector[i]*240,240),0)),0)
                pygame.draw.rect(screen,temp_colour ,(bp[0] + (j+1)*separationy,bp[1] + separationx*i,size,size))
def drawPointInputs(self,screen,maze,midpos = (450,800)):
|
    def highlight(self,screen,midpos = (800,450)):
        """Draw three expanding circles around the ship to call attention
        to it. The radii and ring colours are animated from
        timeDriving % 10, so the effect pulses as the ship drives.
        """
        posInt = self.getIntPos()
        posInt = getOffsetPos(posInt,midpos)
        # Inner ring: darkens as the pulse phase advances.
        pygame.draw.circle(screen, [max(0,tmp - (10 - self.timeDriving%10)*10) for tmp in self.colour],
                           posInt, int(10+ (self.timeDriving%10 )),2)
        # Middle ring in the ship's own colour.
        pygame.draw.circle(screen, self.colour, posInt, int(20+ (self.timeDriving%10 )),2)
        # Outer ring: fades in the opposite phase to the inner one.
        pygame.draw.circle(screen, [max(0,tmp - (self.timeDriving%10)*10) for tmp in self.colour],
                           posInt, int(30+ (self.timeDriving%10 )),2)
    def drawTargetCheckpoint(self,screen,maze,pos,midpos = (450,800)):
        """Indicate the next checkpoint to reach.

        Draws a small green dot one tenth of the way from the ship towards
        the target checkpoint (despite the original comment, it is a dot,
        not an arrow).
        """
        tarpos = getOffsetPos(self.targetCheckpointPos,midpos)
        # Point 1/10 along the line from the ship to the target.
        temp = (int(pos[0]+(tarpos[0]-pos[0])/10),
                int(pos[1]+(tarpos[1]-pos[1])/10))
        pygame.draw.circle(screen,(130,240,130),temp,2,2)
    def drawVariableLOS(self,screen,frame,midpos = (450,800)):
        """Draw the continuous line-of-sight sensor rays.

        Each entry of self.extrapos appears to be (hit, endpoint, distance)
        from the maze ray cast -- TODO confirm against
        maze.getMaximumSightDistance. Rays that hit a wall get a grey line
        plus a pulsating red circle whose intensity grows as the wall gets
        closer; unobstructed rays get a faint dark line.
        NOTE(review): `endpoint = []` in the else-branch is never used.
        """
        bp = self.getIntPos()
        bp = getOffsetPos(bp,midpos)
        for g in self.extrapos:
            #print("extrapos: ",g)
            if g[0]:
                # Wall hit: pulsating marker, stronger when the wall is near.
                drawPulsatingCirlce(screen, getOffsetPos(g[1],midpos),frame,size = 12,cycle_length = 60, colour = (255,0,0),magnitude = (self.sightLength - g[2]) / self.sightLength*0.3+0.7 )
                #pygame.draw.circle(screen,(230,40,30),getOffsetPos(g[0],midpos),8,1)
                #pygame.draw.circle(screen,(250,0,0),getOffsetPos(g[0],midpos),4,1)
                pygame.draw.line(screen,(100,100,100),bp,getOffsetPos(g[1],midpos),1)
                #pygame.draw.circle(screen,(30,140,130),[int(bp[0]),int(bp[1])],5,5)
            else:
                # Get the max length and draw a lighter line to show where the sensors are
                endpoint = []
                pygame.draw.line(screen,(30,30,30),bp,getOffsetPos(g[1],midpos),1)
def getName(self):
""" Get 6 letter "name" based on weight and bias totals """
l = []
for wt in self.weights:
l.append(chr( int( 97 + (sum(map(sum,wt)) * 10) % 26 ) ))
for bs in self.bias:
#print("BS: "+str(bs[0]))
l.append(chr( int( 97 + (sum(bs) * 10) % 26 ) ))
l[0] = chr(ord(l[0]) - 32)
self.name = ''.join(l)
return self.name
    def setName(self,newName):
        """ Changes the weights and biases randomly in order to have the
        getName() function return the name specified here.

        For each letter position, nudges random entries of the matching
        weight matrix (or bias vector) by +/-0.1, one nudge per unit of
        character distance between the current and desired letter.
        NOTE(review): convergence is not guaranteed -- the scaled-sum-mod-26
        mapping in getName() means a nudge can overshoot; this is a
        best-effort heuristic, typically applied repeatedly by callers.
        """
        for i, wt in enumerate(self.weights):
            tempcoef = 0
            # Signed distance from current letter to desired letter.
            tempoff = ord(newName[i]) - ord(self.getName()[i])
            if(tempoff > 0):
                tempcoef = 0.1
            else:
                tempcoef = -0.1
            #print("Was: "+newName + " " + self.getName() + " " + str(tempoff))
            tempoff = np.abs(tempoff)
            # One random-entry nudge per unit of letter distance.
            for j in range(tempoff):
                a = np.random.randint(wt.shape[0])
                b = np.random.randint(wt.shape[1])
                wt[a,b] += tempcoef
        for v, bs in enumerate(self.bias):
            tempcoef = 0
            # Bias letters follow the weight letters in the name.
            tempoff = ord(newName[v+len(self.weights)]) - ord(self.getName()[v+len(self.weights)])
            if(tempoff > 0):
                tempcoef = 0.1
            else:
                tempcoef = -0.1
            #print("Now: "+ str(v) + " " +newName + " " + self.getName() + " " + str(tempoff))
            tempoff = np.abs(tempoff)
            for j in range(tempoff):
                c = np.random.randint(bs.shape[0])
                bs[c] += tempcoef
def nameShip(self,newName,colour = None):
""" Forces the ship to conform to the name and colour provided """
self.setName(newName)
if colour is not None:
self.colour = colour
| bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
# Draw where the inputs are for decision making.
if(self.crashed == False or True):
self.drawTargetCheckpoint(screen,maze,bp,midpos = midpos)
for i,pos in enumerate(self.inputPos):
pygame.draw.circle(screen, self.inputColour[i], getOffsetPos(pos,midpos), 2,0) | identifier_body |
ship.py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:37:25 2018
@author: hoog
"""
from functions import *
import os
# Ship constants
sensor_colours = [(100,100,240),(240,100,100)] # NO WALL, WALL
class ship:
"""Class for holding an indivudual racer and all the variables it needs. """
############### INITIALIZATION #########################
# Functions that are run once at the start of each generation
########################################################
    def __init__(self, startpos = (75,75), angle = 0, colour = (240,100,100),
                 maxSpeed = 20, maxAccel = 1, maxAngle = 0.1,
                 width = 1600, height = 900, maze = None,
                 intermediates = (8,), inputdistance = [50,100,150], inputangle = [1.2,0.6,0,-0.6,-1.2],
                 parentname = "", parentcolour = (240,100,100), name = None,orders = [1,2,3,4,5,6,7,8]):
        """ Creates the ship with randomly assigned weights.

        startpos/angle: spawn position and heading. colour: draw colour.
        maxSpeed/maxAccel/maxAngle: physics limits. maze: track object the
        ship races on (must expose checkpoints). intermediates: hidden
        layer sizes. inputdistance/inputangle: sensor grid geometry (mode
        0) or ray angles (mode 1). orders: exponents used by the
        continuous-LOS sensors. parentname/parentcolour: lineage info for
        display. name: optional fixed name (otherwise derived from the
        weights).
        NOTE(review): mutable default arguments (lists) are shared across
        instances -- safe only while they are never mutated externally.
        """
        self.startpos, self.startangle, self.colour = startpos, angle, colour
        self.maxSpeed, self.maxAccel, self.maxAngle = maxSpeed, maxAccel, maxAngle
        self.maze = maze
        self.width, self.height = width, height
        self.parentname, self.parentcolour = parentname, parentcolour
        # Create dimensions array based on input, intermediate dimensions and output (4)
        self.inputType = 1 # 0: point, 1: linear
        self.setDimension(inputdistance,inputangle,intermediates,orders)
        self.drag = 0.99
        self.initWeights()
        # Maximum ray length for the continuous-LOS sensors.
        self.sightLength = 200
        if name is not None:
            self.name = name
        else:
            self.name = self.getName()
        self.reset()
    def setDimension(self,inputdistance,inputangle,intermediates, orders):
        """ Sets parameters needed for decision making.

        Builds self.dimensions = [input size, *hidden sizes, 4 outputs],
        where the input size depends on the sensor mode, and stores the
        sensor geometry for later use.
        """
        if self.inputType == 0: # Matrix of angles and distances
            self.dimensions = [len(inputdistance)*len(inputangle)]
        elif self.inputType == 1: # Only angles, each one getting one input
            self.dimensions = [len(inputangle)*len(orders)*2] # times 2 for having the orders work in reverse test
            # NOTE(review): inputdistance is deliberately collapsed to the
            # scalar 1 in this mode (distances are unused by ray sensors).
            inputdistance = 1
        self.dimensions.extend(intermediates)
        # Four outputs: turn left, turn right, accelerate, brake.
        self.dimensions.append(4)
        self.inputdistance, self.inputangle, self.intermediates, self.orders = inputdistance, inputangle, intermediates, orders
def reset(self):
""" Returns the ship to its starting location and reinitializes """
self.resetPos()
self.vx, self.vy = 0, 0
self.accel, self.dangle = 0, 0
self.crashed = False
self.timeDriving, self.score, self.checkpoint, self.laps = 0, 0, 0, 0
self.targetCheckpointPos = self.maze.checkpoints[0].getMidInt()
self.inputColour = [sensor_colours[0] for i in range(self.dimensions[0])]
self.scan = np.array([0 for i in range(self.dimensions[0])])
self.cost = [0 for i in range(6)]
#Extrapos for CTS LOS
self.extrapos = []
def resetPos(self):
""" Go back to start location """
self.angle = self.startangle
self.pos = []
self.pos.extend(self.startpos)
def newSpawn(self, colour = (100,100,240)):
self.initWeights()
self.name = self.getName()
self.parentname = ""
self.parentcolour = colour
def initWeights(self):
""" Initializes weights to randomly selected ones."""
self.weights = []
self.bias = []
for i, dim in enumerate(self.dimensions[1:]):
self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))
self.bias.append(np.random.uniform(-1,1,dim))
def copyWeights(self, shp, stray = 0, colour = (240,100,100)):
""" Changes weights to be around the ones provided by shp.
This is used to generate offspring from the shp provided.
"""
self.weights = []
self.bias = []
if(stray == 0): # straight copy
for i, wt in enumerate(shp.weights):
self.weights.append(wt.copy())
for i,bs in enumerate(shp.bias):
self.bias.append(bs.copy())
else: # Copy with some random added in
for i, wt in enumerate(shp.weights):
self.weights.append(np.add(wt.copy(), np.random.normal(0,stray,(shp.dimensions[i],shp.dimensions[i+1]))))
for i,bs in enumerate(shp.bias):
self.bias.append(np.add(bs.copy(), np.random.normal(0,stray,shp.dimensions[i+1])))
self.normalizeWeights()
self.colour = colour
self.parentname = shp.name
self.parentcolour = shp.colour
self.setDimension(shp.inputdistance,shp.inputangle,shp.intermediates,shp.orders)
def saveWeights(self, basename, generation):
""" Saves the np array of weights for easy loading later"""
for i,wt in enumerate(self.weights):
np.save("./data/"+basename+"/"+basename + "_W"+str(i)+"_G" + str(generation),wt)
for i,bs in enumerate(self.bias):
np.save("./data/"+basename+"/"+basename + "_B"+str(i)+"_G" + str(generation),bs)
def loadWeights(self,basename,generation,colour = None):
temp = "./data/"+basename+"/"+basename
self.weights = []
done = False
i = 0
while(not done):
wn = temp + "_W"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(wn)):
self.weights.append(np.load(wn))
else: done = True
i += 1
self.bias = []
done = False
i = 0
while(not done):
bn = temp + "_B"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(bn)):
self.bias.append(np.load(bn))
else: done = True
i += 1
if(colour is not None):
self.colour = colour
def normalizeWeights(self):
""" Make sure the weights and biases stay inside (-1,1) """
for wt in self.weights:
wt[wt>1] = 1
wt[wt<-1] = -1
for bs in self.bias:
bs[bs>1] = 1
bs[bs<-1] = -1
def copyWeightsExper(self, shp, stray = 0, colour = (240,100,100)):
""" version of copyWeights() that only take 1 element of each weight matrix
and changes it absolutely to a new value, regardless of the input value.
"""
self.copyWeights(shp, stray = stray, colour = colour)
for wt in self.weights:
i = np.random.randint(wt.shape[0])
j = np.random.randint(wt.shape[1])
wt[i,j] = np.random.uniform(-1,1,1)
for bs in self.bias:
i = np.random.randint(bs.shape[0])
bs[i] = np.random.uniform(-1,1,1)
############### UPDATE #################################
# Functions that may be used at each timestep of the race
########################################################
def moveShip(self,screen,maze):
""" Based on the ship's brain and inputs, get a decision for this
timestep and apply it to the acceleration, braking and turning
"""
self.checkCheckpoint()
angle = 0
accel = 0
controlInputs = self.getDecision()
angle -= logis(controlInputs[0]) * self.maxAngle
angle += logis(controlInputs[1]) * self.maxAngle
accel += logis(controlInputs[2]) * self.maxAccel
brake = logis(controlInputs[3])
self.updateSpeed(accel,angle,brake)
self.updatePos()
self.getInputs(maze)
def | (self):
"""Determines if we have passed a checkpoint this timestep"""
if self.maze.checkpoints[self.checkpoint].checkCollision(self.pos):
self.checkpoint +=1
if(self.checkpoint >= self.maze.checkpointsPerLap):
if(self.maze.mazeType == "circular"):
self.checkpoint = 0
self.laps +=1
elif(self.maze.mazeType == "linear"):
self.checkpoint = 0
self.laps +=1
self.resetPos()
self.targetCheckpointPos = self.maze.checkpoints[self.checkpoint].getMidInt()
def checkFuel(self):
""" Returns the score received based on checkpoint progress minus the time driving.
If this is below 0 the sihp is said to be out of fuel and crashes
"""
return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving
def updateSpeed(self,accel,dangle,brake):
""" Get new vx and vy to update position"""
self.dangle += dangle
self.dangle = self.dangle * self.drag*(1-brake/3)*0.6
self.angle += self.dangle
self.accel = accel
self.vx += accel * np.cos(self.angle)
self.vy += accel * np.sin(self.angle)
# flat cap on speed
if(self.vx > self.maxSpeed): self.vx = self.maxSpeed
if(self.vy > self.maxSpeed): self.vy = self.maxSpeed
if(self.vx < -1*self.maxSpeed): self.vx = -1*self.maxSpeed
if(self.vy < -1*self.maxSpeed): self.vy = -1*self.maxSpeed
# apply drag and braking to slow down
self.vx = self.vx * self.drag*(1-brake/3)
self.vy = self.vy * self.drag*(1-brake/3)
def updatePos(self):
""" Update where the ship is each timestep based on calculated velocity."""
self.timeDriving +=1
self.pos[0] += self.vx
self.pos[1] += self.vy
def getInputs(self,maze):
""" Determine which of the input locations are in walls / out of bounds
for the input vector
"""
self.inputPos = []
#Extrapos for CTS LOS
self.extrapos = []
i,j=0,0
# array of front views
for ang in self.inputangle:
blocked = False
if self.inputType == 0:
for dis in self.inputdistance:
self.inputPos.append([int(self.pos[0] + dis*np.cos(self.angle+ang)),
int(self.pos[1] + dis*np.sin(self.angle+ang))])
if(maze.checkCollisions(self.inputPos[i]) or blocked):
blocked = True
self.inputColour[i] = sensor_colours[1]
self.scan[i] = 0
else:
self.inputColour[i] = sensor_colours[0]
self.scan[i] = 1
i +=1
elif self.inputType == 1:
#eXPERIMENTAL STUFF FOR continuous LOS
self.extrapos.append(maze.getMaximumSightDistance(self.pos, self.angle+ang, self.sightLength))
temp_length = self.extrapos[i][2]
for ord in self.orders:
self.scan[j] = (temp_length/self.sightLength)**ord
self.scan[j+1] = ((self.sightLength-temp_length)/self.sightLength)**ord # add opposite way, gets stronger closer instead of stronger further. might make a difference.
j +=2
i+=1
def getDecision(self):
""" Use the input vector and all the weights to decide how to control
the ship this timestep.
"""
temp = []
temp.append( np.array(self.scan) )
for i,wt in enumerate(self.weights):
#print(self.bias[i],temp[i].dot(wt))
temp.append(np.add(temp[i].dot(wt),self.bias[i]))
#print(str(self.bias) + " " + str(wt))
return temp[len(self.weights)].tolist() # np.add(np.add(np.add(self.scan.dot(self.weights[0]), self.bias[0]).dot(self.weights[1]),self.bias[1]).dot(self.weights[2]),self.bias[2]).T
def getScore(self):
""" determine the current score of the ship """
tempscore = 1000 - 0.01*self.timeDriving
tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)
tempscore += self.checkpoint *1000
tempscore += self.laps * 1000 * len(self.maze.checkpoints)
return tempscore
def crash(self):
""" Once the ship's run has expired it crashes. Here its score is
tallied and it is stopped until it is reset The cost increases as
weights tend away from 0, resulting in fewer extreme weights
"""
self.cost = 0
for wt in self.weights:
self.cost += np.abs(wt).sum()
for bs in self.bias:
self.cost += np.abs(bs).sum()
self.score -= 0.00001*self.cost
# Score improves with distance and time driving
self.score += self.getScore()
# Stop the ship from going further
self.crashed = True
self.vx = 0
self.vy = 0
self.accel = 0
self.dangle = 0
#print(self.getName() + " has crashed at: " + str(self.pos[0])+ " " + str(self.pos[1]))
def getIntPos(self):
"""Returns the current ship position as a tuple of integers """
return (int(self.pos[0]),int(self.pos[1]))
############### VISUAL #################################
# Functions related to creating various visual effects on screen
########################################################
def drawShip(self,screen,maze,frame,midpos = (450,800),zoom = 1,fancyShip = False, drawThrusters = True):
""" Draw triangular ship, get the input values and draw a red or blue
circle at their location
"""
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
# Draw Inputs
if not self.crashed:
if self.inputType == 0:
self.drawPointInputs(screen,maze,midpos=midpos)
elif self.inputType == 1:
self.drawVariableLOS(screen,frame,midpos=midpos)
# if(fancyShip): pygame.draw.polygon(screen, self.parentcolour,
# [[int(bp[0]+ 10 *np.cos(self.angle+3.14)),
# int(bp[1]+ 10 *np.sin(self.angle+3.14))],
# [int(bp[0]+ 10 *np.cos(self.angle+1)),
# int(bp[1]+ 10 *np.sin(self.angle+1))],
# [int(bp[0]),
# int(bp[1])],
# [int(bp[0]+ 10 *np.cos(self.angle-1)),
# int(bp[1]+ 10 *np.sin(self.angle-1))]])
# draw thrusters
if not self.crashed:
if(drawThrusters):
pygame.draw.polygon(screen, (140,140,40),
[[int(bp[0]+ self.accel*22 *np.cos(self.angle+3.14)),
int(bp[1]+ self.accel*22 *np.sin(self.angle+3.14))],
[int(bp[0]+ 7 *np.cos(self.angle + 2.64)),
int(bp[1]+ 7 *np.sin(self.angle + 2.64))],
[int(bp[0]+ 7 *np.cos(self.angle + 3.64)),
int(bp[1]+ 7 *np.sin(self.angle + 3.64))]])
pygame.draw.polygon(screen, (140,140,40),
[[int(bp[0]+ self.dangle*60 *np.cos(self.angle-1.57) + 7*np.cos(self.angle)),
int(bp[1]+ self.dangle*60 *np.sin(self.angle-1.57) + 7*np.sin(self.angle))],
[int(bp[0]+ 5 *np.cos(self.angle)),
int(bp[1]+ 5 *np.sin(self.angle))],
[int(bp[0]+ 9 *np.cos(self.angle)),
int(bp[1]+ 9 *np.sin(self.angle))]])
# draw ship
pygame.draw.polygon(screen, self.colour,
[[int(bp[0]+ 10 *np.cos(self.angle-0.15)),
int(bp[1]+ 10 *np.sin(self.angle-0.15))],
[int(bp[0]+ 10 *np.cos(self.angle+0.15)),
int(bp[1]+ 10 *np.sin(self.angle+0.15))],
[int(bp[0]+ 10 *np.cos(self.angle + 2.64)),
int(bp[1]+ 10 *np.sin(self.angle + 2.64))],
[int(bp[0]+ 10 *np.cos(self.angle + 3.64)),
int(bp[1]+ 10 *np.sin(self.angle + 3.64))]])
# Draw the cockpit
pygame.draw.circle(screen, (140,160,240), bp, 5,2)
def drawMatrix(self,screen,pos):
""" Draw a bunch of squares that light up red of green based on
different points in the decision process
"""
bp = pos # base position bp
namesurface = myfont.render(self.parentname, False, self.parentcolour)
screen.blit(namesurface,(bp[0]-50,bp[1] -60),)
tempOffset = namesurface.get_width()
namesurface = myfont.render(self.name, False, self.colour)
screen.blit(namesurface,(bp[0]-40 ,bp[1]-30),)
size = 10
separationx = 12
separationy = 20
# Cycle through array of inputs
for i in range(self.dimensions[0]):
# Create red - green colour based on array
temp_colour = (int((1-self.scan[i])*240),int(self.scan[i]*240),0)
# Draw square that is slightly offset of previous square
pygame.draw.rect(screen,temp_colour ,(bp[0] - separationx *int(i / len(self.inputdistance)),
bp[1] - separationx*(i%len(self.inputdistance)) + 3*separationx,size,size))
# Calculate intermediate decision array
temp_vector = self.scan
# Repeat
for j, bs in enumerate(self.bias):
temp_vector = np.add(temp_vector.dot(self.weights[j]), bs)
for i in range(temp_vector.shape[0]):
temp_colour = (int(max(min((1-temp_vector[i])*240,240),0)),int(max(min(temp_vector[i]*240,240),0)),0)
pygame.draw.rect(screen,temp_colour ,(bp[0] + (j+1)*separationy,bp[1] + separationx*i,size,size))
def drawPointInputs(self,screen,maze,midpos = (450,800)):
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
# Draw where the inputs are for decision making.
if(self.crashed == False or True):
self.drawTargetCheckpoint(screen,maze,bp,midpos = midpos)
for i,pos in enumerate(self.inputPos):
pygame.draw.circle(screen, self.inputColour[i], getOffsetPos(pos,midpos), 2,0)
def highlight(self,screen,midpos = (800,450)):
""" Draw some expanding circles around the ship """
posInt = self.getIntPos()
posInt = getOffsetPos(posInt,midpos)
pygame.draw.circle(screen, [max(0,tmp - (10 - self.timeDriving%10)*10) for tmp in self.colour],
posInt, int(10+ (self.timeDriving%10 )),2)
pygame.draw.circle(screen, self.colour, posInt, int(20+ (self.timeDriving%10 )),2)
pygame.draw.circle(screen, [max(0,tmp - (self.timeDriving%10)*10) for tmp in self.colour],
posInt, int(30+ (self.timeDriving%10 )),2)
def drawTargetCheckpoint(self,screen,maze,pos,midpos = (450,800)):
""" Draw an arrow pointing to the next checkpoint we must reach """
tarpos = getOffsetPos(self.targetCheckpointPos,midpos)
temp = (int(pos[0]+(tarpos[0]-pos[0])/10),
int(pos[1]+(tarpos[1]-pos[1])/10))
pygame.draw.circle(screen,(130,240,130),temp,2,2)
def drawVariableLOS(self,screen,frame,midpos = (450,800)):
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
for g in self.extrapos:
#print("extrapos: ",g)
if g[0]:
drawPulsatingCirlce(screen, getOffsetPos(g[1],midpos),frame,size = 12,cycle_length = 60, colour = (255,0,0),magnitude = (self.sightLength - g[2]) / self.sightLength*0.3+0.7 )
#pygame.draw.circle(screen,(230,40,30),getOffsetPos(g[0],midpos),8,1)
#pygame.draw.circle(screen,(250,0,0),getOffsetPos(g[0],midpos),4,1)
pygame.draw.line(screen,(100,100,100),bp,getOffsetPos(g[1],midpos),1)
#pygame.draw.circle(screen,(30,140,130),[int(bp[0]),int(bp[1])],5,5)
else:
# Get the max length and draw a lighter line to show where the sensors are
endpoint = []
pygame.draw.line(screen,(30,30,30),bp,getOffsetPos(g[1],midpos),1)
def getName(self):
""" Get 6 letter "name" based on weight and bias totals """
l = []
for wt in self.weights:
l.append(chr( int( 97 + (sum(map(sum,wt)) * 10) % 26 ) ))
for bs in self.bias:
#print("BS: "+str(bs[0]))
l.append(chr( int( 97 + (sum(bs) * 10) % 26 ) ))
l[0] = chr(ord(l[0]) - 32)
self.name = ''.join(l)
return self.name
def setName(self,newName):
""" Changes the weights and biases randomly in order to have the
getName() function return the name specified here """
for i, wt in enumerate(self.weights):
tempcoef = 0
tempoff = ord(newName[i]) - ord(self.getName()[i])
if(tempoff > 0):
tempcoef = 0.1
else:
tempcoef = -0.1
#print("Was: "+newName + " " + self.getName() + " " + str(tempoff))
tempoff = np.abs(tempoff)
for j in range(tempoff):
a = np.random.randint(wt.shape[0])
b = np.random.randint(wt.shape[1])
wt[a,b] += tempcoef
for v, bs in enumerate(self.bias):
tempcoef = 0
tempoff = ord(newName[v+len(self.weights)]) - ord(self.getName()[v+len(self.weights)])
if(tempoff > 0):
tempcoef = 0.1
else:
tempcoef = -0.1
#print("Now: "+ str(v) + " " +newName + " " + self.getName() + " " + str(tempoff))
tempoff = np.abs(tempoff)
for j in range(tempoff):
c = np.random.randint(bs.shape[0])
bs[c] += tempcoef
def nameShip(self,newName,colour = None):
""" Forces the ship to conform to the name and colour provided """
self.setName(newName)
if colour is not None:
self.colour = colour
| checkCheckpoint | identifier_name |
ship.py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:37:25 2018
@author: hoog
"""
from functions import *
import os
# Ship constants
sensor_colours = [(100,100,240),(240,100,100)] # NO WALL, WALL
class ship:
"""Class for holding an indivudual racer and all the variables it needs. """
############### INITIALIZATION #########################
# Functions that are run once at the start of each generation
########################################################
def __init__(self, startpos = (75,75), angle = 0, colour = (240,100,100),
maxSpeed = 20, maxAccel = 1, maxAngle = 0.1,
width = 1600, height = 900, maze = None,
intermediates = (8,), inputdistance = [50,100,150], inputangle = [1.2,0.6,0,-0.6,-1.2],
parentname = "", parentcolour = (240,100,100), name = None,orders = [1,2,3,4,5,6,7,8]):
""" Creates the ship with randomly assigned weights """
self.startpos, self.startangle, self.colour = startpos, angle, colour
self.maxSpeed, self.maxAccel, self.maxAngle = maxSpeed, maxAccel, maxAngle
self.maze = maze
self.width, self.height = width, height
self.parentname, self.parentcolour = parentname, parentcolour
# Create dimensions array based on input, intermediate dimensions and output (4)
self.inputType = 1 # 0: point, 1: linear
self.setDimension(inputdistance,inputangle,intermediates,orders)
self.drag = 0.99
self.initWeights()
self.sightLength = 200
if name is not None:
self.name = name
else:
self.name = self.getName()
self.reset()
def setDimension(self,inputdistance,inputangle,intermediates, orders):
""" Sets parameters needed for decision making """
if self.inputType == 0: # Matrix of angles and distances
self.dimensions = [len(inputdistance)*len(inputangle)]
elif self.inputType == 1: # Only angles, each one getting one input
self.dimensions = [len(inputangle)*len(orders)*2] # times 2 for having the orders work in reverse test
inputdistance = 1
self.dimensions.extend(intermediates)
self.dimensions.append(4)
self.inputdistance, self.inputangle, self.intermediates, self.orders = inputdistance, inputangle, intermediates, orders
def reset(self):
""" Returns the ship to its starting location and reinitializes """
self.resetPos()
self.vx, self.vy = 0, 0
self.accel, self.dangle = 0, 0
self.crashed = False
self.timeDriving, self.score, self.checkpoint, self.laps = 0, 0, 0, 0
self.targetCheckpointPos = self.maze.checkpoints[0].getMidInt()
self.inputColour = [sensor_colours[0] for i in range(self.dimensions[0])]
self.scan = np.array([0 for i in range(self.dimensions[0])])
self.cost = [0 for i in range(6)]
#Extrapos for CTS LOS
self.extrapos = []
def resetPos(self):
""" Go back to start location """
self.angle = self.startangle
self.pos = []
self.pos.extend(self.startpos)
def newSpawn(self, colour = (100,100,240)):
self.initWeights()
self.name = self.getName()
self.parentname = ""
self.parentcolour = colour
def initWeights(self):
""" Initializes weights to randomly selected ones."""
self.weights = []
self.bias = []
for i, dim in enumerate(self.dimensions[1:]):
self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))
self.bias.append(np.random.uniform(-1,1,dim))
def copyWeights(self, shp, stray = 0, colour = (240,100,100)):
""" Changes weights to be around the ones provided by shp.
This is used to generate offspring from the shp provided.
"""
self.weights = []
self.bias = []
if(stray == 0): # straight copy
for i, wt in enumerate(shp.weights):
self.weights.append(wt.copy())
for i,bs in enumerate(shp.bias):
self.bias.append(bs.copy())
else: # Copy with some random added in
for i, wt in enumerate(shp.weights):
self.weights.append(np.add(wt.copy(), np.random.normal(0,stray,(shp.dimensions[i],shp.dimensions[i+1]))))
for i,bs in enumerate(shp.bias):
self.bias.append(np.add(bs.copy(), np.random.normal(0,stray,shp.dimensions[i+1])))
self.normalizeWeights()
self.colour = colour
self.parentname = shp.name
self.parentcolour = shp.colour
self.setDimension(shp.inputdistance,shp.inputangle,shp.intermediates,shp.orders)
def saveWeights(self, basename, generation):
""" Saves the np array of weights for easy loading later"""
for i,wt in enumerate(self.weights):
np.save("./data/"+basename+"/"+basename + "_W"+str(i)+"_G" + str(generation),wt)
for i,bs in enumerate(self.bias):
np.save("./data/"+basename+"/"+basename + "_B"+str(i)+"_G" + str(generation),bs)
def loadWeights(self,basename,generation,colour = None):
temp = "./data/"+basename+"/"+basename
self.weights = []
done = False
i = 0
while(not done):
wn = temp + "_W"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(wn)):
self.weights.append(np.load(wn))
else: done = True
i += 1
self.bias = []
done = False
i = 0
while(not done):
bn = temp + "_B"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(bn)):
self.bias.append(np.load(bn))
else: done = True
i += 1
if(colour is not None):
self.colour = colour
def normalizeWeights(self):
""" Make sure the weights and biases stay inside (-1,1) """
for wt in self.weights:
wt[wt>1] = 1
wt[wt<-1] = -1
for bs in self.bias:
bs[bs>1] = 1
bs[bs<-1] = -1
def copyWeightsExper(self, shp, stray = 0, colour = (240,100,100)):
""" version of copyWeights() that only take 1 element of each weight matrix
and changes it absolutely to a new value, regardless of the input value.
"""
self.copyWeights(shp, stray = stray, colour = colour)
for wt in self.weights:
i = np.random.randint(wt.shape[0])
j = np.random.randint(wt.shape[1])
wt[i,j] = np.random.uniform(-1,1,1)
for bs in self.bias:
i = np.random.randint(bs.shape[0])
bs[i] = np.random.uniform(-1,1,1)
############### UPDATE #################################
# Functions that may be used at each timestep of the race
########################################################
def moveShip(self,screen,maze):
""" Based on the ship's brain and inputs, get a decision for this
timestep and apply it to the acceleration, braking and turning
"""
self.checkCheckpoint()
angle = 0
accel = 0
controlInputs = self.getDecision()
angle -= logis(controlInputs[0]) * self.maxAngle
angle += logis(controlInputs[1]) * self.maxAngle
accel += logis(controlInputs[2]) * self.maxAccel
brake = logis(controlInputs[3])
self.updateSpeed(accel,angle,brake)
self.updatePos()
self.getInputs(maze)
def checkCheckpoint(self):
"""Determines if we have passed a checkpoint this timestep"""
if self.maze.checkpoints[self.checkpoint].checkCollision(self.pos):
self.checkpoint +=1
if(self.checkpoint >= self.maze.checkpointsPerLap):
if(self.maze.mazeType == "circular"):
self.checkpoint = 0
self.laps +=1
elif(self.maze.mazeType == "linear"):
self.checkpoint = 0
self.laps +=1
self.resetPos()
self.targetCheckpointPos = self.maze.checkpoints[self.checkpoint].getMidInt()
def checkFuel(self):
""" Returns the score received based on checkpoint progress minus the time driving.
If this is below 0 the sihp is said to be out of fuel and crashes
"""
return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving
def updateSpeed(self,accel,dangle,brake):
""" Get new vx and vy to update position"""
self.dangle += dangle
self.dangle = self.dangle * self.drag*(1-brake/3)*0.6
self.angle += self.dangle
self.accel = accel
self.vx += accel * np.cos(self.angle)
self.vy += accel * np.sin(self.angle)
# flat cap on speed
if(self.vx > self.maxSpeed): self.vx = self.maxSpeed
if(self.vy > self.maxSpeed): self.vy = self.maxSpeed
if(self.vx < -1*self.maxSpeed): self.vx = -1*self.maxSpeed
if(self.vy < -1*self.maxSpeed): self.vy = -1*self.maxSpeed
# apply drag and braking to slow down
self.vx = self.vx * self.drag*(1-brake/3)
self.vy = self.vy * self.drag*(1-brake/3)
def updatePos(self):
""" Update where the ship is each timestep based on calculated velocity."""
self.timeDriving +=1
self.pos[0] += self.vx
self.pos[1] += self.vy
def getInputs(self,maze):
""" Determine which of the input locations are in walls / out of bounds
for the input vector
"""
self.inputPos = []
#Extrapos for CTS LOS
self.extrapos = []
i,j=0,0
# array of front views
for ang in self.inputangle:
blocked = False
if self.inputType == 0:
for dis in self.inputdistance:
self.inputPos.append([int(self.pos[0] + dis*np.cos(self.angle+ang)),
int(self.pos[1] + dis*np.sin(self.angle+ang))])
if(maze.checkCollisions(self.inputPos[i]) or blocked):
blocked = True
self.inputColour[i] = sensor_colours[1]
self.scan[i] = 0
else:
self.inputColour[i] = sensor_colours[0]
self.scan[i] = 1
i +=1
elif self.inputType == 1:
#eXPERIMENTAL STUFF FOR continuous LOS
self.extrapos.append(maze.getMaximumSightDistance(self.pos, self.angle+ang, self.sightLength))
temp_length = self.extrapos[i][2]
for ord in self.orders:
self.scan[j] = (temp_length/self.sightLength)**ord
self.scan[j+1] = ((self.sightLength-temp_length)/self.sightLength)**ord # add opposite way, gets stronger closer instead of stronger further. might make a difference.
j +=2
i+=1
def getDecision(self):
""" Use the input vector and all the weights to decide how to control
the ship this timestep.
"""
temp = []
temp.append( np.array(self.scan) )
for i,wt in enumerate(self.weights):
#print(self.bias[i],temp[i].dot(wt))
temp.append(np.add(temp[i].dot(wt),self.bias[i]))
#print(str(self.bias) + " " + str(wt))
return temp[len(self.weights)].tolist() # np.add(np.add(np.add(self.scan.dot(self.weights[0]), self.bias[0]).dot(self.weights[1]),self.bias[1]).dot(self.weights[2]),self.bias[2]).T
def getScore(self):
""" determine the current score of the ship """
tempscore = 1000 - 0.01*self.timeDriving
tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)
tempscore += self.checkpoint *1000
tempscore += self.laps * 1000 * len(self.maze.checkpoints)
return tempscore
def crash(self):
""" Once the ship's run has expired it crashes. Here its score is
tallied and it is stopped until it is reset The cost increases as
weights tend away from 0, resulting in fewer extreme weights
"""
self.cost = 0
for wt in self.weights:
self.cost += np.abs(wt).sum()
for bs in self.bias:
self.cost += np.abs(bs).sum()
self.score -= 0.00001*self.cost
# Score improves with distance and time driving
self.score += self.getScore()
# Stop the ship from going further
self.crashed = True
self.vx = 0
self.vy = 0
self.accel = 0
self.dangle = 0
#print(self.getName() + " has crashed at: " + str(self.pos[0])+ " " + str(self.pos[1]))
def getIntPos(self):
"""Returns the current ship position as a tuple of integers """
return (int(self.pos[0]),int(self.pos[1]))
############### VISUAL #################################
# Functions related to creating various visual effects on screen
########################################################
def drawShip(self,screen,maze,frame,midpos = (450,800),zoom = 1,fancyShip = False, drawThrusters = True):
""" Draw triangular ship, get the input values and draw a red or blue
circle at their location
"""
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
# Draw Inputs
if not self.crashed:
if self.inputType == 0:
self.drawPointInputs(screen,maze,midpos=midpos)
elif self.inputType == 1:
self.drawVariableLOS(screen,frame,midpos=midpos)
# if(fancyShip): pygame.draw.polygon(screen, self.parentcolour,
# [[int(bp[0]+ 10 *np.cos(self.angle+3.14)),
# int(bp[1]+ 10 *np.sin(self.angle+3.14))],
# [int(bp[0]+ 10 *np.cos(self.angle+1)),
# int(bp[1]+ 10 *np.sin(self.angle+1))],
# [int(bp[0]),
# int(bp[1])],
# [int(bp[0]+ 10 *np.cos(self.angle-1)),
# int(bp[1]+ 10 *np.sin(self.angle-1))]])
# draw thrusters
if not self.crashed:
if(drawThrusters):
pygame.draw.polygon(screen, (140,140,40),
[[int(bp[0]+ self.accel*22 *np.cos(self.angle+3.14)),
int(bp[1]+ self.accel*22 *np.sin(self.angle+3.14))],
[int(bp[0]+ 7 *np.cos(self.angle + 2.64)),
int(bp[1]+ 7 *np.sin(self.angle + 2.64))],
[int(bp[0]+ 7 *np.cos(self.angle + 3.64)),
int(bp[1]+ 7 *np.sin(self.angle + 3.64))]])
pygame.draw.polygon(screen, (140,140,40),
[[int(bp[0]+ self.dangle*60 *np.cos(self.angle-1.57) + 7*np.cos(self.angle)),
int(bp[1]+ self.dangle*60 *np.sin(self.angle-1.57) + 7*np.sin(self.angle))],
[int(bp[0]+ 5 *np.cos(self.angle)),
int(bp[1]+ 5 *np.sin(self.angle))],
[int(bp[0]+ 9 *np.cos(self.angle)),
int(bp[1]+ 9 *np.sin(self.angle))]])
# draw ship
pygame.draw.polygon(screen, self.colour,
[[int(bp[0]+ 10 *np.cos(self.angle-0.15)),
int(bp[1]+ 10 *np.sin(self.angle-0.15))],
[int(bp[0]+ 10 *np.cos(self.angle+0.15)),
int(bp[1]+ 10 *np.sin(self.angle+0.15))],
[int(bp[0]+ 10 *np.cos(self.angle + 2.64)),
int(bp[1]+ 10 *np.sin(self.angle + 2.64))],
[int(bp[0]+ 10 *np.cos(self.angle + 3.64)),
int(bp[1]+ 10 *np.sin(self.angle + 3.64))]])
# Draw the cockpit
pygame.draw.circle(screen, (140,160,240), bp, 5,2)
def drawMatrix(self,screen,pos):
""" Draw a bunch of squares that light up red of green based on
different points in the decision process
"""
bp = pos # base position bp
namesurface = myfont.render(self.parentname, False, self.parentcolour)
screen.blit(namesurface,(bp[0]-50,bp[1] -60),)
tempOffset = namesurface.get_width()
namesurface = myfont.render(self.name, False, self.colour)
screen.blit(namesurface,(bp[0]-40 ,bp[1]-30),)
size = 10
separationx = 12
separationy = 20
# Cycle through array of inputs
for i in range(self.dimensions[0]):
# Create red - green colour based on array
temp_colour = (int((1-self.scan[i])*240),int(self.scan[i]*240),0)
# Draw square that is slightly offset of previous square
pygame.draw.rect(screen,temp_colour ,(bp[0] - separationx *int(i / len(self.inputdistance)),
bp[1] - separationx*(i%len(self.inputdistance)) + 3*separationx,size,size))
# Calculate intermediate decision array
temp_vector = self.scan
# Repeat
for j, bs in enumerate(self.bias):
temp_vector = np.add(temp_vector.dot(self.weights[j]), bs)
for i in range(temp_vector.shape[0]):
temp_colour = (int(max(min((1-temp_vector[i])*240,240),0)),int(max(min(temp_vector[i]*240,240),0)),0)
pygame.draw.rect(screen,temp_colour ,(bp[0] + (j+1)*separationy,bp[1] + separationx*i,size,size))
def drawPointInputs(self,screen,maze,midpos = (450,800)):
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
# Draw where the inputs are for decision making.
if(self.crashed == False or True):
self.drawTargetCheckpoint(screen,maze,bp,midpos = midpos)
for i,pos in enumerate(self.inputPos):
pygame.draw.circle(screen, self.inputColour[i], getOffsetPos(pos,midpos), 2,0)
def highlight(self,screen,midpos = (800,450)):
""" Draw some expanding circles around the ship """
posInt = self.getIntPos()
posInt = getOffsetPos(posInt,midpos)
pygame.draw.circle(screen, [max(0,tmp - (10 - self.timeDriving%10)*10) for tmp in self.colour],
posInt, int(10+ (self.timeDriving%10 )),2)
pygame.draw.circle(screen, self.colour, posInt, int(20+ (self.timeDriving%10 )),2)
pygame.draw.circle(screen, [max(0,tmp - (self.timeDriving%10)*10) for tmp in self.colour],
posInt, int(30+ (self.timeDriving%10 )),2)
def drawTargetCheckpoint(self,screen,maze,pos,midpos = (450,800)):
""" Draw an arrow pointing to the next checkpoint we must reach """
tarpos = getOffsetPos(self.targetCheckpointPos,midpos)
temp = (int(pos[0]+(tarpos[0]-pos[0])/10),
int(pos[1]+(tarpos[1]-pos[1])/10))
pygame.draw.circle(screen,(130,240,130),temp,2,2)
def drawVariableLOS(self,screen,frame,midpos = (450,800)):
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
for g in self.extrapos:
#print("extrapos: ",g)
if g[0]:
drawPulsatingCirlce(screen, getOffsetPos(g[1],midpos),frame,size = 12,cycle_length = 60, colour = (255,0,0),magnitude = (self.sightLength - g[2]) / self.sightLength*0.3+0.7 )
#pygame.draw.circle(screen,(230,40,30),getOffsetPos(g[0],midpos),8,1)
#pygame.draw.circle(screen,(250,0,0),getOffsetPos(g[0],midpos),4,1)
pygame.draw.line(screen,(100,100,100),bp,getOffsetPos(g[1],midpos),1)
#pygame.draw.circle(screen,(30,140,130),[int(bp[0]),int(bp[1])],5,5)
else:
# Get the max length and draw a lighter line to show where the sensors are
|
def getName(self):
""" Get 6 letter "name" based on weight and bias totals """
l = []
for wt in self.weights:
l.append(chr( int( 97 + (sum(map(sum,wt)) * 10) % 26 ) ))
for bs in self.bias:
#print("BS: "+str(bs[0]))
l.append(chr( int( 97 + (sum(bs) * 10) % 26 ) ))
l[0] = chr(ord(l[0]) - 32)
self.name = ''.join(l)
return self.name
def setName(self,newName):
""" Changes the weights and biases randomly in order to have the
getName() function return the name specified here """
for i, wt in enumerate(self.weights):
tempcoef = 0
tempoff = ord(newName[i]) - ord(self.getName()[i])
if(tempoff > 0):
tempcoef = 0.1
else:
tempcoef = -0.1
#print("Was: "+newName + " " + self.getName() + " " + str(tempoff))
tempoff = np.abs(tempoff)
for j in range(tempoff):
a = np.random.randint(wt.shape[0])
b = np.random.randint(wt.shape[1])
wt[a,b] += tempcoef
for v, bs in enumerate(self.bias):
tempcoef = 0
tempoff = ord(newName[v+len(self.weights)]) - ord(self.getName()[v+len(self.weights)])
if(tempoff > 0):
tempcoef = 0.1
else:
tempcoef = -0.1
#print("Now: "+ str(v) + " " +newName + " " + self.getName() + " " + str(tempoff))
tempoff = np.abs(tempoff)
for j in range(tempoff):
c = np.random.randint(bs.shape[0])
bs[c] += tempcoef
def nameShip(self,newName,colour = None):
""" Forces the ship to conform to the name and colour provided """
self.setName(newName)
if colour is not None:
self.colour = colour
| endpoint = []
pygame.draw.line(screen,(30,30,30),bp,getOffsetPos(g[1],midpos),1) | conditional_block |
ship.py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:37:25 2018
@author: hoog
"""
from functions import *
import os
# Ship constants
sensor_colours = [(100,100,240),(240,100,100)] # NO WALL, WALL
class ship:
"""Class for holding an indivudual racer and all the variables it needs. """
############### INITIALIZATION #########################
# Functions that are run once at the start of each generation
########################################################
def __init__(self, startpos = (75,75), angle = 0, colour = (240,100,100),
maxSpeed = 20, maxAccel = 1, maxAngle = 0.1,
width = 1600, height = 900, maze = None,
intermediates = (8,), inputdistance = [50,100,150], inputangle = [1.2,0.6,0,-0.6,-1.2],
parentname = "", parentcolour = (240,100,100), name = None,orders = [1,2,3,4,5,6,7,8]):
""" Creates the ship with randomly assigned weights """
self.startpos, self.startangle, self.colour = startpos, angle, colour
self.maxSpeed, self.maxAccel, self.maxAngle = maxSpeed, maxAccel, maxAngle
self.maze = maze
self.width, self.height = width, height
self.parentname, self.parentcolour = parentname, parentcolour
# Create dimensions array based on input, intermediate dimensions and output (4)
self.inputType = 1 # 0: point, 1: linear
self.setDimension(inputdistance,inputangle,intermediates,orders)
self.drag = 0.99
self.initWeights()
self.sightLength = 200
if name is not None:
self.name = name
else:
self.name = self.getName()
self.reset()
def setDimension(self,inputdistance,inputangle,intermediates, orders):
""" Sets parameters needed for decision making """
if self.inputType == 0: # Matrix of angles and distances
self.dimensions = [len(inputdistance)*len(inputangle)]
elif self.inputType == 1: # Only angles, each one getting one input
self.dimensions = [len(inputangle)*len(orders)*2] # times 2 for having the orders work in reverse test
inputdistance = 1
self.dimensions.extend(intermediates)
self.dimensions.append(4)
self.inputdistance, self.inputangle, self.intermediates, self.orders = inputdistance, inputangle, intermediates, orders
def reset(self):
""" Returns the ship to its starting location and reinitializes """
self.resetPos()
self.vx, self.vy = 0, 0
self.accel, self.dangle = 0, 0
self.crashed = False
self.timeDriving, self.score, self.checkpoint, self.laps = 0, 0, 0, 0
self.targetCheckpointPos = self.maze.checkpoints[0].getMidInt()
self.inputColour = [sensor_colours[0] for i in range(self.dimensions[0])]
self.scan = np.array([0 for i in range(self.dimensions[0])])
self.cost = [0 for i in range(6)]
#Extrapos for CTS LOS
self.extrapos = []
def resetPos(self):
""" Go back to start location """
self.angle = self.startangle
self.pos = []
self.pos.extend(self.startpos)
def newSpawn(self, colour = (100,100,240)):
self.initWeights()
self.name = self.getName()
self.parentname = ""
self.parentcolour = colour
def initWeights(self):
""" Initializes weights to randomly selected ones."""
self.weights = []
self.bias = []
for i, dim in enumerate(self.dimensions[1:]):
self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))
self.bias.append(np.random.uniform(-1,1,dim))
def copyWeights(self, shp, stray = 0, colour = (240,100,100)):
""" Changes weights to be around the ones provided by shp.
This is used to generate offspring from the shp provided.
"""
self.weights = []
self.bias = []
if(stray == 0): # straight copy
for i, wt in enumerate(shp.weights):
self.weights.append(wt.copy())
for i,bs in enumerate(shp.bias):
self.bias.append(bs.copy())
else: # Copy with some random added in
for i, wt in enumerate(shp.weights):
self.weights.append(np.add(wt.copy(), np.random.normal(0,stray,(shp.dimensions[i],shp.dimensions[i+1]))))
for i,bs in enumerate(shp.bias):
self.bias.append(np.add(bs.copy(), np.random.normal(0,stray,shp.dimensions[i+1])))
self.normalizeWeights()
self.colour = colour
self.parentname = shp.name
self.parentcolour = shp.colour
self.setDimension(shp.inputdistance,shp.inputangle,shp.intermediates,shp.orders)
def saveWeights(self, basename, generation):
""" Saves the np array of weights for easy loading later"""
for i,wt in enumerate(self.weights):
np.save("./data/"+basename+"/"+basename + "_W"+str(i)+"_G" + str(generation),wt)
for i,bs in enumerate(self.bias):
np.save("./data/"+basename+"/"+basename + "_B"+str(i)+"_G" + str(generation),bs)
def loadWeights(self,basename,generation,colour = None):
temp = "./data/"+basename+"/"+basename
self.weights = []
done = False
i = 0
while(not done):
wn = temp + "_W"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(wn)):
self.weights.append(np.load(wn))
else: done = True
i += 1
self.bias = []
done = False
i = 0
while(not done):
bn = temp + "_B"+str(i)+"_G" + str(generation)+".npy"
if(os.path.isfile(bn)):
self.bias.append(np.load(bn))
else: done = True
i += 1
if(colour is not None):
self.colour = colour
def normalizeWeights(self):
""" Make sure the weights and biases stay inside (-1,1) """
for wt in self.weights:
wt[wt>1] = 1
wt[wt<-1] = -1
for bs in self.bias:
bs[bs>1] = 1
bs[bs<-1] = -1
def copyWeightsExper(self, shp, stray = 0, colour = (240,100,100)):
""" version of copyWeights() that only take 1 element of each weight matrix
and changes it absolutely to a new value, regardless of the input value.
"""
self.copyWeights(shp, stray = stray, colour = colour)
for wt in self.weights:
i = np.random.randint(wt.shape[0])
j = np.random.randint(wt.shape[1])
wt[i,j] = np.random.uniform(-1,1,1)
for bs in self.bias:
i = np.random.randint(bs.shape[0])
bs[i] = np.random.uniform(-1,1,1)
############### UPDATE #################################
# Functions that may be used at each timestep of the race
########################################################
def moveShip(self,screen,maze):
""" Based on the ship's brain and inputs, get a decision for this
timestep and apply it to the acceleration, braking and turning
"""
self.checkCheckpoint()
angle = 0
accel = 0
controlInputs = self.getDecision()
angle -= logis(controlInputs[0]) * self.maxAngle
angle += logis(controlInputs[1]) * self.maxAngle
accel += logis(controlInputs[2]) * self.maxAccel
brake = logis(controlInputs[3])
self.updateSpeed(accel,angle,brake)
self.updatePos()
self.getInputs(maze)
def checkCheckpoint(self):
"""Determines if we have passed a checkpoint this timestep"""
if self.maze.checkpoints[self.checkpoint].checkCollision(self.pos):
self.checkpoint +=1
if(self.checkpoint >= self.maze.checkpointsPerLap):
if(self.maze.mazeType == "circular"):
self.checkpoint = 0
self.laps +=1
elif(self.maze.mazeType == "linear"):
self.checkpoint = 0
self.laps +=1
self.resetPos()
self.targetCheckpointPos = self.maze.checkpoints[self.checkpoint].getMidInt()
def checkFuel(self):
""" Returns the score received based on checkpoint progress minus the time driving.
If this is below 0 the sihp is said to be out of fuel and crashes
"""
return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving
def updateSpeed(self,accel,dangle,brake):
""" Get new vx and vy to update position""" | self.dangle = self.dangle * self.drag*(1-brake/3)*0.6
self.angle += self.dangle
self.accel = accel
self.vx += accel * np.cos(self.angle)
self.vy += accel * np.sin(self.angle)
# flat cap on speed
if(self.vx > self.maxSpeed): self.vx = self.maxSpeed
if(self.vy > self.maxSpeed): self.vy = self.maxSpeed
if(self.vx < -1*self.maxSpeed): self.vx = -1*self.maxSpeed
if(self.vy < -1*self.maxSpeed): self.vy = -1*self.maxSpeed
# apply drag and braking to slow down
self.vx = self.vx * self.drag*(1-brake/3)
self.vy = self.vy * self.drag*(1-brake/3)
def updatePos(self):
""" Update where the ship is each timestep based on calculated velocity."""
self.timeDriving +=1
self.pos[0] += self.vx
self.pos[1] += self.vy
def getInputs(self,maze):
""" Determine which of the input locations are in walls / out of bounds
for the input vector
"""
self.inputPos = []
#Extrapos for CTS LOS
self.extrapos = []
i,j=0,0
# array of front views
for ang in self.inputangle:
blocked = False
if self.inputType == 0:
for dis in self.inputdistance:
self.inputPos.append([int(self.pos[0] + dis*np.cos(self.angle+ang)),
int(self.pos[1] + dis*np.sin(self.angle+ang))])
if(maze.checkCollisions(self.inputPos[i]) or blocked):
blocked = True
self.inputColour[i] = sensor_colours[1]
self.scan[i] = 0
else:
self.inputColour[i] = sensor_colours[0]
self.scan[i] = 1
i +=1
elif self.inputType == 1:
#eXPERIMENTAL STUFF FOR continuous LOS
self.extrapos.append(maze.getMaximumSightDistance(self.pos, self.angle+ang, self.sightLength))
temp_length = self.extrapos[i][2]
for ord in self.orders:
self.scan[j] = (temp_length/self.sightLength)**ord
self.scan[j+1] = ((self.sightLength-temp_length)/self.sightLength)**ord # add opposite way, gets stronger closer instead of stronger further. might make a difference.
j +=2
i+=1
def getDecision(self):
""" Use the input vector and all the weights to decide how to control
the ship this timestep.
"""
temp = []
temp.append( np.array(self.scan) )
for i,wt in enumerate(self.weights):
#print(self.bias[i],temp[i].dot(wt))
temp.append(np.add(temp[i].dot(wt),self.bias[i]))
#print(str(self.bias) + " " + str(wt))
return temp[len(self.weights)].tolist() # np.add(np.add(np.add(self.scan.dot(self.weights[0]), self.bias[0]).dot(self.weights[1]),self.bias[1]).dot(self.weights[2]),self.bias[2]).T
def getScore(self):
""" determine the current score of the ship """
tempscore = 1000 - 0.01*self.timeDriving
tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)
tempscore += self.checkpoint *1000
tempscore += self.laps * 1000 * len(self.maze.checkpoints)
return tempscore
def crash(self):
""" Once the ship's run has expired it crashes. Here its score is
tallied and it is stopped until it is reset The cost increases as
weights tend away from 0, resulting in fewer extreme weights
"""
self.cost = 0
for wt in self.weights:
self.cost += np.abs(wt).sum()
for bs in self.bias:
self.cost += np.abs(bs).sum()
self.score -= 0.00001*self.cost
# Score improves with distance and time driving
self.score += self.getScore()
# Stop the ship from going further
self.crashed = True
self.vx = 0
self.vy = 0
self.accel = 0
self.dangle = 0
#print(self.getName() + " has crashed at: " + str(self.pos[0])+ " " + str(self.pos[1]))
def getIntPos(self):
"""Returns the current ship position as a tuple of integers """
return (int(self.pos[0]),int(self.pos[1]))
############### VISUAL #################################
# Functions related to creating various visual effects on screen
########################################################
def drawShip(self,screen,maze,frame,midpos = (450,800),zoom = 1,fancyShip = False, drawThrusters = True):
""" Draw triangular ship, get the input values and draw a red or blue
circle at their location
"""
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
# Draw Inputs
if not self.crashed:
if self.inputType == 0:
self.drawPointInputs(screen,maze,midpos=midpos)
elif self.inputType == 1:
self.drawVariableLOS(screen,frame,midpos=midpos)
# if(fancyShip): pygame.draw.polygon(screen, self.parentcolour,
# [[int(bp[0]+ 10 *np.cos(self.angle+3.14)),
# int(bp[1]+ 10 *np.sin(self.angle+3.14))],
# [int(bp[0]+ 10 *np.cos(self.angle+1)),
# int(bp[1]+ 10 *np.sin(self.angle+1))],
# [int(bp[0]),
# int(bp[1])],
# [int(bp[0]+ 10 *np.cos(self.angle-1)),
# int(bp[1]+ 10 *np.sin(self.angle-1))]])
# draw thrusters
if not self.crashed:
if(drawThrusters):
pygame.draw.polygon(screen, (140,140,40),
[[int(bp[0]+ self.accel*22 *np.cos(self.angle+3.14)),
int(bp[1]+ self.accel*22 *np.sin(self.angle+3.14))],
[int(bp[0]+ 7 *np.cos(self.angle + 2.64)),
int(bp[1]+ 7 *np.sin(self.angle + 2.64))],
[int(bp[0]+ 7 *np.cos(self.angle + 3.64)),
int(bp[1]+ 7 *np.sin(self.angle + 3.64))]])
pygame.draw.polygon(screen, (140,140,40),
[[int(bp[0]+ self.dangle*60 *np.cos(self.angle-1.57) + 7*np.cos(self.angle)),
int(bp[1]+ self.dangle*60 *np.sin(self.angle-1.57) + 7*np.sin(self.angle))],
[int(bp[0]+ 5 *np.cos(self.angle)),
int(bp[1]+ 5 *np.sin(self.angle))],
[int(bp[0]+ 9 *np.cos(self.angle)),
int(bp[1]+ 9 *np.sin(self.angle))]])
# draw ship
pygame.draw.polygon(screen, self.colour,
[[int(bp[0]+ 10 *np.cos(self.angle-0.15)),
int(bp[1]+ 10 *np.sin(self.angle-0.15))],
[int(bp[0]+ 10 *np.cos(self.angle+0.15)),
int(bp[1]+ 10 *np.sin(self.angle+0.15))],
[int(bp[0]+ 10 *np.cos(self.angle + 2.64)),
int(bp[1]+ 10 *np.sin(self.angle + 2.64))],
[int(bp[0]+ 10 *np.cos(self.angle + 3.64)),
int(bp[1]+ 10 *np.sin(self.angle + 3.64))]])
# Draw the cockpit
pygame.draw.circle(screen, (140,160,240), bp, 5,2)
def drawMatrix(self,screen,pos):
""" Draw a bunch of squares that light up red of green based on
different points in the decision process
"""
bp = pos # base position bp
namesurface = myfont.render(self.parentname, False, self.parentcolour)
screen.blit(namesurface,(bp[0]-50,bp[1] -60),)
tempOffset = namesurface.get_width()
namesurface = myfont.render(self.name, False, self.colour)
screen.blit(namesurface,(bp[0]-40 ,bp[1]-30),)
size = 10
separationx = 12
separationy = 20
# Cycle through array of inputs
for i in range(self.dimensions[0]):
# Create red - green colour based on array
temp_colour = (int((1-self.scan[i])*240),int(self.scan[i]*240),0)
# Draw square that is slightly offset of previous square
pygame.draw.rect(screen,temp_colour ,(bp[0] - separationx *int(i / len(self.inputdistance)),
bp[1] - separationx*(i%len(self.inputdistance)) + 3*separationx,size,size))
# Calculate intermediate decision array
temp_vector = self.scan
# Repeat
for j, bs in enumerate(self.bias):
temp_vector = np.add(temp_vector.dot(self.weights[j]), bs)
for i in range(temp_vector.shape[0]):
temp_colour = (int(max(min((1-temp_vector[i])*240,240),0)),int(max(min(temp_vector[i]*240,240),0)),0)
pygame.draw.rect(screen,temp_colour ,(bp[0] + (j+1)*separationy,bp[1] + separationx*i,size,size))
def drawPointInputs(self,screen,maze,midpos = (450,800)):
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
# Draw where the inputs are for decision making.
if(self.crashed == False or True):
self.drawTargetCheckpoint(screen,maze,bp,midpos = midpos)
for i,pos in enumerate(self.inputPos):
pygame.draw.circle(screen, self.inputColour[i], getOffsetPos(pos,midpos), 2,0)
def highlight(self,screen,midpos = (800,450)):
""" Draw some expanding circles around the ship """
posInt = self.getIntPos()
posInt = getOffsetPos(posInt,midpos)
pygame.draw.circle(screen, [max(0,tmp - (10 - self.timeDriving%10)*10) for tmp in self.colour],
posInt, int(10+ (self.timeDriving%10 )),2)
pygame.draw.circle(screen, self.colour, posInt, int(20+ (self.timeDriving%10 )),2)
pygame.draw.circle(screen, [max(0,tmp - (self.timeDriving%10)*10) for tmp in self.colour],
posInt, int(30+ (self.timeDriving%10 )),2)
def drawTargetCheckpoint(self,screen,maze,pos,midpos = (450,800)):
""" Draw an arrow pointing to the next checkpoint we must reach """
tarpos = getOffsetPos(self.targetCheckpointPos,midpos)
temp = (int(pos[0]+(tarpos[0]-pos[0])/10),
int(pos[1]+(tarpos[1]-pos[1])/10))
pygame.draw.circle(screen,(130,240,130),temp,2,2)
def drawVariableLOS(self,screen,frame,midpos = (450,800)):
bp = self.getIntPos()
bp = getOffsetPos(bp,midpos)
for g in self.extrapos:
#print("extrapos: ",g)
if g[0]:
drawPulsatingCirlce(screen, getOffsetPos(g[1],midpos),frame,size = 12,cycle_length = 60, colour = (255,0,0),magnitude = (self.sightLength - g[2]) / self.sightLength*0.3+0.7 )
#pygame.draw.circle(screen,(230,40,30),getOffsetPos(g[0],midpos),8,1)
#pygame.draw.circle(screen,(250,0,0),getOffsetPos(g[0],midpos),4,1)
pygame.draw.line(screen,(100,100,100),bp,getOffsetPos(g[1],midpos),1)
#pygame.draw.circle(screen,(30,140,130),[int(bp[0]),int(bp[1])],5,5)
else:
# Get the max length and draw a lighter line to show where the sensors are
endpoint = []
pygame.draw.line(screen,(30,30,30),bp,getOffsetPos(g[1],midpos),1)
def getName(self):
""" Get 6 letter "name" based on weight and bias totals """
l = []
for wt in self.weights:
l.append(chr( int( 97 + (sum(map(sum,wt)) * 10) % 26 ) ))
for bs in self.bias:
#print("BS: "+str(bs[0]))
l.append(chr( int( 97 + (sum(bs) * 10) % 26 ) ))
l[0] = chr(ord(l[0]) - 32)
self.name = ''.join(l)
return self.name
def setName(self,newName):
""" Changes the weights and biases randomly in order to have the
getName() function return the name specified here """
for i, wt in enumerate(self.weights):
tempcoef = 0
tempoff = ord(newName[i]) - ord(self.getName()[i])
if(tempoff > 0):
tempcoef = 0.1
else:
tempcoef = -0.1
#print("Was: "+newName + " " + self.getName() + " " + str(tempoff))
tempoff = np.abs(tempoff)
for j in range(tempoff):
a = np.random.randint(wt.shape[0])
b = np.random.randint(wt.shape[1])
wt[a,b] += tempcoef
for v, bs in enumerate(self.bias):
tempcoef = 0
tempoff = ord(newName[v+len(self.weights)]) - ord(self.getName()[v+len(self.weights)])
if(tempoff > 0):
tempcoef = 0.1
else:
tempcoef = -0.1
#print("Now: "+ str(v) + " " +newName + " " + self.getName() + " " + str(tempoff))
tempoff = np.abs(tempoff)
for j in range(tempoff):
c = np.random.randint(bs.shape[0])
bs[c] += tempcoef
def nameShip(self,newName,colour = None):
""" Forces the ship to conform to the name and colour provided """
self.setName(newName)
if colour is not None:
self.colour = colour | self.dangle += dangle | random_line_split |
layers.py | import gzip
import pickle as pkl
import time
from datetime import datetime
import grpc
import numpy as np
from sklearn.utils import shuffle
import neural_nets_pb2 as nn_pb
import neural_nets_pb2_grpc as nn_pb_grpc
from mnist_loader import load_data
from activations import *
# pylint: disable=too-many-arguments
class Layer(nn_pb_grpc.LayerDataExchangeServicer):
"""
abstract layer extract common methods
"""
# pylint: disable=too-many-arguments
def __init__(self, layer_name, upper_layer, lower_layer,
lower_layer_nodes, current_layer_nodes,
nonlin, nonlin_prime):
"""
datasets : the path of mnist dataset
nonlin: activation function
nonlin_prime: the derivative of activation function
"""
self.layer_name = layer_name
self.upper_layer_addr = upper_layer
self.lower_layer_addr = lower_layer
self.nonlin = nonlin
self.nonlin_prime = nonlin_prime
# lazy initialization
self.upper_layer_stub = None
self.lower_layer_stub = None
# weights dimension
self.weights_shape = (current_layer_nodes, lower_layer_nodes)
self.weights = None
self.biases = None
# record outputs from lower layer
# use batch id as key
# Purposes:
# 1) used for computing the weighted sum of current layer
# 2) used for computing the gradients for updating weights of current layer
self.lower_layer_outputs = {}
# computed from lower layer outputs for cache purpose
# cache for computing delta for current layer
# delta = partial_delta_rec * nonlin_prime(weighted_sum)
# with different batch we have different weighted sum
self.weighted_sum_inputs = {}
def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):
"""
forward output to upper layer
"""
if not self.upper_layer_stub:
self.create_upper_stub()
# convert numpy array to byte string
bytes_matrix = pkl.dumps(forward_matrix, 2)
bytes_labels = pkl.dumps(forward_labels, 2)
# send message to next layer
res = self.upper_layer_stub.UpdateInput(
nn_pb.ForwardMsg(batch_id=batch_id,
output_matrix=bytes_matrix,
labels=bytes_labels,
is_train=istrain))
# print("get response form upper layer", res.message)
def backward_to_lower(self, batch_id, partial_delta, labels):
"""
back propagate error partial_delta to lower layer
partial_delta = dot(self.weights.T, self.delta)
self.delta = delta_received_from_upper * nonlin_prime(z)
"""
# create stub for lower layer
if not self.lower_layer_stub:
self.create_lower_stub()
# convert partial_delta matrix to bytes string
bytes_delta = pkl.dumps(partial_delta)
bytes_labels = pkl.dumps(labels)
res = self.lower_layer_stub.UpdateDelta(
nn_pb.BackwardMsg(batch_id=batch_id,
partial_delta=bytes_delta,
labels=bytes_labels))
# print("get response from lower layer", res.message)
def create_upper_stub(self):
""" create upper_layer_stub for exchanging data between grpc"""
if self.upper_layer_addr:
channel = grpc.insecure_channel(self.upper_layer_addr)
self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no upper layer has been specified")
def create_lower_stub(self):
""" stub for lower layer communication"""
if self.lower_layer_addr:
channel = grpc.insecure_channel(self.lower_layer_addr)
self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no lower layer has been specified")
def init_weights(self, load_weights=None):
"""
if load_weights is specified load the trained weights
"""
if load_weights:
# TODO
pass
else:
# x: lower layer nodes n
# y: current layer nodes n
x = self.weights_shape[1]
y = self.weights_shape[0]
self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member
self.biases = np.random.randn(y, 1) # pylint: disable=no-member
def check_weights(self):
if self.weights is None or self.biases is None:
print("Weights of {} have not initialized".format(self.layer_name))
import sys
sys.exit(-1)
def update_weights(self, lr, delta, outputs_of_lower):
"""
outputs of lower: equals to inputs of this layer
"""
delta_shape = delta.shape
inputs_shape = outputs_of_lower.shape
# update biases
avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)
self.biases = self.biases - lr * avg_delta
# compute gradients for weights
delta = delta.reshape(delta_shape[0], delta_shape[1], 1)
inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])
gradients = delta * inputs
gradients_avg = np.mean(gradients, axis=0)
self.weights = self.weights - lr * gradients_avg
def parse_forward_msg(self, req):
""" extract and transform data in forward message"""
batch_id = req.batch_id
bytes_outputs_of_lower = req.output_matrix
bytes_labels = req.labels
is_train = req.is_train
outputs_of_lower = pkl.loads(bytes_outputs_of_lower)
labels = pkl.loads(bytes_labels)
return batch_id, outputs_of_lower, labels, is_train
# implementing rpc services
def UpdateInput(self, request, context):
# implemented in Hidden Layer and Output Layer
pass
def UpdateDelta(self, request, context):
""" Invoked by upper layer
will be implemented by hidden layer
"""
pass
class InputLayer(Layer):
""" for input data"""
def __init__(self, upper_layer, data_path, input_dim, layer_name="input"):
super().__init__(layer_name, upper_layer,
None, None, input_dim,
None, None)
self.train, self.val, self.test = load_data(data_path)
def start_feed_data(self, batch_size, epochs):
""""""
train_X = self.train[0]
train_y = self.train[1]
val_X = self.val[0]
val_y = self.val[1]
train_size = train_X.shape[0]
batch_id = 0
test_batch_id = -1 # use negative number, diff with batch_id
for i in range(epochs):
print("Start feed {0} epoch data".format(i))
train_X, train_y = shuffle(train_X, train_y)
for j in range(0, train_size, batch_size):
minibatch_X = train_X[j:j+batch_size]
minibatch_y = train_y[j:j+batch_size]
self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)
batch_id += 1
# send test data for evaluation
self.forward_to_upper(test_batch_id, val_X, val_y, False)
test_batch_id -= 1
def UpdateInput(self, req, ctx):
""""""
print("Should not have lower layer")
return nn_pb.PlainResponse(message="Wrong invoke!")
def UpdateDelta(self, req, ctx): | if batch_id % 100 == 0:
print("Complete backpropagation for batch {} at {}".format(
batch_id,
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return nn_pb.PlainResponse(message="Received at layer {}".format(
self.layer_name))
class HiddenLayer(Layer):
""" hidden layer"""
def __init__(self, layer_name,
upper_layer,
lower_layer,
lower_layer_size,
layer_size,
nonlin,
nonlin_prime,
learning_rate,
enable_synthetic_gradients,
sg_learning_rate
):
"""
enable_synthetic_gradients: whether use synthetic gradients
to do error approximating
"""
super().__init__(layer_name, upper_layer,
lower_layer, lower_layer_size,
layer_size, nonlin,
nonlin_prime)
self.lr = learning_rate
self.enable_sg = enable_synthetic_gradients
self.sg_lr = sg_learning_rate
self.sg_weights = None
self.sg_deltas = {}
def init_sg_weights(self):
""" using linear synthetic gradients model
SG(h, y) = hA + yB + C
refer to paper, Understanding synthetic gradients and decoupled neural networks
"""
n = self.weights_shape[0] # size of current layer
# pylint: disable=no-member
A = np.random.randn(n, n) / np.sqrt(n)
B = np.random.randn(10, n) / np.sqrt(n)
C = np.random.randn(1, n) / np.sqrt(n)
# pylint: enable=no-member
self.sg_weights = [A, B, C]
def check_sg_weights(self):
if self.sg_weights is None:
self.init_sg_weights()
def SG(self, h, y):
""" generate delta by weighted sum and label
h: outputs of this layer
y: labels for this batch
"""
self.check_sg_weights()
A = self.sg_weights[0] #(n, n)
B = self.sg_weights[1] #(10, n)
C = self.sg_weights[2] #(1, n)
delta = np.matmul(h, A) + np.matmul(y, B) + C
return delta
def update_sg_weights(self, true_delta, batch_id):
""" name conventions refer paper :
Understanding synthetic gradients and decoupled neural interface
TODO: synthetic gradient estimates the partial delta instead true gradients
"""
sg_delta = self.sg_deltas[batch_id]
weighted_sum = self.weighted_sum_inputs[batch_id]
labels = self.lower_layer_outputs[batch_id]['labels']
y = labels
h = self.nonlin(weighted_sum)
Err = sg_delta - true_delta
A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]
B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]
C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)
self.sg_weights = [A, B, C]
# del stored delta
del self.sg_deltas[batch_id]
def UpdateInput(self, request, context):
""" Invoked by lower layer
Once inputs updated, start computing the weighted sum
then activation outputs,
then forward outputs to next layer
request: ForwardMsg
"""
self.check_weights()
# get values from message
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)
print("Get inputs id: {0}, matrix shape: {1}, labels shape: {2}".format(
batch_id, outputs_of_lower.shape, labels.shape))
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
# saving inputs during training, because for weights updating
if is_train:
inputs = {'matrix': outputs_of_lower,
'labels': labels}
self.lower_layer_outputs[batch_id] = inputs
self.weighted_sum_inputs[batch_id] = weighted_sum
activations = self.nonlin(weighted_sum) # apply element wise
# update weights immediately with SG, if enabled SG
if self.enable_sg and is_train:
print("update weights based on SG delta")
sg_delta = self.SG(activations, labels)
# TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)
self.update_weights(self.lr, sg_delta, outputs_of_lower)
self.sg_deltas[batch_id] = sg_delta
# forward layer outputs
self.forward_to_upper(batch_id, activations, labels, is_train)
print("batch id: {0}, activations shape {1}".format(
batch_id, activations.shape))
# return received
return nn_pb.PlainResponse(message="Inputs received by layer {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
"""
delta shape: (batch_size, size_of_current_layer)
req: BackwardMsg
"""
batch_id = req.batch_id
bytes_partial_delta = req.partial_delta
partial_delta = pkl.loads(bytes_partial_delta)
bytes_labels = req.labels # variable currently not useful
labels = pkl.loads(bytes_labels)
# compute delta for current layer
z = self.weighted_sum_inputs[batch_id]
z_nonlin_prime = self.nonlin_prime(z)
# shape of delta: (batch_size, size_of_layer)
delta = partial_delta * z_nonlin_prime
# compute partial delta for lower layer
partial_delta_for_lower = np.dot(delta, self.weights)
# send partial delta to lower layer
self.backward_to_lower(batch_id,
partial_delta_for_lower,
labels)
if self.enable_sg:
# train the SG
# TODO pass partial delta instead
self.update_sg_weights(delta, batch_id)
else:
# update weights regularly
inputs = self.lower_layer_outputs[batch_id]['matrix']
self.update_weights(self.lr, delta, inputs)
# delete stored for weighted sum
del self.weighted_sum_inputs[batch_id]
# delete stored for lower layer outputs
del self.lower_layer_outputs[batch_id]
return nn_pb.PlainResponse(
message="Partial delta received at {}".format(self.layer_name))
class OutputLayer(Layer):
""" output layer
computing the error based on labels and prediction
using softmax as output activations and cross entropy loss
"""
def __init__(self, layer_name, lower_layer, lower_layer_size,
num_classes, learning_rate ):
super().__init__(layer_name, None,
lower_layer,
lower_layer_size,
num_classes,
None,
None)
self.lr = learning_rate
def UpdateInput(self, req, ctx):
""" once received input from lower layer:
compute weighted sum -> softmax output -> loss -> back propagate
"""
self.check_weights()
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
softmax_output = softmax(weighted_sum, axis=1)
# print("weighted sum", weighted_sum)
# print("outputs of lower", outputs_of_lower)
if is_train:
delta = softmax_output - labels
# compute delta for lower layer first
# because current error is based on current weights
partial_delta_for_lower = np.dot(delta, self.weights)
# send to lower layer
self.backward_to_lower(batch_id, partial_delta_for_lower, labels)
# cross entropy loss
if batch_id % 100 == 0:
total_loss = np.log(softmax_output) * labels # pylint: disable=no-member
# print("total loss: ", np.sum(total_loss))
loss = -1 * np.sum(total_loss) / labels.shape[0]
print("For batch id {}, avg loss: {}".format(batch_id, loss))
# update weights
self.update_weights(self.lr, delta, outputs_of_lower)
else:
# test evaluation
pred_results = np.argmax(softmax_output, axis=1)
matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))
print("Epoch {}, Performance test {} / {}".format(
-1*batch_id, matched, labels.shape[0]))
return nn_pb.PlainResponse(message="Inputs received at {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
""" No upper layer"""
print("Error: No upper layer for output layer")
return nn_pb.PlainResponse(message="Invalid Operation!!") | """"""
batch_id = req.batch_id | random_line_split |
layers.py | import gzip
import pickle as pkl
import time
from datetime import datetime
import grpc
import numpy as np
from sklearn.utils import shuffle
import neural_nets_pb2 as nn_pb
import neural_nets_pb2_grpc as nn_pb_grpc
from mnist_loader import load_data
from activations import *
# pylint: disable=too-many-arguments
class Layer(nn_pb_grpc.LayerDataExchangeServicer):
"""
abstract layer extract common methods
"""
# pylint: disable=too-many-arguments
def __init__(self, layer_name, upper_layer, lower_layer,
lower_layer_nodes, current_layer_nodes,
nonlin, nonlin_prime):
"""
datasets : the path of mnist dataset
nonlin: activation function
nonlin_prime: the derivative of activation function
"""
self.layer_name = layer_name
self.upper_layer_addr = upper_layer
self.lower_layer_addr = lower_layer
self.nonlin = nonlin
self.nonlin_prime = nonlin_prime
# lazy initialization
self.upper_layer_stub = None
self.lower_layer_stub = None
# weights dimension
self.weights_shape = (current_layer_nodes, lower_layer_nodes)
self.weights = None
self.biases = None
# record outputs from lower layer
# use batch id as key
# Purposes:
# 1) used for computing the weighted sum of current layer
# 2) used for computing the gradients for updating weights of current layer
self.lower_layer_outputs = {}
# computed from lower layer outputs for cache purpose
# cache for computing delta for current layer
# delta = partial_delta_rec * nonlin_prime(weighted_sum)
# with different batch we have different weighted sum
self.weighted_sum_inputs = {}
def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):
"""
forward output to upper layer
"""
if not self.upper_layer_stub:
self.create_upper_stub()
# convert numpy array to byte string
bytes_matrix = pkl.dumps(forward_matrix, 2)
bytes_labels = pkl.dumps(forward_labels, 2)
# send message to next layer
res = self.upper_layer_stub.UpdateInput(
nn_pb.ForwardMsg(batch_id=batch_id,
output_matrix=bytes_matrix,
labels=bytes_labels,
is_train=istrain))
# print("get response form upper layer", res.message)
def backward_to_lower(self, batch_id, partial_delta, labels):
"""
back propagate error partial_delta to lower layer
partial_delta = dot(self.weights.T, self.delta)
self.delta = delta_received_from_upper * nonlin_prime(z)
"""
# create stub for lower layer
if not self.lower_layer_stub:
self.create_lower_stub()
# convert partial_delta matrix to bytes string
bytes_delta = pkl.dumps(partial_delta)
bytes_labels = pkl.dumps(labels)
res = self.lower_layer_stub.UpdateDelta(
nn_pb.BackwardMsg(batch_id=batch_id,
partial_delta=bytes_delta,
labels=bytes_labels))
# print("get response from lower layer", res.message)
def create_upper_stub(self):
""" create upper_layer_stub for exchanging data between grpc"""
if self.upper_layer_addr:
channel = grpc.insecure_channel(self.upper_layer_addr)
self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no upper layer has been specified")
def create_lower_stub(self):
""" stub for lower layer communication"""
if self.lower_layer_addr:
channel = grpc.insecure_channel(self.lower_layer_addr)
self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no lower layer has been specified")
def init_weights(self, load_weights=None):
"""
if load_weights is specified load the trained weights
"""
if load_weights:
# TODO
pass
else:
# x: lower layer nodes n
# y: current layer nodes n
x = self.weights_shape[1]
y = self.weights_shape[0]
self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member
self.biases = np.random.randn(y, 1) # pylint: disable=no-member
def check_weights(self):
if self.weights is None or self.biases is None:
print("Weights of {} have not initialized".format(self.layer_name))
import sys
sys.exit(-1)
def update_weights(self, lr, delta, outputs_of_lower):
"""
outputs of lower: equals to inputs of this layer
"""
delta_shape = delta.shape
inputs_shape = outputs_of_lower.shape
# update biases
avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)
self.biases = self.biases - lr * avg_delta
# compute gradients for weights
delta = delta.reshape(delta_shape[0], delta_shape[1], 1)
inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])
gradients = delta * inputs
gradients_avg = np.mean(gradients, axis=0)
self.weights = self.weights - lr * gradients_avg
def parse_forward_msg(self, req):
""" extract and transform data in forward message"""
batch_id = req.batch_id
bytes_outputs_of_lower = req.output_matrix
bytes_labels = req.labels
is_train = req.is_train
outputs_of_lower = pkl.loads(bytes_outputs_of_lower)
labels = pkl.loads(bytes_labels)
return batch_id, outputs_of_lower, labels, is_train
# implementing rpc services
def UpdateInput(self, request, context):
# implemented in Hidden Layer and Output Layer
pass
def UpdateDelta(self, request, context):
""" Invoked by upper layer
will be implemented by hidden layer
"""
pass
class InputLayer(Layer):
""" for input data"""
def __init__(self, upper_layer, data_path, input_dim, layer_name="input"):
super().__init__(layer_name, upper_layer,
None, None, input_dim,
None, None)
self.train, self.val, self.test = load_data(data_path)
def start_feed_data(self, batch_size, epochs):
""""""
train_X = self.train[0]
train_y = self.train[1]
val_X = self.val[0]
val_y = self.val[1]
train_size = train_X.shape[0]
batch_id = 0
test_batch_id = -1 # use negative number, diff with batch_id
for i in range(epochs):
print("Start feed {0} epoch data".format(i))
train_X, train_y = shuffle(train_X, train_y)
for j in range(0, train_size, batch_size):
minibatch_X = train_X[j:j+batch_size]
minibatch_y = train_y[j:j+batch_size]
self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)
batch_id += 1
# send test data for evaluation
self.forward_to_upper(test_batch_id, val_X, val_y, False)
test_batch_id -= 1
def UpdateInput(self, req, ctx):
""""""
print("Should not have lower layer")
return nn_pb.PlainResponse(message="Wrong invoke!")
def UpdateDelta(self, req, ctx):
""""""
batch_id = req.batch_id
if batch_id % 100 == 0:
print("Complete backpropagation for batch {} at {}".format(
batch_id,
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return nn_pb.PlainResponse(message="Received at layer {}".format(
self.layer_name))
class HiddenLayer(Layer):
""" hidden layer"""
def __init__(self, layer_name,
upper_layer,
lower_layer,
lower_layer_size,
layer_size,
nonlin,
nonlin_prime,
learning_rate,
enable_synthetic_gradients,
sg_learning_rate
):
"""
enable_synthetic_gradients: whether use synthetic gradients
to do error approximating
"""
super().__init__(layer_name, upper_layer,
lower_layer, lower_layer_size,
layer_size, nonlin,
nonlin_prime)
self.lr = learning_rate
self.enable_sg = enable_synthetic_gradients
self.sg_lr = sg_learning_rate
self.sg_weights = None
self.sg_deltas = {}
def init_sg_weights(self):
""" using linear synthetic gradients model
SG(h, y) = hA + yB + C
refer to paper, Understanding synthetic gradients and decoupled neural networks
"""
n = self.weights_shape[0] # size of current layer
# pylint: disable=no-member
A = np.random.randn(n, n) / np.sqrt(n)
B = np.random.randn(10, n) / np.sqrt(n)
C = np.random.randn(1, n) / np.sqrt(n)
# pylint: enable=no-member
self.sg_weights = [A, B, C]
def check_sg_weights(self):
if self.sg_weights is None:
self.init_sg_weights()
def SG(self, h, y):
""" generate delta by weighted sum and label
h: outputs of this layer
y: labels for this batch
"""
self.check_sg_weights()
A = self.sg_weights[0] #(n, n)
B = self.sg_weights[1] #(10, n)
C = self.sg_weights[2] #(1, n)
delta = np.matmul(h, A) + np.matmul(y, B) + C
return delta
def update_sg_weights(self, true_delta, batch_id):
""" name conventions refer paper :
Understanding synthetic gradients and decoupled neural interface
TODO: synthetic gradient estimates the partial delta instead true gradients
"""
sg_delta = self.sg_deltas[batch_id]
weighted_sum = self.weighted_sum_inputs[batch_id]
labels = self.lower_layer_outputs[batch_id]['labels']
y = labels
h = self.nonlin(weighted_sum)
Err = sg_delta - true_delta
A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]
B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]
C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)
self.sg_weights = [A, B, C]
# del stored delta
del self.sg_deltas[batch_id]
def UpdateInput(self, request, context):
""" Invoked by lower layer
Once inputs updated, start computing the weighted sum
then activation outputs,
then forward outputs to next layer
request: ForwardMsg
"""
self.check_weights()
# get values from message
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)
print("Get inputs id: {0}, matrix shape: {1}, labels shape: {2}".format(
batch_id, outputs_of_lower.shape, labels.shape))
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
# saving inputs during training, because for weights updating
if is_train:
inputs = {'matrix': outputs_of_lower,
'labels': labels}
self.lower_layer_outputs[batch_id] = inputs
self.weighted_sum_inputs[batch_id] = weighted_sum
activations = self.nonlin(weighted_sum) # apply element wise
# update weights immediately with SG, if enabled SG
if self.enable_sg and is_train:
print("update weights based on SG delta")
sg_delta = self.SG(activations, labels)
# TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)
self.update_weights(self.lr, sg_delta, outputs_of_lower)
self.sg_deltas[batch_id] = sg_delta
# forward layer outputs
self.forward_to_upper(batch_id, activations, labels, is_train)
print("batch id: {0}, activations shape {1}".format(
batch_id, activations.shape))
# return received
return nn_pb.PlainResponse(message="Inputs received by layer {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
"""
delta shape: (batch_size, size_of_current_layer)
req: BackwardMsg
"""
batch_id = req.batch_id
bytes_partial_delta = req.partial_delta
partial_delta = pkl.loads(bytes_partial_delta)
bytes_labels = req.labels # variable currently not useful
labels = pkl.loads(bytes_labels)
# compute delta for current layer
z = self.weighted_sum_inputs[batch_id]
z_nonlin_prime = self.nonlin_prime(z)
# shape of delta: (batch_size, size_of_layer)
delta = partial_delta * z_nonlin_prime
# compute partial delta for lower layer
partial_delta_for_lower = np.dot(delta, self.weights)
# send partial delta to lower layer
self.backward_to_lower(batch_id,
partial_delta_for_lower,
labels)
if self.enable_sg:
# train the SG
# TODO pass partial delta instead
self.update_sg_weights(delta, batch_id)
else:
# update weights regularly
inputs = self.lower_layer_outputs[batch_id]['matrix']
self.update_weights(self.lr, delta, inputs)
# delete stored for weighted sum
del self.weighted_sum_inputs[batch_id]
# delete stored for lower layer outputs
del self.lower_layer_outputs[batch_id]
return nn_pb.PlainResponse(
message="Partial delta received at {}".format(self.layer_name))
class OutputLayer(Layer):
""" output layer
computing the error based on labels and prediction
using softmax as output activations and cross entropy loss
"""
def __init__(self, layer_name, lower_layer, lower_layer_size,
num_classes, learning_rate ):
super().__init__(layer_name, None,
lower_layer,
lower_layer_size,
num_classes,
None,
None)
self.lr = learning_rate
def UpdateInput(self, req, ctx):
""" once received input from lower layer:
compute weighted sum -> softmax output -> loss -> back propagate
"""
self.check_weights()
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
softmax_output = softmax(weighted_sum, axis=1)
# print("weighted sum", weighted_sum)
# print("outputs of lower", outputs_of_lower)
if is_train:
|
else:
# test evaluation
pred_results = np.argmax(softmax_output, axis=1)
matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))
print("Epoch {}, Performance test {} / {}".format(
-1*batch_id, matched, labels.shape[0]))
return nn_pb.PlainResponse(message="Inputs received at {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
""" No upper layer"""
print("Error: No upper layer for output layer")
return nn_pb.PlainResponse(message="Invalid Operation!!")
| delta = softmax_output - labels
# compute delta for lower layer first
# because current error is based on current weights
partial_delta_for_lower = np.dot(delta, self.weights)
# send to lower layer
self.backward_to_lower(batch_id, partial_delta_for_lower, labels)
# cross entropy loss
if batch_id % 100 == 0:
total_loss = np.log(softmax_output) * labels # pylint: disable=no-member
# print("total loss: ", np.sum(total_loss))
loss = -1 * np.sum(total_loss) / labels.shape[0]
print("For batch id {}, avg loss: {}".format(batch_id, loss))
# update weights
self.update_weights(self.lr, delta, outputs_of_lower) | conditional_block |
layers.py | import gzip
import pickle as pkl
import time
from datetime import datetime
import grpc
import numpy as np
from sklearn.utils import shuffle
import neural_nets_pb2 as nn_pb
import neural_nets_pb2_grpc as nn_pb_grpc
from mnist_loader import load_data
from activations import *
# pylint: disable=too-many-arguments
class Layer(nn_pb_grpc.LayerDataExchangeServicer):
    """
    Abstract layer: common machinery shared by the input, hidden and
    output layers of a layer-per-process neural network.

    Each layer runs as a gRPC service (LayerDataExchangeServicer).
    Activations flow "up" through UpdateInput and error deltas flow
    "down" through UpdateDelta; numpy matrices are pickled into the
    bytes fields of the protobuf messages for transport.
    """
    # pylint: disable=too-many-arguments
    def __init__(self, layer_name, upper_layer, lower_layer,
                 lower_layer_nodes, current_layer_nodes,
                 nonlin, nonlin_prime):
        """
        layer_name: name used in log output and gRPC response messages
        upper_layer / lower_layer: gRPC addresses of the neighbouring
            layers (either may be None at the ends of the chain)
        lower_layer_nodes / current_layer_nodes: layer widths; together
            they fix the weight-matrix shape (current, lower)
        nonlin: activation function
        nonlin_prime: the derivative of activation function
        """
        self.layer_name = layer_name
        self.upper_layer_addr = upper_layer
        self.lower_layer_addr = lower_layer
        self.nonlin = nonlin
        self.nonlin_prime = nonlin_prime
        # lazy initialization: stubs are created on first send
        self.upper_layer_stub = None
        self.lower_layer_stub = None
        # weights dimension: (this layer's nodes, lower layer's nodes)
        self.weights_shape = (current_layer_nodes, lower_layer_nodes)
        # weights/biases stay None until init_weights() is called;
        # check_weights() guards against using them uninitialised.
        self.weights = None
        self.biases = None
        # record outputs from lower layer, keyed by batch id.
        # Purposes:
        # 1) used for computing the weighted sum of current layer
        # 2) used for computing the gradients for updating weights of current layer
        self.lower_layer_outputs = {}
        # cache of weighted sums per batch id, computed on the forward
        # pass and consumed on the backward pass:
        # delta = partial_delta_rec * nonlin_prime(weighted_sum)
        # with different batch we have different weighted sum
        self.weighted_sum_inputs = {}
    def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):
        """
        Forward this layer's output (plus labels) to the upper layer
        via its UpdateInput RPC.

        batch_id: identifier the upper layer uses to key its caches
        forward_matrix: numpy activations to send
        forward_labels: labels travelling alongside the batch
        istrain: True for training batches, False for evaluation
        """
        if not self.upper_layer_stub:
            self.create_upper_stub()
        # convert numpy array to byte string (pickle protocol 2)
        bytes_matrix = pkl.dumps(forward_matrix, 2)
        bytes_labels = pkl.dumps(forward_labels, 2)
        # send message to next layer; the response is currently ignored
        res = self.upper_layer_stub.UpdateInput(
            nn_pb.ForwardMsg(batch_id=batch_id,
                             output_matrix=bytes_matrix,
                             labels=bytes_labels,
                             is_train=istrain))
        # print("get response form upper layer", res.message)
    def backward_to_lower(self, batch_id, partial_delta, labels):
        """
        Back-propagate the error term partial_delta to the lower layer
        via its UpdateDelta RPC.

        partial_delta = dot(self.weights.T, self.delta)
        self.delta = delta_received_from_upper * nonlin_prime(z)
        """
        # create stub for lower layer (lazily, on first use)
        if not self.lower_layer_stub:
            self.create_lower_stub()
        # convert partial_delta matrix to bytes string
        # NOTE(review): default pickle protocol here, protocol 2 in
        # forward_to_upper — presumably unintentional; confirm.
        bytes_delta = pkl.dumps(partial_delta)
        bytes_labels = pkl.dumps(labels)
        res = self.lower_layer_stub.UpdateDelta(
            nn_pb.BackwardMsg(batch_id=batch_id,
                              partial_delta=bytes_delta,
                              labels=bytes_labels))
        # print("get response from lower layer", res.message)
    def create_upper_stub(self):
        """ create upper_layer_stub for exchanging data between grpc"""
        if self.upper_layer_addr:
            channel = grpc.insecure_channel(self.upper_layer_addr)
            self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
        else:
            # stub stays None; callers will fail on the next RPC attempt
            print("no upper layer has been specified")
    def create_lower_stub(self):
        """ stub for lower layer communication"""
        if self.lower_layer_addr:
            channel = grpc.insecure_channel(self.lower_layer_addr)
            self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
        else:
            print("no lower layer has been specified")
    def init_weights(self, load_weights=None):
        """
        Initialise weights and biases.

        if load_weights is specified load the trained weights
        (not implemented yet — weights stay None in that branch).
        Otherwise draw Gaussian weights scaled by 1/sqrt(fan_in).
        """
        if load_weights:
            # TODO
            pass
        else:
            # x: lower layer nodes n (fan-in)
            # y: current layer nodes n (fan-out)
            x = self.weights_shape[1]
            y = self.weights_shape[0]
            self.weights = np.random.randn(y, x) / np.sqrt(x)  # pylint: disable=no-member
            self.biases = np.random.randn(y, 1)  # pylint: disable=no-member
    def check_weights(self):
        # Hard-exits the whole process if init_weights() was never
        # called — a guard against silently training with None weights.
        if self.weights is None or self.biases is None:
            print("Weights of {} have not initialized".format(self.layer_name))
            import sys
            sys.exit(-1)
    def update_weights(self, lr, delta, outputs_of_lower):
        """
        SGD step on this layer's weights and biases.

        lr: learning rate
        delta: error term, shape (batch_size, current_layer_nodes)
        outputs_of_lower: equals to inputs of this layer,
            shape (batch_size, lower_layer_nodes)
        """
        delta_shape = delta.shape
        inputs_shape = outputs_of_lower.shape
        # update biases with the batch-averaged delta
        avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)
        self.biases = self.biases - lr * avg_delta
        # compute gradients for weights: broadcast
        # (batch, current, 1) * (batch, 1, lower) -> (batch, current, lower)
        delta = delta.reshape(delta_shape[0], delta_shape[1], 1)
        inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])
        gradients = delta * inputs
        gradients_avg = np.mean(gradients, axis=0)
        self.weights = self.weights - lr * gradients_avg
    def parse_forward_msg(self, req):
        """ Extract and unpickle the fields of a ForwardMsg.

        Returns (batch_id, outputs_of_lower, labels, is_train).
        """
        batch_id = req.batch_id
        bytes_outputs_of_lower = req.output_matrix
        bytes_labels = req.labels
        is_train = req.is_train
        outputs_of_lower = pkl.loads(bytes_outputs_of_lower)
        labels = pkl.loads(bytes_labels)
        return batch_id, outputs_of_lower, labels, is_train
    # implementing rpc services
    def UpdateInput(self, request, context):
        # implemented in Hidden Layer and Output Layer
        pass
    def UpdateDelta(self, request, context):
        """ Invoked by upper layer
        will be implemented by hidden layer
        """
        pass
class InputLayer(Layer):
    """ For input data: feeds MNIST minibatches into the first
    trainable layer and absorbs the back-propagated deltas. """
    def __init__(self, upper_layer, data_path, input_dim, layer_name="input"):
        """
        upper_layer: gRPC address of the first hidden layer
        data_path: path passed to mnist_loader.load_data
        input_dim: number of input features (e.g. 784 for MNIST)
        """
        super().__init__(layer_name, upper_layer,
                         None, None, input_dim,
                         None, None)
        self.train, self.val, self.test = load_data(data_path)
    def start_feed_data(self, batch_size, epochs):
        """Shuffle and stream training minibatches to the upper layer,
        sending the full validation set once per epoch for evaluation."""
        train_X = self.train[0]
        train_y = self.train[1]
        val_X = self.val[0]
        val_y = self.val[1]
        train_size = train_X.shape[0]
        batch_id = 0
        test_batch_id = -1  # use negative number, diff with batch_id
        for i in range(epochs):
            print("Start feed {0} epoch data".format(i))
            train_X, train_y = shuffle(train_X, train_y)
            for j in range(0, train_size, batch_size):
                minibatch_X = train_X[j:j+batch_size]
                minibatch_y = train_y[j:j+batch_size]
                self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)
                batch_id += 1
            # send test data for evaluation (is_train=False)
            self.forward_to_upper(test_batch_id, val_X, val_y, False)
            test_batch_id -= 1
    def UpdateInput(self, req, ctx):
        """Invalid for the input layer: there is no lower layer."""
        print("Should not have lower layer")
        return nn_pb.PlainResponse(message="Wrong invoke!")
    def UpdateDelta(self, req, ctx):
        """Terminal point of back-propagation; just logs progress
        every 100 batches."""
        batch_id = req.batch_id
        if batch_id % 100 == 0:
            print("Complete backpropagation for batch {} at {}".format(
                batch_id,
                datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        return nn_pb.PlainResponse(message="Received at layer {}".format(
            self.layer_name))
class HiddenLayer(Layer):
""" hidden layer"""
    def __init__(self, layer_name,
                 upper_layer,
                 lower_layer,
                 lower_layer_size,
                 layer_size,
                 nonlin,
                 nonlin_prime,
                 learning_rate,
                 enable_synthetic_gradients,
                 sg_learning_rate
                 ):
        """
        learning_rate: SGD step size for this layer's weights
        enable_synthetic_gradients: whether use synthetic gradients
          to do error approximating (updates weights immediately on
          the forward pass instead of waiting for the true delta)
        sg_learning_rate: step size for the SG model's own parameters
        """
        super().__init__(layer_name, upper_layer,
                         lower_layer, lower_layer_size,
                         layer_size, nonlin,
                         nonlin_prime)
        self.lr = learning_rate
        self.enable_sg = enable_synthetic_gradients
        self.sg_lr = sg_learning_rate
        # SG parameters [A, B, C], created lazily by init_sg_weights()
        self.sg_weights = None
        # synthetic deltas kept per batch until the true delta arrives
        self.sg_deltas = {}
def init_sg_weights(self):
""" using linear synthetic gradients model
SG(h, y) = hA + yB + C
refer to paper, Understanding synthetic gradients and decoupled neural networks
"""
n = self.weights_shape[0] # size of current layer
# pylint: disable=no-member
A = np.random.randn(n, n) / np.sqrt(n)
B = np.random.randn(10, n) / np.sqrt(n)
C = np.random.randn(1, n) / np.sqrt(n)
# pylint: enable=no-member
self.sg_weights = [A, B, C]
def check_sg_weights(self):
if self.sg_weights is None:
self.init_sg_weights()
def SG(self, h, y):
""" generate delta by weighted sum and label
h: outputs of this layer
y: labels for this batch
"""
self.check_sg_weights()
A = self.sg_weights[0] #(n, n)
B = self.sg_weights[1] #(10, n)
C = self.sg_weights[2] #(1, n)
delta = np.matmul(h, A) + np.matmul(y, B) + C
return delta
    def update_sg_weights(self, true_delta, batch_id):
        """ Train the SG model by gradient descent on the squared error
        between its prediction and the true delta.

        Name conventions refer paper:
        Understanding synthetic gradients and decoupled neural interface
        TODO: synthetic gradient estimates the partial delta instead true gradients

        true_delta: the real error term received via back-propagation
        batch_id: keys the cached sg_delta / weighted sum / labels
        """
        sg_delta = self.sg_deltas[batch_id]
        weighted_sum = self.weighted_sum_inputs[batch_id]
        labels = self.lower_layer_outputs[batch_id]['labels']
        y = labels
        h = self.nonlin(weighted_sum)
        # prediction error of the SG model for this batch
        Err = sg_delta - true_delta
        # gradient of ||Err||^2 w.r.t. A, B, C, averaged over the batch
        A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]
        B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]
        C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)
        self.sg_weights = [A, B, C]
        # del stored delta (one-shot cache per batch)
        del self.sg_deltas[batch_id]
def UpdateInput(self, request, context):
""" Invoked by lower layer
Once inputs updated, start computing the weighted sum
then activation outputs,
then forward outputs to next layer
request: ForwardMsg
"""
self.check_weights()
# get values from message
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)
print("Get inputs id: {0}, matrix shape: {1}, labels shape: {2}".format(
batch_id, outputs_of_lower.shape, labels.shape))
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
# saving inputs during training, because for weights updating
if is_train:
inputs = {'matrix': outputs_of_lower,
'labels': labels}
self.lower_layer_outputs[batch_id] = inputs
self.weighted_sum_inputs[batch_id] = weighted_sum
activations = self.nonlin(weighted_sum) # apply element wise
# update weights immediately with SG, if enabled SG
if self.enable_sg and is_train:
print("update weights based on SG delta")
sg_delta = self.SG(activations, labels)
# TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)
self.update_weights(self.lr, sg_delta, outputs_of_lower)
self.sg_deltas[batch_id] = sg_delta
# forward layer outputs
self.forward_to_upper(batch_id, activations, labels, is_train)
print("batch id: {0}, activations shape {1}".format(
batch_id, activations.shape))
# return received
return nn_pb.PlainResponse(message="Inputs received by layer {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
"""
delta shape: (batch_size, size_of_current_layer)
req: BackwardMsg
"""
batch_id = req.batch_id
bytes_partial_delta = req.partial_delta
partial_delta = pkl.loads(bytes_partial_delta)
bytes_labels = req.labels # variable currently not useful
labels = pkl.loads(bytes_labels)
# compute delta for current layer
z = self.weighted_sum_inputs[batch_id]
z_nonlin_prime = self.nonlin_prime(z)
# shape of delta: (batch_size, size_of_layer)
delta = partial_delta * z_nonlin_prime
# compute partial delta for lower layer
partial_delta_for_lower = np.dot(delta, self.weights)
# send partial delta to lower layer
self.backward_to_lower(batch_id,
partial_delta_for_lower,
labels)
if self.enable_sg:
# train the SG
# TODO pass partial delta instead
self.update_sg_weights(delta, batch_id)
else:
# update weights regularly
inputs = self.lower_layer_outputs[batch_id]['matrix']
self.update_weights(self.lr, delta, inputs)
# delete stored for weighted sum
del self.weighted_sum_inputs[batch_id]
# delete stored for lower layer outputs
del self.lower_layer_outputs[batch_id]
return nn_pb.PlainResponse(
message="Partial delta received at {}".format(self.layer_name))
class | (Layer):
""" output layer
computing the error based on labels and prediction
using softmax as output activations and cross entropy loss
"""
def __init__(self, layer_name, lower_layer, lower_layer_size,
num_classes, learning_rate ):
super().__init__(layer_name, None,
lower_layer,
lower_layer_size,
num_classes,
None,
None)
self.lr = learning_rate
def UpdateInput(self, req, ctx):
""" once received input from lower layer:
compute weighted sum -> softmax output -> loss -> back propagate
"""
self.check_weights()
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
softmax_output = softmax(weighted_sum, axis=1)
# print("weighted sum", weighted_sum)
# print("outputs of lower", outputs_of_lower)
if is_train:
delta = softmax_output - labels
# compute delta for lower layer first
# because current error is based on current weights
partial_delta_for_lower = np.dot(delta, self.weights)
# send to lower layer
self.backward_to_lower(batch_id, partial_delta_for_lower, labels)
# cross entropy loss
if batch_id % 100 == 0:
total_loss = np.log(softmax_output) * labels # pylint: disable=no-member
# print("total loss: ", np.sum(total_loss))
loss = -1 * np.sum(total_loss) / labels.shape[0]
print("For batch id {}, avg loss: {}".format(batch_id, loss))
# update weights
self.update_weights(self.lr, delta, outputs_of_lower)
else:
# test evaluation
pred_results = np.argmax(softmax_output, axis=1)
matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))
print("Epoch {}, Performance test {} / {}".format(
-1*batch_id, matched, labels.shape[0]))
return nn_pb.PlainResponse(message="Inputs received at {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
""" No upper layer"""
print("Error: No upper layer for output layer")
return nn_pb.PlainResponse(message="Invalid Operation!!")
| OutputLayer | identifier_name |
layers.py | import gzip
import pickle as pkl
import time
from datetime import datetime
import grpc
import numpy as np
from sklearn.utils import shuffle
import neural_nets_pb2 as nn_pb
import neural_nets_pb2_grpc as nn_pb_grpc
from mnist_loader import load_data
from activations import *
# pylint: disable=too-many-arguments
class Layer(nn_pb_grpc.LayerDataExchangeServicer):
"""
abstract layer extract common methods
"""
# pylint: disable=too-many-arguments
def __init__(self, layer_name, upper_layer, lower_layer,
lower_layer_nodes, current_layer_nodes,
nonlin, nonlin_prime):
"""
datasets : the path of mnist dataset
nonlin: activation function
nonlin_prime: the derivative of activation function
"""
self.layer_name = layer_name
self.upper_layer_addr = upper_layer
self.lower_layer_addr = lower_layer
self.nonlin = nonlin
self.nonlin_prime = nonlin_prime
# lazy initialization
self.upper_layer_stub = None
self.lower_layer_stub = None
# weights dimension
self.weights_shape = (current_layer_nodes, lower_layer_nodes)
self.weights = None
self.biases = None
# record outputs from lower layer
# use batch id as key
# Purposes:
# 1) used for computing the weighted sum of current layer
# 2) used for computing the gradients for updating weights of current layer
self.lower_layer_outputs = {}
# computed from lower layer outputs for cache purpose
# cache for computing delta for current layer
# delta = partial_delta_rec * nonlin_prime(weighted_sum)
# with different batch we have different weighted sum
self.weighted_sum_inputs = {}
def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):
"""
forward output to upper layer
"""
if not self.upper_layer_stub:
self.create_upper_stub()
# convert numpy array to byte string
bytes_matrix = pkl.dumps(forward_matrix, 2)
bytes_labels = pkl.dumps(forward_labels, 2)
# send message to next layer
res = self.upper_layer_stub.UpdateInput(
nn_pb.ForwardMsg(batch_id=batch_id,
output_matrix=bytes_matrix,
labels=bytes_labels,
is_train=istrain))
# print("get response form upper layer", res.message)
def backward_to_lower(self, batch_id, partial_delta, labels):
"""
back propagate error partial_delta to lower layer
partial_delta = dot(self.weights.T, self.delta)
self.delta = delta_received_from_upper * nonlin_prime(z)
"""
# create stub for lower layer
if not self.lower_layer_stub:
self.create_lower_stub()
# convert partial_delta matrix to bytes string
bytes_delta = pkl.dumps(partial_delta)
bytes_labels = pkl.dumps(labels)
res = self.lower_layer_stub.UpdateDelta(
nn_pb.BackwardMsg(batch_id=batch_id,
partial_delta=bytes_delta,
labels=bytes_labels))
# print("get response from lower layer", res.message)
def create_upper_stub(self):
""" create upper_layer_stub for exchanging data between grpc"""
if self.upper_layer_addr:
channel = grpc.insecure_channel(self.upper_layer_addr)
self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no upper layer has been specified")
def create_lower_stub(self):
""" stub for lower layer communication"""
if self.lower_layer_addr:
channel = grpc.insecure_channel(self.lower_layer_addr)
self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no lower layer has been specified")
def init_weights(self, load_weights=None):
"""
if load_weights is specified load the trained weights
"""
if load_weights:
# TODO
pass
else:
# x: lower layer nodes n
# y: current layer nodes n
x = self.weights_shape[1]
y = self.weights_shape[0]
self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member
self.biases = np.random.randn(y, 1) # pylint: disable=no-member
def check_weights(self):
if self.weights is None or self.biases is None:
print("Weights of {} have not initialized".format(self.layer_name))
import sys
sys.exit(-1)
def update_weights(self, lr, delta, outputs_of_lower):
"""
outputs of lower: equals to inputs of this layer
"""
delta_shape = delta.shape
inputs_shape = outputs_of_lower.shape
# update biases
avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)
self.biases = self.biases - lr * avg_delta
# compute gradients for weights
delta = delta.reshape(delta_shape[0], delta_shape[1], 1)
inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])
gradients = delta * inputs
gradients_avg = np.mean(gradients, axis=0)
self.weights = self.weights - lr * gradients_avg
def parse_forward_msg(self, req):
""" extract and transform data in forward message"""
batch_id = req.batch_id
bytes_outputs_of_lower = req.output_matrix
bytes_labels = req.labels
is_train = req.is_train
outputs_of_lower = pkl.loads(bytes_outputs_of_lower)
labels = pkl.loads(bytes_labels)
return batch_id, outputs_of_lower, labels, is_train
# implementing rpc services
def UpdateInput(self, request, context):
# implemented in Hidden Layer and Output Layer
pass
def UpdateDelta(self, request, context):
""" Invoked by upper layer
will be implemented by hidden layer
"""
pass
class InputLayer(Layer):
""" for input data"""
def __init__(self, upper_layer, data_path, input_dim, layer_name="input"):
super().__init__(layer_name, upper_layer,
None, None, input_dim,
None, None)
self.train, self.val, self.test = load_data(data_path)
def start_feed_data(self, batch_size, epochs):
""""""
train_X = self.train[0]
train_y = self.train[1]
val_X = self.val[0]
val_y = self.val[1]
train_size = train_X.shape[0]
batch_id = 0
test_batch_id = -1 # use negative number, diff with batch_id
for i in range(epochs):
print("Start feed {0} epoch data".format(i))
train_X, train_y = shuffle(train_X, train_y)
for j in range(0, train_size, batch_size):
minibatch_X = train_X[j:j+batch_size]
minibatch_y = train_y[j:j+batch_size]
self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)
batch_id += 1
# send test data for evaluation
self.forward_to_upper(test_batch_id, val_X, val_y, False)
test_batch_id -= 1
def UpdateInput(self, req, ctx):
""""""
print("Should not have lower layer")
return nn_pb.PlainResponse(message="Wrong invoke!")
def UpdateDelta(self, req, ctx):
""""""
batch_id = req.batch_id
if batch_id % 100 == 0:
print("Complete backpropagation for batch {} at {}".format(
batch_id,
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return nn_pb.PlainResponse(message="Received at layer {}".format(
self.layer_name))
class HiddenLayer(Layer):
""" hidden layer"""
def __init__(self, layer_name,
upper_layer,
lower_layer,
lower_layer_size,
layer_size,
nonlin,
nonlin_prime,
learning_rate,
enable_synthetic_gradients,
sg_learning_rate
):
"""
enable_synthetic_gradients: whether use synthetic gradients
to do error approximating
"""
super().__init__(layer_name, upper_layer,
lower_layer, lower_layer_size,
layer_size, nonlin,
nonlin_prime)
self.lr = learning_rate
self.enable_sg = enable_synthetic_gradients
self.sg_lr = sg_learning_rate
self.sg_weights = None
self.sg_deltas = {}
def init_sg_weights(self):
""" using linear synthetic gradients model
SG(h, y) = hA + yB + C
refer to paper, Understanding synthetic gradients and decoupled neural networks
"""
n = self.weights_shape[0] # size of current layer
# pylint: disable=no-member
A = np.random.randn(n, n) / np.sqrt(n)
B = np.random.randn(10, n) / np.sqrt(n)
C = np.random.randn(1, n) / np.sqrt(n)
# pylint: enable=no-member
self.sg_weights = [A, B, C]
def check_sg_weights(self):
if self.sg_weights is None:
self.init_sg_weights()
def SG(self, h, y):
""" generate delta by weighted sum and label
h: outputs of this layer
y: labels for this batch
"""
self.check_sg_weights()
A = self.sg_weights[0] #(n, n)
B = self.sg_weights[1] #(10, n)
C = self.sg_weights[2] #(1, n)
delta = np.matmul(h, A) + np.matmul(y, B) + C
return delta
def update_sg_weights(self, true_delta, batch_id):
""" name conventions refer paper :
Understanding synthetic gradients and decoupled neural interface
TODO: synthetic gradient estimates the partial delta instead true gradients
"""
sg_delta = self.sg_deltas[batch_id]
weighted_sum = self.weighted_sum_inputs[batch_id]
labels = self.lower_layer_outputs[batch_id]['labels']
y = labels
h = self.nonlin(weighted_sum)
Err = sg_delta - true_delta
A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]
B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]
C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)
self.sg_weights = [A, B, C]
# del stored delta
del self.sg_deltas[batch_id]
def UpdateInput(self, request, context):
""" Invoked by lower layer
Once inputs updated, start computing the weighted sum
then activation outputs,
then forward outputs to next layer
request: ForwardMsg
"""
self.check_weights()
# get values from message
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)
print("Get inputs id: {0}, matrix shape: {1}, labels shape: {2}".format(
batch_id, outputs_of_lower.shape, labels.shape))
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
# saving inputs during training, because for weights updating
if is_train:
inputs = {'matrix': outputs_of_lower,
'labels': labels}
self.lower_layer_outputs[batch_id] = inputs
self.weighted_sum_inputs[batch_id] = weighted_sum
activations = self.nonlin(weighted_sum) # apply element wise
# update weights immediately with SG, if enabled SG
if self.enable_sg and is_train:
print("update weights based on SG delta")
sg_delta = self.SG(activations, labels)
# TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)
self.update_weights(self.lr, sg_delta, outputs_of_lower)
self.sg_deltas[batch_id] = sg_delta
# forward layer outputs
self.forward_to_upper(batch_id, activations, labels, is_train)
print("batch id: {0}, activations shape {1}".format(
batch_id, activations.shape))
# return received
return nn_pb.PlainResponse(message="Inputs received by layer {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
"""
delta shape: (batch_size, size_of_current_layer)
req: BackwardMsg
"""
batch_id = req.batch_id
bytes_partial_delta = req.partial_delta
partial_delta = pkl.loads(bytes_partial_delta)
bytes_labels = req.labels # variable currently not useful
labels = pkl.loads(bytes_labels)
# compute delta for current layer
z = self.weighted_sum_inputs[batch_id]
z_nonlin_prime = self.nonlin_prime(z)
# shape of delta: (batch_size, size_of_layer)
delta = partial_delta * z_nonlin_prime
# compute partial delta for lower layer
partial_delta_for_lower = np.dot(delta, self.weights)
# send partial delta to lower layer
self.backward_to_lower(batch_id,
partial_delta_for_lower,
labels)
if self.enable_sg:
# train the SG
# TODO pass partial delta instead
self.update_sg_weights(delta, batch_id)
else:
# update weights regularly
inputs = self.lower_layer_outputs[batch_id]['matrix']
self.update_weights(self.lr, delta, inputs)
# delete stored for weighted sum
del self.weighted_sum_inputs[batch_id]
# delete stored for lower layer outputs
del self.lower_layer_outputs[batch_id]
return nn_pb.PlainResponse(
message="Partial delta received at {}".format(self.layer_name))
class OutputLayer(Layer):
""" output layer
computing the error based on labels and prediction
using softmax as output activations and cross entropy loss
"""
def __init__(self, layer_name, lower_layer, lower_layer_size,
num_classes, learning_rate ):
super().__init__(layer_name, None,
lower_layer,
lower_layer_size,
num_classes,
None,
None)
self.lr = learning_rate
def UpdateInput(self, req, ctx):
|
def UpdateDelta(self, req, ctx):
""" No upper layer"""
print("Error: No upper layer for output layer")
return nn_pb.PlainResponse(message="Invalid Operation!!")
| """ once received input from lower layer:
compute weighted sum -> softmax output -> loss -> back propagate
"""
self.check_weights()
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
softmax_output = softmax(weighted_sum, axis=1)
# print("weighted sum", weighted_sum)
# print("outputs of lower", outputs_of_lower)
if is_train:
delta = softmax_output - labels
# compute delta for lower layer first
# because current error is based on current weights
partial_delta_for_lower = np.dot(delta, self.weights)
# send to lower layer
self.backward_to_lower(batch_id, partial_delta_for_lower, labels)
# cross entropy loss
if batch_id % 100 == 0:
total_loss = np.log(softmax_output) * labels # pylint: disable=no-member
# print("total loss: ", np.sum(total_loss))
loss = -1 * np.sum(total_loss) / labels.shape[0]
print("For batch id {}, avg loss: {}".format(batch_id, loss))
# update weights
self.update_weights(self.lr, delta, outputs_of_lower)
else:
# test evaluation
pred_results = np.argmax(softmax_output, axis=1)
matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))
print("Epoch {}, Performance test {} / {}".format(
-1*batch_id, matched, labels.shape[0]))
return nn_pb.PlainResponse(message="Inputs received at {}".format(
self.layer_name)) | identifier_body |
main.rs | #[macro_use]
extern crate log;
extern crate mio_multithread_unix;
extern crate env_logger;
extern crate httparse;
extern crate mio;
extern crate net2;
extern crate num_cpus;
extern crate slab;
extern crate time;
use std::ascii::AsciiExt;
use std::env;
use std::fmt;
use std::io::{self, Read, Write};
use std::mem;
use std::net::SocketAddr;
use std::panic;
use std::slice;
use std::str;
use std::thread;
use net2::unix::*;
use mio::tcp::{TcpStream, TcpListener};
use mio::{Poll, Token, EventSet, PollOpt, Events};
use slab::Slab;
const LISTENER: mio::Token = mio::Token(0);
struct Server<'a> {
count: u64,
listener: &'a TcpListener,
connections: Slab<Connection, usize>,
}
impl<'a> Server<'a> {
fn new(listener: &'a TcpListener) -> Server<'a> {
Server {
count: 0,
listener: listener,
connections: Slab::new_starting_at(1, 1024),
}
}
fn ready(&mut self,
poll: &Poll,
token: Token,
events: EventSet) {
debug!("{:?} {:?}", token, events);
if token == LISTENER {
while let Ok(Some(socket)) = self.listener.accept() {
debug!("accepted");
self.count += 1;
// socket.0.set_nodelay(true).unwrap();
let token = self.connections
.insert_with(move |_| Connection::new(socket.0))
.unwrap();
let token = mio::Token(token);
poll.register(&self.connections[token.into()].socket,
token,
EventSet::readable() | EventSet::writable(),
PollOpt::edge()).unwrap();
self.try_connection(token, EventSet::all());
}
} else {
self.try_connection(token, events);
}
}
fn try_connection(&mut self, token: Token, events: EventSet) {
let token = token.into();
// Simulate a `catch_unwind` that a real server would do anyway
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
self.connections[token].ready(events)
})).expect("oh no it panicked!");
if res.is_err() || self.connections[token].closed {
if let Err(ref e) = res {
info!("error: {:?}\non: {:?} {:?}", e,
token, self.connections[token].socket);
}
debug!("removing");
self.connections.remove(token);
}
}
} |
struct Connection {
socket: TcpStream,
input: Vec<u8>,
output: Output,
keepalive: bool,
closed: bool,
read_closed: bool,
events: EventSet,
}
struct Output {
buf: Vec<u8>,
}
impl Connection {
fn new(socket: TcpStream) -> Connection {
Connection {
socket: socket,
input: Vec::with_capacity(2048),
output: Output {
buf: Vec::with_capacity(2048),
},
keepalive: false,
read_closed: false,
closed: false,
events: EventSet::none(),
}
}
fn ready(&mut self, events: EventSet) -> io::Result<()> {
self.events = self.events | events;
while self.events.is_readable() && !self.read_closed {
let before = self.input.len();
let eof = try!(read(&mut self.socket,
&mut self.input,
&mut self.events));
if eof {
debug!("eof");
}
self.read_closed = eof;
if self.input.len() == before {
break
}
while self.input.len() > 0 {
let (req, amt) = match try!(parse(&self.input)) {
Some(pair) => pair,
None => {
debug!("need more data for a request");
return Ok(())
}
};
let request = Request {
inner: req,
amt: amt,
data: mem::replace(&mut self.input, Vec::new()),
};
debug!("got a request");
self.keepalive = request.version() >= 1;
self.keepalive = self.keepalive || request.headers().any(|s| {
s.0.eq_ignore_ascii_case("connection") &&
s.1.eq_ignore_ascii_case(b"keep-alive")
});
let response = process(&request);
response.to_bytes(&mut self.output.buf);
self.input = request.into_input_buf();
if !self.keepalive {
debug!("disabling keepalive");
self.read_closed = true;
}
}
}
if self.events.is_writable() && self.output.buf.len() > 0 {
let done = try!(write(&mut self.socket,
&mut self.output,
&mut self.events));
if done {
debug!("wrote response");
if !self.keepalive || self.read_closed {
self.closed = true;
}
}
}
if self.read_closed && self.output.buf.len() == 0 {
self.closed = true;
}
Ok(())
}
}
fn process(r: &Request) -> Response {
assert!(r.path() == "/plaintext");
let mut r = Response::new();
r.header("Content-Type", "text/plain; charset=UTF-8")
.body("Hello, World!");
return r
}
type Slice = (usize, usize);
#[allow(dead_code)]
pub struct Request {
inner: RawRequest,
data: Vec<u8>,
amt: usize,
}
struct RawRequest {
method: Slice,
path: Slice,
version: u8,
headers: Vec<(Slice, Slice)>,
}
pub struct Headers<'a> {
iter: slice::Iter<'a, (Slice, Slice)>,
req: &'a Request,
}
impl Request {
pub fn method(&self) -> &str {
str::from_utf8(self.get(&self.inner.method)).unwrap()
}
pub fn path(&self) -> &str {
str::from_utf8(self.get(&self.inner.path)).unwrap()
}
pub fn version(&self) -> u8 {
self.inner.version
}
pub fn headers(&self) -> Headers {
Headers {
iter: self.inner.headers.iter(),
req: self,
}
}
pub fn into_input_buf(mut self) -> Vec<u8> {
self.data.drain(..self.amt);
self.data
}
fn get(&self, s: &Slice) -> &[u8] {
&self.data[s.0..s.1]
}
}
impl<'a> Iterator for Headers<'a> {
type Item = (&'a str, &'a [u8]);
fn next(&mut self) -> Option<(&'a str, &'a [u8])> {
self.iter.next().map(|&(ref k, ref v)| {
(str::from_utf8(self.req.get(k)).unwrap(), self.req.get(v))
})
}
}
fn parse(buf: &[u8]) -> io::Result<Option<(RawRequest, usize)>> {
let mut headers = [httparse::EMPTY_HEADER; 16];
let mut r = httparse::Request::new(&mut headers);
let status = try!(r.parse(&buf).map_err(|_| {
io::Error::new(io::ErrorKind::Other, "failed to parse")
}));
return match status {
httparse::Status::Complete(amt) => {
debug!("ok {:?}", String::from_utf8_lossy(&buf[..amt]));
Ok(Some((RawRequest {
method: slice(buf, r.method.unwrap().as_bytes()),
path: slice(buf, r.path.unwrap().as_bytes()),
version: r.version.unwrap(),
headers: r.headers.iter().map(|h| {
(slice(buf, h.name.as_bytes()), slice(buf, &h.value))
}).collect(),
}, amt)))
}
httparse::Status::Partial => Ok(None),
};
fn slice(buf: &[u8], inner: &[u8]) -> Slice {
let start = inner.as_ptr() as usize - buf.as_ptr() as usize;
assert!(start < buf.len());
(start, start + inner.len())
}
}
pub struct Response {
headers: Vec<(String, String)>,
response: String,
}
impl Response {
pub fn new() -> Response {
Response {
headers: Vec::new(),
response: String::new(),
}
}
pub fn header(&mut self, name: &str, val: &str) -> &mut Response {
self.headers.push((name.to_string(), val.to_string()));
self
}
pub fn body(&mut self, s: &str) -> &mut Response {
self.response = s.to_string();
self
}
fn to_bytes(&self, into: &mut Vec<u8>) {
use std::fmt::Write;
write!(FastWrite(into), "\
HTTP/1.1 200 OK\r\n\
Server: Example\r\n\
Date: {}\r\n\
Content-Length: {}\r\n\
", mio_multithread_unix::date::now(), self.response.len()).unwrap();
for &(ref k, ref v) in &self.headers {
extend(into, k.as_bytes());
extend(into, b": ");
extend(into, v.as_bytes());
extend(into, b"\r\n");
}
extend(into, b"\r\n");
extend(into, self.response.as_bytes());
}
}
// TODO: impl fmt::Write for Vec<u8>
//
// Right now `write!` on `Vec<u8>` goes through io::Write and is not super
// speedy, so inline a less-crufty implementation here which doesn't go through
// io::Error.
struct FastWrite<'a>(&'a mut Vec<u8>);
impl<'a> fmt::Write for FastWrite<'a> {
fn write_str(&mut self, s: &str) -> fmt::Result {
extend(self.0, s.as_bytes());
Ok(())
}
}
// TODO: why does extend_from_slice not optimize?
fn extend(dst: &mut Vec<u8>, data: &[u8]) {
use std::ptr;
dst.reserve(data.len());
let prev = dst.len();
unsafe {
ptr::copy_nonoverlapping(data.as_ptr(),
dst.as_mut_ptr().offset(prev as isize),
data.len());
dst.set_len(prev + data.len());
}
}
fn read(socket: &mut TcpStream,
input: &mut Vec<u8>,
events: &mut EventSet) -> io::Result<bool> {
match socket.read(unsafe { slice_to_end(input) }) {
Ok(0) => return Ok(true),
Ok(n) => {
let len = input.len();
unsafe { input.set_len(len + n); }
return Ok(false)
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
*events = *events & !EventSet::readable();
return Ok(false)
}
Err(e) => return Err(e),
}
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
use std::slice;
if v.capacity() == 0 {
v.reserve(16);
}
if v.capacity() == v.len() {
v.reserve(1);
}
slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
v.capacity() - v.len())
}
}
fn write(socket: &mut TcpStream,
output: &mut Output,
events: &mut EventSet) -> io::Result<bool> {
assert!(output.buf.len() > 0);
loop {
match socket.write(&output.buf) {
Ok(0) => {
return Err(io::Error::new(io::ErrorKind::Other, "early eof2"))
}
Ok(n) => {
output.buf.drain(..n);
if output.buf.len() == 0 {
return Ok(true)
}
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
*events = *events & !EventSet::writable();
return Ok(false)
}
Err(e) => return Err(e),
}
}
}
fn main() {
env_logger::init().unwrap();
let threads = (0..num_cpus::get()).map(|_| {
thread::spawn(|| {
let poll = mio::Poll::new().unwrap();
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let socket = net2::TcpBuilder::new_v4().unwrap();
socket.reuse_address(true).unwrap();
socket.reuse_port(true).unwrap();
socket.bind(&addr).unwrap();
let listener = socket.listen(2048).unwrap();
let listener = TcpListener::from_listener(listener, &addr).unwrap();
poll.register(&listener,
LISTENER,
EventSet::readable(),
PollOpt::level()).unwrap();
let mut events = Events::new();
let mut server = Server::new(&listener);
loop {
poll.poll(&mut events, None).unwrap();
for i in 0..events.len() {
let event = events.get(i).unwrap();
server.ready(&poll, event.token(), event.kind());
}
}
})
}).collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
} | random_line_split | |
main.rs | #[macro_use]
extern crate log;
extern crate mio_multithread_unix;
extern crate env_logger;
extern crate httparse;
extern crate mio;
extern crate net2;
extern crate num_cpus;
extern crate slab;
extern crate time;
use std::ascii::AsciiExt;
use std::env;
use std::fmt;
use std::io::{self, Read, Write};
use std::mem;
use std::net::SocketAddr;
use std::panic;
use std::slice;
use std::str;
use std::thread;
use net2::unix::*;
use mio::tcp::{TcpStream, TcpListener};
use mio::{Poll, Token, EventSet, PollOpt, Events};
use slab::Slab;
const LISTENER: mio::Token = mio::Token(0);
struct | <'a> {
count: u64,
listener: &'a TcpListener,
connections: Slab<Connection, usize>,
}
impl<'a> Server<'a> {
fn new(listener: &'a TcpListener) -> Server<'a> {
Server {
count: 0,
listener: listener,
connections: Slab::new_starting_at(1, 1024),
}
}
fn ready(&mut self,
poll: &Poll,
token: Token,
events: EventSet) {
debug!("{:?} {:?}", token, events);
if token == LISTENER {
while let Ok(Some(socket)) = self.listener.accept() {
debug!("accepted");
self.count += 1;
// socket.0.set_nodelay(true).unwrap();
let token = self.connections
.insert_with(move |_| Connection::new(socket.0))
.unwrap();
let token = mio::Token(token);
poll.register(&self.connections[token.into()].socket,
token,
EventSet::readable() | EventSet::writable(),
PollOpt::edge()).unwrap();
self.try_connection(token, EventSet::all());
}
} else {
self.try_connection(token, events);
}
}
fn try_connection(&mut self, token: Token, events: EventSet) {
let token = token.into();
// Simulate a `catch_unwind` that a real server would do anyway
let res = panic::catch_unwind(panic::AssertUnwindSafe(|| {
self.connections[token].ready(events)
})).expect("oh no it panicked!");
if res.is_err() || self.connections[token].closed {
if let Err(ref e) = res {
info!("error: {:?}\non: {:?} {:?}", e,
token, self.connections[token].socket);
}
debug!("removing");
self.connections.remove(token);
}
}
}
struct Connection {
socket: TcpStream,
input: Vec<u8>,
output: Output,
keepalive: bool,
closed: bool,
read_closed: bool,
events: EventSet,
}
struct Output {
buf: Vec<u8>,
}
impl Connection {
fn new(socket: TcpStream) -> Connection {
Connection {
socket: socket,
input: Vec::with_capacity(2048),
output: Output {
buf: Vec::with_capacity(2048),
},
keepalive: false,
read_closed: false,
closed: false,
events: EventSet::none(),
}
}
fn ready(&mut self, events: EventSet) -> io::Result<()> {
self.events = self.events | events;
while self.events.is_readable() && !self.read_closed {
let before = self.input.len();
let eof = try!(read(&mut self.socket,
&mut self.input,
&mut self.events));
if eof {
debug!("eof");
}
self.read_closed = eof;
if self.input.len() == before {
break
}
while self.input.len() > 0 {
let (req, amt) = match try!(parse(&self.input)) {
Some(pair) => pair,
None => {
debug!("need more data for a request");
return Ok(())
}
};
let request = Request {
inner: req,
amt: amt,
data: mem::replace(&mut self.input, Vec::new()),
};
debug!("got a request");
self.keepalive = request.version() >= 1;
self.keepalive = self.keepalive || request.headers().any(|s| {
s.0.eq_ignore_ascii_case("connection") &&
s.1.eq_ignore_ascii_case(b"keep-alive")
});
let response = process(&request);
response.to_bytes(&mut self.output.buf);
self.input = request.into_input_buf();
if !self.keepalive {
debug!("disabling keepalive");
self.read_closed = true;
}
}
}
if self.events.is_writable() && self.output.buf.len() > 0 {
let done = try!(write(&mut self.socket,
&mut self.output,
&mut self.events));
if done {
debug!("wrote response");
if !self.keepalive || self.read_closed {
self.closed = true;
}
}
}
if self.read_closed && self.output.buf.len() == 0 {
self.closed = true;
}
Ok(())
}
}
fn process(r: &Request) -> Response {
assert!(r.path() == "/plaintext");
let mut r = Response::new();
r.header("Content-Type", "text/plain; charset=UTF-8")
.body("Hello, World!");
return r
}
type Slice = (usize, usize);
#[allow(dead_code)]
pub struct Request {
inner: RawRequest,
data: Vec<u8>,
amt: usize,
}
struct RawRequest {
method: Slice,
path: Slice,
version: u8,
headers: Vec<(Slice, Slice)>,
}
pub struct Headers<'a> {
iter: slice::Iter<'a, (Slice, Slice)>,
req: &'a Request,
}
impl Request {
pub fn method(&self) -> &str {
str::from_utf8(self.get(&self.inner.method)).unwrap()
}
pub fn path(&self) -> &str {
str::from_utf8(self.get(&self.inner.path)).unwrap()
}
pub fn version(&self) -> u8 {
self.inner.version
}
pub fn headers(&self) -> Headers {
Headers {
iter: self.inner.headers.iter(),
req: self,
}
}
pub fn into_input_buf(mut self) -> Vec<u8> {
self.data.drain(..self.amt);
self.data
}
fn get(&self, s: &Slice) -> &[u8] {
&self.data[s.0..s.1]
}
}
impl<'a> Iterator for Headers<'a> {
type Item = (&'a str, &'a [u8]);
fn next(&mut self) -> Option<(&'a str, &'a [u8])> {
self.iter.next().map(|&(ref k, ref v)| {
(str::from_utf8(self.req.get(k)).unwrap(), self.req.get(v))
})
}
}
fn parse(buf: &[u8]) -> io::Result<Option<(RawRequest, usize)>> {
let mut headers = [httparse::EMPTY_HEADER; 16];
let mut r = httparse::Request::new(&mut headers);
let status = try!(r.parse(&buf).map_err(|_| {
io::Error::new(io::ErrorKind::Other, "failed to parse")
}));
return match status {
httparse::Status::Complete(amt) => {
debug!("ok {:?}", String::from_utf8_lossy(&buf[..amt]));
Ok(Some((RawRequest {
method: slice(buf, r.method.unwrap().as_bytes()),
path: slice(buf, r.path.unwrap().as_bytes()),
version: r.version.unwrap(),
headers: r.headers.iter().map(|h| {
(slice(buf, h.name.as_bytes()), slice(buf, &h.value))
}).collect(),
}, amt)))
}
httparse::Status::Partial => Ok(None),
};
fn slice(buf: &[u8], inner: &[u8]) -> Slice {
let start = inner.as_ptr() as usize - buf.as_ptr() as usize;
assert!(start < buf.len());
(start, start + inner.len())
}
}
pub struct Response {
headers: Vec<(String, String)>,
response: String,
}
impl Response {
pub fn new() -> Response {
Response {
headers: Vec::new(),
response: String::new(),
}
}
pub fn header(&mut self, name: &str, val: &str) -> &mut Response {
self.headers.push((name.to_string(), val.to_string()));
self
}
pub fn body(&mut self, s: &str) -> &mut Response {
self.response = s.to_string();
self
}
fn to_bytes(&self, into: &mut Vec<u8>) {
use std::fmt::Write;
write!(FastWrite(into), "\
HTTP/1.1 200 OK\r\n\
Server: Example\r\n\
Date: {}\r\n\
Content-Length: {}\r\n\
", mio_multithread_unix::date::now(), self.response.len()).unwrap();
for &(ref k, ref v) in &self.headers {
extend(into, k.as_bytes());
extend(into, b": ");
extend(into, v.as_bytes());
extend(into, b"\r\n");
}
extend(into, b"\r\n");
extend(into, self.response.as_bytes());
}
}
// TODO: impl fmt::Write for Vec<u8>
//
// Right now `write!` on `Vec<u8>` goes through io::Write and is not super
// speedy, so inline a less-crufty implementation here which doesn't go through
// io::Error.
struct FastWrite<'a>(&'a mut Vec<u8>);
impl<'a> fmt::Write for FastWrite<'a> {
fn write_str(&mut self, s: &str) -> fmt::Result {
extend(self.0, s.as_bytes());
Ok(())
}
}
// TODO: why does extend_from_slice not optimize?
fn extend(dst: &mut Vec<u8>, data: &[u8]) {
use std::ptr;
dst.reserve(data.len());
let prev = dst.len();
unsafe {
ptr::copy_nonoverlapping(data.as_ptr(),
dst.as_mut_ptr().offset(prev as isize),
data.len());
dst.set_len(prev + data.len());
}
}
fn read(socket: &mut TcpStream,
input: &mut Vec<u8>,
events: &mut EventSet) -> io::Result<bool> {
match socket.read(unsafe { slice_to_end(input) }) {
Ok(0) => return Ok(true),
Ok(n) => {
let len = input.len();
unsafe { input.set_len(len + n); }
return Ok(false)
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
*events = *events & !EventSet::readable();
return Ok(false)
}
Err(e) => return Err(e),
}
unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
use std::slice;
if v.capacity() == 0 {
v.reserve(16);
}
if v.capacity() == v.len() {
v.reserve(1);
}
slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
v.capacity() - v.len())
}
}
fn write(socket: &mut TcpStream,
output: &mut Output,
events: &mut EventSet) -> io::Result<bool> {
assert!(output.buf.len() > 0);
loop {
match socket.write(&output.buf) {
Ok(0) => {
return Err(io::Error::new(io::ErrorKind::Other, "early eof2"))
}
Ok(n) => {
output.buf.drain(..n);
if output.buf.len() == 0 {
return Ok(true)
}
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
*events = *events & !EventSet::writable();
return Ok(false)
}
Err(e) => return Err(e),
}
}
}
fn main() {
env_logger::init().unwrap();
let threads = (0..num_cpus::get()).map(|_| {
thread::spawn(|| {
let poll = mio::Poll::new().unwrap();
let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
let addr = addr.parse::<SocketAddr>().unwrap();
let socket = net2::TcpBuilder::new_v4().unwrap();
socket.reuse_address(true).unwrap();
socket.reuse_port(true).unwrap();
socket.bind(&addr).unwrap();
let listener = socket.listen(2048).unwrap();
let listener = TcpListener::from_listener(listener, &addr).unwrap();
poll.register(&listener,
LISTENER,
EventSet::readable(),
PollOpt::level()).unwrap();
let mut events = Events::new();
let mut server = Server::new(&listener);
loop {
poll.poll(&mut events, None).unwrap();
for i in 0..events.len() {
let event = events.get(i).unwrap();
server.ready(&poll, event.token(), event.kind());
}
}
})
}).collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
}
| Server | identifier_name |
activedirectory.component.ts | import {ApplicationRef, Component, Injector, OnInit} from '@angular/core';
import {ActivatedRoute, Router, RouterModule} from '@angular/router';
import * as _ from 'lodash';
import {Subscription} from 'rxjs/Subscription';
import {RestService, SystemGeneralService, WebSocketService} from '../../../services/';
import {FieldConfig} from '../../common/entity/entity-form/models/field-config.interface';
import { DialogService } from '../../../services/';
import { Validators } from '@angular/forms';
@Component({
selector : 'app-activedirectory',
template : '<entity-form [conf]="this"></entity-form>',
})
export class ActiveDirectoryComponent {
protected resource_name = 'directoryservice/activedirectory';
protected isBasicMode = true;
protected idmapBacked: any;
protected ad_certificate: any;
protected ad_kerberos_realm: any;
protected ad_kerberos_principal: any;
protected ad_ssl: any;
protected ad_idmap_backend: any;
protected ad_nss_info: any;
protected ad_ldap_sasl_wrapping: any;
public custActions: Array<any> = [
{
'id' : 'basic_mode',
'name' : 'Basic Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'advanced_mode',
'name' : 'Advanced Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'edit_idmap',
'name' : 'Edit Idmap',
function : () => {
this.router.navigate(new Array('').concat(['directoryservice','idmap', this.idmapBacked, 'activedirectory']));
}
},
{
'id' : 'ds_clearcache',
'name' : 'Rebuild Directory Service Cache',
function : async () => {
this.ws.call('notifier.ds_clearcache').subscribe((cache_status)=>{
this.dialogservice.Info("Active Directory", "The cache is being rebuilt.");
})
}
}
];
public fieldConfig: FieldConfig[] = [
{
type : 'input',
name : 'ad_domainname',
placeholder : 'Domain Name',
tooltip : 'Name of Active Directory domain (<i>example.com</i>)\
or child domain (<i>sales.example.com</i>). This setting is mandatory\
and the GUI refuses to save the settings if the domain controller\
for the specified domain cannot be found.',
required: true,
validation : [ Validators.required ]
},
{
type : 'input',
name : 'ad_bindname',
placeholder : 'Domain Account Name',
tooltip : 'Name of the Active Directory administrator account.\
This setting is mandatory and the GUI refuses to save the settings\
if it cannot connect to the domain controller using this account name.',
},
{
type : 'input',
name : 'ad_bindpw',
placeholder : 'Domain Account Password',
tooltip : 'Password for the Active Directory administrator\
account. This setting is mandatory and the GUI refuses to save the\
settings if it cannot connect to the domain controller using this\
password.',
inputType : 'password'
},
{
type : 'input',
name : 'ad_monitor_frequency',
placeholder : 'AD check connectivity frequency (seconds)',
tooltip : 'How often to verify that Active Directory services are\
active.',
},
{
type : 'input',
name : 'ad_recover_retry',
placeholder : 'How many recovery attempts',
tooltip : 'Number of times to attempt reconnecting to the Active\
directory server. Tries forever when set to <i>0</i>.',
},
{
type : 'checkbox',
name : 'ad_enable_monitor',
placeholder : 'Enable AD Monitoring',
tooltip : 'Restart Active Directory automatically if the service\
is disconnected.',
},
{
type : 'select',
name : 'ad_ssl',
placeholder : 'Encryption Mode',
tooltip : 'Choices are <i>Off, SSL</i> or <i>TLS</i>.',
options : []
},
{
type : 'select',
name : 'ad_certificate',
placeholder : 'Certificate',
tooltip : 'Select the certificate of the Active Directory server\
if SSL connections are used. If a certificate does not exist yet,\
create a <a href="http://doc.freenas.org/11/system.html#cas"\
target="_blank">CA</a>, then create a certificate on the Active\
Directory server and import it to the FreeNAS system with\
<a href="http://doc.freenas.org/11/system.html#certificates"\
target="_blank">Certificates</a>.',
options : []
},
{
type : 'checkbox',
name : 'ad_verbose_logging',
placeholder : 'Verbose logging',
tooltip : 'When checked, logs attempts to join the domain to\
<b>/var/log/messages</b>.',
},
{
type : 'checkbox',
name : 'ad_unix_extensions',
placeholder : 'UNIX extensions',
tooltip : '<b>Only</b> check this box if the AD server has been\
explicitly configured to map permissions for UNIX users. Checking this\
box provides persistent UIDs and GUIDs, otherwise, users and groups\
are mapped to the UID or GUID range configured in Samba.',
},
{
type : 'checkbox',
name : 'ad_allow_trusted_doms',
placeholder : 'Allow Trusted Domains',
tooltip : 'Should only be enabled if network has active <a\
href="https://technet.microsoft.com/en-us/library/cc757352(WS.10).aspx"\
target="_blank">domain/forest trusts</a> and you need to manage files\
on multiple domains. use with caution as it will generate more\
winbind traffic, slowing down the ability to filter through\
user/group info.',
},
{
type : 'checkbox',
name : 'ad_use_default_domain',
placeholder : 'Use Default Domain',
tooltip : 'When unchecked, the domain name is prepended to the\
username. If <b>Allow Trusted Domains</b> is checked and multiple\
domains use the same usernames, uncheck this box to prevent name\
collisions.',
},
{
type : 'checkbox',
name : 'ad_allow_dns_updates',
placeholder : 'Allow DNS updates',
tooltip : 'When unchecked, disables Samba from doing DNS updates\
when joining a domain.',
},
{
type : 'checkbox',
name : 'ad_disable_freenas_cache',
placeholder : 'Disable FreeNAS Cache',
tooltip : 'When checked, disables caching AD users and gorups.\
Useful if you cannot bind to a domain with a large number of users or\
groups.',
},
{
type : 'input',
name : 'ad_userdn',
placeholder : 'User Base',
tooltip : 'Distinguished name (DN) of the user container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_groupdn',
placeholder : 'Group Base',
tooltip : 'Distinguished name (DN) of the gorup container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_site',
placeholder : 'Site Name',
tooltip : 'The relative distinguished name of the site object in\
Active Directory.',
},
{
type : 'input',
name : 'ad_dcname',
placeholder : 'Domain Controller',
tooltip : 'Will automatically be added to the SRV record for the\
domain and, when multiple controllers are specified, FreeNAS selects\
the closest DC which responds. Use a short form of the FQDN like\
<b>exampleserver</b>.',
},
{
type : 'input',
name : 'ad_gcname',
placeholder : 'Global Catalog Server',
tooltip : 'If the hostname of the global catalog server to use is\
specified, make sure it is resolvable.',
},
{
type : 'select',
name : 'ad_kerberos_realm',
placeholder : 'Kerberos Realm',
tooltip : 'Select the realm created using the instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-realms"\
target="_blank">Kerberos Realms</a>.',
options : []
},
{
type : 'select',
name : 'ad_kerberos_principal',
placeholder : 'Kerberos Principal',
tooltip : 'Browse to the location of the keytab created using the\
instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-keytabs"\
target="_blank">Kerberos Keytabs</a>.',
options : []
},
{
type : 'input',
name : 'ad_timeout',
placeholder : 'AD Timeout',
tooltip : 'In seconds, increase if the AD service does not start\
after connecting to the domain.',
},
{
type : 'input',
name : 'ad_dns_timeout',
placeholder : 'DNS Timeout',
tooltip : 'In seconds, increase if AD DNS queries timeout.',
},
{
type : 'select',
name : 'ad_idmap_backend',
placeholder : 'Idmap backend',
tooltip : 'Select the backend to use to map Windows security\
identifiers (SIDs) to UNIX UIDs and GIDs. Click the <b>Edit</b> link\
to configure the editable options of that backend.',
options : []
},
{
type : 'select',
name : 'ad_nss_info',
placeholder : 'Winbind NSS Info',
tooltip : 'Defines the schema to use when querying AD for\
user/group info. <i>rfc2307</i> uses the RFC2307 schema support\
included in Windows 2003 R2, <i>sfu20</i> is for Services For Unix 3.0\
or 3.5, and <i>sfu</i> is for Services For Unix 2.0.',
options : []
},
{
type : 'select',
name : 'ad_ldap_sasl_wrapping',
placeholder : 'SASL wrapping',
tooltip : 'Defines how LDAP traffic is transmitted. Choices are\
<i>plain</i> (plain text), <i>sign</i> (signed only), or <i>seal</i>\
(signed and encrypted). Windows 2000 SP3 and higher can be configured\
to enforce signed LDAP connections.',
options : []
},
{
type : 'checkbox',
name : 'ad_enable',
placeholder : 'Enable',
tooltip : 'Enable the Active Directory service.',
},
{
type : 'input',
name : 'ad_netbiosname_a',
placeholder : 'Netbios Name',
tooltip : 'Limited to 15 characters. Automatically populated with\
the original host name of the system. It <b>must</b> be different from\
the <i>Workgroup</i> name.',
},
{
type : 'input',
name : 'ad_netbiosalias',
placeholder : 'NetBIOS alias',
tooltip : 'Limited to 15 characters.',
}
];
protected advanced_field: Array<any> = [
'ad_ssl',
'ad_certificate',
'ad_verbose_logging',
'ad_unix_extensions',
'ad_allow_trusted_doms',
'ad_use_default_domain',
'ad_allow_dns_updates',
'ad_disable_freenas_cache',
'ad_userdn',
'ad_groupdn',
'ad_site',
'ad_dcname',
'ad_gcname',
'ad_kerberos_realm',
'ad_kerberos_principal',
'ad_timeout',
'ad_dns_timeout',
'ad_idmap_backend',
'ad_nss_info',
'ad_ldap_sasl_wrapping',
'ad_netbiosname_a',
'ad_netbiosalias',
];
isCustActionVisible(actionname: string) {
if (actionname === 'advanced_mode' && this.isBasicMode === false) {
return false;
} else if (actionname === 'basic_mode' && this.isBasicMode === true) | else if (actionname === 'edit_idmap' && this.isBasicMode === true) {
return false;
}
return true;
}
constructor(protected router: Router, protected route: ActivatedRoute,
protected rest: RestService, protected ws: WebSocketService,
protected _injector: Injector, protected _appRef: ApplicationRef,
protected systemGeneralService: SystemGeneralService,
private dialogservice: DialogService) {}
afterInit(entityEdit: any) {
this.rest.get("directoryservice/kerberosrealm", {}).subscribe((res) => {
this.ad_kerberos_realm = _.find(this.fieldConfig, {name : 'ad_kerberos_realm'});
res.data.forEach((item) => {
this.ad_kerberos_realm.options.push(
{label : item.krb_realm, value : item.id});
});
});
this.rest.get("directoryservice/kerberosprincipal", {}).subscribe((res) => {
this.ad_kerberos_principal = _.find(this.fieldConfig, {name : 'ad_kerberos_principal'});
res.data.forEach((item) => {
this.ad_kerberos_principal.options.push(
{label : item.principal_name, value : item.id});
});
});
this.systemGeneralService.getCA().subscribe((res) => {
this.ad_certificate = _.find(this.fieldConfig, {name : 'ad_certificate'});
res.forEach((item) => {
this.ad_certificate.options.push(
{label : item.name, value : item.id});
});
});
this.ws.call('notifier.choices', ['LDAP_SSL_CHOICES']).subscribe((res) => {
this.ad_ssl = _.find(this.fieldConfig, {name : 'ad_ssl'});
res.forEach((item) => {
this.ad_ssl.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['IDMAP_CHOICES']).subscribe((res) => {
this.ad_idmap_backend = _.find(this.fieldConfig, {name : 'ad_idmap_backend'});
res.forEach((item) => {
this.ad_idmap_backend.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['NSS_INFO_CHOICES']).subscribe((res) => {
this.ad_nss_info = _.find(this.fieldConfig, {name : 'ad_nss_info'});
res.forEach((item) => {
this.ad_nss_info.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['LDAP_SASL_WRAPPING_CHOICES']).subscribe((res) => {
this.ad_ldap_sasl_wrapping = _.find(this.fieldConfig, {name : 'ad_ldap_sasl_wrapping'});
res.forEach((item) => {
this.ad_ldap_sasl_wrapping.options.push(
{label : item[1], value : item[0]});
});
});
entityEdit.formGroup.controls['ad_idmap_backend'].valueChanges.subscribe((res)=> {
this.idmapBacked = res;
})
}
}
| {
return false;
} | conditional_block |
activedirectory.component.ts | import {ApplicationRef, Component, Injector, OnInit} from '@angular/core';
import {ActivatedRoute, Router, RouterModule} from '@angular/router';
import * as _ from 'lodash';
import {Subscription} from 'rxjs/Subscription';
import {RestService, SystemGeneralService, WebSocketService} from '../../../services/';
import {FieldConfig} from '../../common/entity/entity-form/models/field-config.interface';
import { DialogService } from '../../../services/';
import { Validators } from '@angular/forms';
@Component({
selector : 'app-activedirectory',
template : '<entity-form [conf]="this"></entity-form>',
})
export class ActiveDirectoryComponent {
protected resource_name = 'directoryservice/activedirectory';
protected isBasicMode = true;
protected idmapBacked: any;
protected ad_certificate: any;
protected ad_kerberos_realm: any;
protected ad_kerberos_principal: any;
protected ad_ssl: any;
protected ad_idmap_backend: any;
protected ad_nss_info: any;
protected ad_ldap_sasl_wrapping: any;
public custActions: Array<any> = [
{
'id' : 'basic_mode',
'name' : 'Basic Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'advanced_mode',
'name' : 'Advanced Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'edit_idmap',
'name' : 'Edit Idmap',
function : () => {
this.router.navigate(new Array('').concat(['directoryservice','idmap', this.idmapBacked, 'activedirectory']));
}
},
{
'id' : 'ds_clearcache',
'name' : 'Rebuild Directory Service Cache',
function : async () => {
this.ws.call('notifier.ds_clearcache').subscribe((cache_status)=>{
this.dialogservice.Info("Active Directory", "The cache is being rebuilt.");
})
}
}
];
public fieldConfig: FieldConfig[] = [
{
type : 'input',
name : 'ad_domainname',
placeholder : 'Domain Name',
tooltip : 'Name of Active Directory domain (<i>example.com</i>)\
or child domain (<i>sales.example.com</i>). This setting is mandatory\
and the GUI refuses to save the settings if the domain controller\
for the specified domain cannot be found.',
required: true,
validation : [ Validators.required ]
},
{
type : 'input',
name : 'ad_bindname',
placeholder : 'Domain Account Name',
tooltip : 'Name of the Active Directory administrator account.\
This setting is mandatory and the GUI refuses to save the settings\
if it cannot connect to the domain controller using this account name.',
},
{
type : 'input',
name : 'ad_bindpw',
placeholder : 'Domain Account Password',
tooltip : 'Password for the Active Directory administrator\
account. This setting is mandatory and the GUI refuses to save the\
settings if it cannot connect to the domain controller using this\
password.',
inputType : 'password'
},
{
type : 'input',
name : 'ad_monitor_frequency',
placeholder : 'AD check connectivity frequency (seconds)',
tooltip : 'How often to verify that Active Directory services are\
active.',
},
{
type : 'input',
name : 'ad_recover_retry',
placeholder : 'How many recovery attempts',
tooltip : 'Number of times to attempt reconnecting to the Active\
directory server. Tries forever when set to <i>0</i>.',
},
{
type : 'checkbox',
name : 'ad_enable_monitor',
placeholder : 'Enable AD Monitoring',
tooltip : 'Restart Active Directory automatically if the service\
is disconnected.',
},
{
type : 'select',
name : 'ad_ssl',
placeholder : 'Encryption Mode',
tooltip : 'Choices are <i>Off, SSL</i> or <i>TLS</i>.',
options : []
},
{
type : 'select',
name : 'ad_certificate',
placeholder : 'Certificate',
tooltip : 'Select the certificate of the Active Directory server\
if SSL connections are used. If a certificate does not exist yet,\
create a <a href="http://doc.freenas.org/11/system.html#cas"\
target="_blank">CA</a>, then create a certificate on the Active\
Directory server and import it to the FreeNAS system with\
<a href="http://doc.freenas.org/11/system.html#certificates"\
target="_blank">Certificates</a>.',
options : []
},
{
type : 'checkbox',
name : 'ad_verbose_logging',
placeholder : 'Verbose logging',
tooltip : 'When checked, logs attempts to join the domain to\
<b>/var/log/messages</b>.',
},
{
type : 'checkbox',
name : 'ad_unix_extensions',
placeholder : 'UNIX extensions',
tooltip : '<b>Only</b> check this box if the AD server has been\
explicitly configured to map permissions for UNIX users. Checking this\
box provides persistent UIDs and GUIDs, otherwise, users and groups\
are mapped to the UID or GUID range configured in Samba.',
},
{
type : 'checkbox',
name : 'ad_allow_trusted_doms',
placeholder : 'Allow Trusted Domains',
tooltip : 'Should only be enabled if network has active <a\
href="https://technet.microsoft.com/en-us/library/cc757352(WS.10).aspx"\
target="_blank">domain/forest trusts</a> and you need to manage files\
on multiple domains. use with caution as it will generate more\
winbind traffic, slowing down the ability to filter through\
user/group info.',
},
{
type : 'checkbox',
name : 'ad_use_default_domain',
placeholder : 'Use Default Domain',
tooltip : 'When unchecked, the domain name is prepended to the\
username. If <b>Allow Trusted Domains</b> is checked and multiple\
domains use the same usernames, uncheck this box to prevent name\
collisions.',
},
{
type : 'checkbox',
name : 'ad_allow_dns_updates',
placeholder : 'Allow DNS updates',
tooltip : 'When unchecked, disables Samba from doing DNS updates\
when joining a domain.',
},
{
type : 'checkbox',
name : 'ad_disable_freenas_cache',
placeholder : 'Disable FreeNAS Cache',
tooltip : 'When checked, disables caching AD users and gorups.\
Useful if you cannot bind to a domain with a large number of users or\
groups.',
},
{
type : 'input',
name : 'ad_userdn',
placeholder : 'User Base',
tooltip : 'Distinguished name (DN) of the user container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_groupdn',
placeholder : 'Group Base',
tooltip : 'Distinguished name (DN) of the gorup container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_site',
placeholder : 'Site Name',
tooltip : 'The relative distinguished name of the site object in\
Active Directory.',
},
{
type : 'input',
name : 'ad_dcname',
placeholder : 'Domain Controller',
tooltip : 'Will automatically be added to the SRV record for the\
domain and, when multiple controllers are specified, FreeNAS selects\
the closest DC which responds. Use a short form of the FQDN like\
<b>exampleserver</b>.',
},
{
type : 'input',
name : 'ad_gcname',
placeholder : 'Global Catalog Server',
tooltip : 'If the hostname of the global catalog server to use is\
specified, make sure it is resolvable.',
},
{
type : 'select',
name : 'ad_kerberos_realm',
placeholder : 'Kerberos Realm',
tooltip : 'Select the realm created using the instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-realms"\
target="_blank">Kerberos Realms</a>.',
options : []
},
{
type : 'select',
name : 'ad_kerberos_principal',
placeholder : 'Kerberos Principal',
tooltip : 'Browse to the location of the keytab created using the\
instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-keytabs"\
target="_blank">Kerberos Keytabs</a>.',
options : []
},
{
type : 'input',
name : 'ad_timeout',
placeholder : 'AD Timeout',
tooltip : 'In seconds, increase if the AD service does not start\
after connecting to the domain.',
},
{
type : 'input',
name : 'ad_dns_timeout',
placeholder : 'DNS Timeout',
tooltip : 'In seconds, increase if AD DNS queries timeout.',
},
{
type : 'select',
name : 'ad_idmap_backend',
placeholder : 'Idmap backend',
tooltip : 'Select the backend to use to map Windows security\
identifiers (SIDs) to UNIX UIDs and GIDs. Click the <b>Edit</b> link\
to configure the editable options of that backend.',
options : []
},
{
type : 'select',
name : 'ad_nss_info',
placeholder : 'Winbind NSS Info',
tooltip : 'Defines the schema to use when querying AD for\
user/group info. <i>rfc2307</i> uses the RFC2307 schema support\
included in Windows 2003 R2, <i>sfu20</i> is for Services For Unix 3.0\
or 3.5, and <i>sfu</i> is for Services For Unix 2.0.',
options : []
},
{
type : 'select',
name : 'ad_ldap_sasl_wrapping',
placeholder : 'SASL wrapping',
tooltip : 'Defines how LDAP traffic is transmitted. Choices are\
<i>plain</i> (plain text), <i>sign</i> (signed only), or <i>seal</i>\
(signed and encrypted). Windows 2000 SP3 and higher can be configured\
to enforce signed LDAP connections.',
options : []
},
{
type : 'checkbox',
name : 'ad_enable',
placeholder : 'Enable',
tooltip : 'Enable the Active Directory service.',
},
{
type : 'input',
name : 'ad_netbiosname_a',
placeholder : 'Netbios Name',
tooltip : 'Limited to 15 characters. Automatically populated with\
the original host name of the system. It <b>must</b> be different from\
the <i>Workgroup</i> name.',
},
{
type : 'input',
name : 'ad_netbiosalias',
placeholder : 'NetBIOS alias',
tooltip : 'Limited to 15 characters.',
}
];
protected advanced_field: Array<any> = [
'ad_ssl',
'ad_certificate',
'ad_verbose_logging',
'ad_unix_extensions',
'ad_allow_trusted_doms',
'ad_use_default_domain',
'ad_allow_dns_updates',
'ad_disable_freenas_cache',
'ad_userdn',
'ad_groupdn',
'ad_site',
'ad_dcname',
'ad_gcname',
'ad_kerberos_realm',
'ad_kerberos_principal',
'ad_timeout',
'ad_dns_timeout',
'ad_idmap_backend',
'ad_nss_info',
'ad_ldap_sasl_wrapping',
'ad_netbiosname_a',
'ad_netbiosalias',
];
isCustActionVisible(actionname: string) {
if (actionname === 'advanced_mode' && this.isBasicMode === false) {
return false;
} else if (actionname === 'basic_mode' && this.isBasicMode === true) {
return false;
} else if (actionname === 'edit_idmap' && this.isBasicMode === true) {
return false;
}
return true;
}
constructor(protected router: Router, protected route: ActivatedRoute,
protected rest: RestService, protected ws: WebSocketService,
protected _injector: Injector, protected _appRef: ApplicationRef,
protected systemGeneralService: SystemGeneralService,
private dialogservice: DialogService) |
afterInit(entityEdit: any) {
this.rest.get("directoryservice/kerberosrealm", {}).subscribe((res) => {
this.ad_kerberos_realm = _.find(this.fieldConfig, {name : 'ad_kerberos_realm'});
res.data.forEach((item) => {
this.ad_kerberos_realm.options.push(
{label : item.krb_realm, value : item.id});
});
});
this.rest.get("directoryservice/kerberosprincipal", {}).subscribe((res) => {
this.ad_kerberos_principal = _.find(this.fieldConfig, {name : 'ad_kerberos_principal'});
res.data.forEach((item) => {
this.ad_kerberos_principal.options.push(
{label : item.principal_name, value : item.id});
});
});
this.systemGeneralService.getCA().subscribe((res) => {
this.ad_certificate = _.find(this.fieldConfig, {name : 'ad_certificate'});
res.forEach((item) => {
this.ad_certificate.options.push(
{label : item.name, value : item.id});
});
});
this.ws.call('notifier.choices', ['LDAP_SSL_CHOICES']).subscribe((res) => {
this.ad_ssl = _.find(this.fieldConfig, {name : 'ad_ssl'});
res.forEach((item) => {
this.ad_ssl.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['IDMAP_CHOICES']).subscribe((res) => {
this.ad_idmap_backend = _.find(this.fieldConfig, {name : 'ad_idmap_backend'});
res.forEach((item) => {
this.ad_idmap_backend.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['NSS_INFO_CHOICES']).subscribe((res) => {
this.ad_nss_info = _.find(this.fieldConfig, {name : 'ad_nss_info'});
res.forEach((item) => {
this.ad_nss_info.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['LDAP_SASL_WRAPPING_CHOICES']).subscribe((res) => {
this.ad_ldap_sasl_wrapping = _.find(this.fieldConfig, {name : 'ad_ldap_sasl_wrapping'});
res.forEach((item) => {
this.ad_ldap_sasl_wrapping.options.push(
{label : item[1], value : item[0]});
});
});
entityEdit.formGroup.controls['ad_idmap_backend'].valueChanges.subscribe((res)=> {
this.idmapBacked = res;
})
}
}
| {} | identifier_body |
activedirectory.component.ts | import {ApplicationRef, Component, Injector, OnInit} from '@angular/core';
import {ActivatedRoute, Router, RouterModule} from '@angular/router';
import * as _ from 'lodash';
import {Subscription} from 'rxjs/Subscription';
import {RestService, SystemGeneralService, WebSocketService} from '../../../services/';
import {FieldConfig} from '../../common/entity/entity-form/models/field-config.interface';
import { DialogService } from '../../../services/';
import { Validators } from '@angular/forms';
@Component({
selector : 'app-activedirectory',
template : '<entity-form [conf]="this"></entity-form>',
})
export class ActiveDirectoryComponent {
protected resource_name = 'directoryservice/activedirectory';
protected isBasicMode = true;
protected idmapBacked: any;
protected ad_certificate: any;
protected ad_kerberos_realm: any;
protected ad_kerberos_principal: any;
protected ad_ssl: any;
protected ad_idmap_backend: any;
protected ad_nss_info: any;
protected ad_ldap_sasl_wrapping: any;
public custActions: Array<any> = [
{
'id' : 'basic_mode',
'name' : 'Basic Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'advanced_mode',
'name' : 'Advanced Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'edit_idmap',
'name' : 'Edit Idmap',
function : () => {
this.router.navigate(new Array('').concat(['directoryservice','idmap', this.idmapBacked, 'activedirectory']));
}
},
{
'id' : 'ds_clearcache',
'name' : 'Rebuild Directory Service Cache',
function : async () => {
this.ws.call('notifier.ds_clearcache').subscribe((cache_status)=>{
this.dialogservice.Info("Active Directory", "The cache is being rebuilt.");
})
}
}
];
public fieldConfig: FieldConfig[] = [
{
type : 'input',
name : 'ad_domainname',
placeholder : 'Domain Name',
tooltip : 'Name of Active Directory domain (<i>example.com</i>)\
or child domain (<i>sales.example.com</i>). This setting is mandatory\
and the GUI refuses to save the settings if the domain controller\
for the specified domain cannot be found.',
required: true,
validation : [ Validators.required ]
},
{
type : 'input',
name : 'ad_bindname',
placeholder : 'Domain Account Name',
tooltip : 'Name of the Active Directory administrator account.\
This setting is mandatory and the GUI refuses to save the settings\
if it cannot connect to the domain controller using this account name.',
},
{
type : 'input',
name : 'ad_bindpw',
placeholder : 'Domain Account Password',
tooltip : 'Password for the Active Directory administrator\
account. This setting is mandatory and the GUI refuses to save the\
settings if it cannot connect to the domain controller using this\
password.',
inputType : 'password'
},
{
type : 'input',
name : 'ad_monitor_frequency',
placeholder : 'AD check connectivity frequency (seconds)',
tooltip : 'How often to verify that Active Directory services are\
active.',
},
{
type : 'input',
name : 'ad_recover_retry',
placeholder : 'How many recovery attempts',
tooltip : 'Number of times to attempt reconnecting to the Active\
directory server. Tries forever when set to <i>0</i>.',
},
{
type : 'checkbox',
name : 'ad_enable_monitor',
placeholder : 'Enable AD Monitoring',
tooltip : 'Restart Active Directory automatically if the service\
is disconnected.',
},
{
type : 'select',
name : 'ad_ssl',
placeholder : 'Encryption Mode',
tooltip : 'Choices are <i>Off, SSL</i> or <i>TLS</i>.',
options : []
},
{
type : 'select',
name : 'ad_certificate',
placeholder : 'Certificate',
tooltip : 'Select the certificate of the Active Directory server\
if SSL connections are used. If a certificate does not exist yet,\
create a <a href="http://doc.freenas.org/11/system.html#cas"\
target="_blank">CA</a>, then create a certificate on the Active\
Directory server and import it to the FreeNAS system with\
<a href="http://doc.freenas.org/11/system.html#certificates"\
target="_blank">Certificates</a>.',
options : []
},
{
type : 'checkbox',
name : 'ad_verbose_logging',
placeholder : 'Verbose logging',
tooltip : 'When checked, logs attempts to join the domain to\
<b>/var/log/messages</b>.',
},
{
type : 'checkbox',
name : 'ad_unix_extensions',
placeholder : 'UNIX extensions',
tooltip : '<b>Only</b> check this box if the AD server has been\
explicitly configured to map permissions for UNIX users. Checking this\
box provides persistent UIDs and GUIDs, otherwise, users and groups\
are mapped to the UID or GUID range configured in Samba.',
},
{
type : 'checkbox',
name : 'ad_allow_trusted_doms',
placeholder : 'Allow Trusted Domains',
tooltip : 'Should only be enabled if network has active <a\
href="https://technet.microsoft.com/en-us/library/cc757352(WS.10).aspx"\
target="_blank">domain/forest trusts</a> and you need to manage files\
on multiple domains. use with caution as it will generate more\
winbind traffic, slowing down the ability to filter through\
user/group info.',
},
{
type : 'checkbox',
name : 'ad_use_default_domain',
placeholder : 'Use Default Domain',
tooltip : 'When unchecked, the domain name is prepended to the\
username. If <b>Allow Trusted Domains</b> is checked and multiple\
domains use the same usernames, uncheck this box to prevent name\
collisions.',
},
{
type : 'checkbox',
name : 'ad_allow_dns_updates',
placeholder : 'Allow DNS updates',
tooltip : 'When unchecked, disables Samba from doing DNS updates\
when joining a domain.',
},
{
type : 'checkbox',
name : 'ad_disable_freenas_cache',
placeholder : 'Disable FreeNAS Cache',
tooltip : 'When checked, disables caching AD users and gorups.\
Useful if you cannot bind to a domain with a large number of users or\
groups.',
},
{
type : 'input',
name : 'ad_userdn',
placeholder : 'User Base',
tooltip : 'Distinguished name (DN) of the user container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_groupdn',
placeholder : 'Group Base',
tooltip : 'Distinguished name (DN) of the gorup container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_site',
placeholder : 'Site Name',
tooltip : 'The relative distinguished name of the site object in\
Active Directory.',
},
{
type : 'input',
name : 'ad_dcname',
placeholder : 'Domain Controller',
tooltip : 'Will automatically be added to the SRV record for the\
domain and, when multiple controllers are specified, FreeNAS selects\
the closest DC which responds. Use a short form of the FQDN like\
<b>exampleserver</b>.',
},
{
type : 'input',
name : 'ad_gcname',
placeholder : 'Global Catalog Server',
tooltip : 'If the hostname of the global catalog server to use is\
specified, make sure it is resolvable.',
},
{
type : 'select',
name : 'ad_kerberos_realm',
placeholder : 'Kerberos Realm',
tooltip : 'Select the realm created using the instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-realms"\
target="_blank">Kerberos Realms</a>.',
options : []
},
{
type : 'select',
name : 'ad_kerberos_principal',
placeholder : 'Kerberos Principal',
tooltip : 'Browse to the location of the keytab created using the\
instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-keytabs"\
target="_blank">Kerberos Keytabs</a>.',
options : []
},
{
type : 'input',
name : 'ad_timeout',
placeholder : 'AD Timeout',
tooltip : 'In seconds, increase if the AD service does not start\
after connecting to the domain.',
},
{
type : 'input',
name : 'ad_dns_timeout',
placeholder : 'DNS Timeout',
tooltip : 'In seconds, increase if AD DNS queries timeout.',
},
{
type : 'select',
name : 'ad_idmap_backend',
placeholder : 'Idmap backend',
tooltip : 'Select the backend to use to map Windows security\
identifiers (SIDs) to UNIX UIDs and GIDs. Click the <b>Edit</b> link\
to configure the editable options of that backend.',
options : []
},
{
type : 'select',
name : 'ad_nss_info',
placeholder : 'Winbind NSS Info',
tooltip : 'Defines the schema to use when querying AD for\
user/group info. <i>rfc2307</i> uses the RFC2307 schema support\
included in Windows 2003 R2, <i>sfu20</i> is for Services For Unix 3.0\
or 3.5, and <i>sfu</i> is for Services For Unix 2.0.',
options : []
},
{
type : 'select',
name : 'ad_ldap_sasl_wrapping',
placeholder : 'SASL wrapping',
tooltip : 'Defines how LDAP traffic is transmitted. Choices are\
<i>plain</i> (plain text), <i>sign</i> (signed only), or <i>seal</i>\
(signed and encrypted). Windows 2000 SP3 and higher can be configured\
to enforce signed LDAP connections.',
options : []
},
{
type : 'checkbox',
name : 'ad_enable',
placeholder : 'Enable',
tooltip : 'Enable the Active Directory service.',
},
{
type : 'input',
name : 'ad_netbiosname_a',
placeholder : 'Netbios Name',
tooltip : 'Limited to 15 characters. Automatically populated with\
the original host name of the system. It <b>must</b> be different from\
the <i>Workgroup</i> name.',
},
{
type : 'input',
name : 'ad_netbiosalias',
placeholder : 'NetBIOS alias',
tooltip : 'Limited to 15 characters.',
}
];
protected advanced_field: Array<any> = [
'ad_ssl',
'ad_certificate',
'ad_verbose_logging',
'ad_unix_extensions',
'ad_allow_trusted_doms',
'ad_use_default_domain',
'ad_allow_dns_updates',
'ad_disable_freenas_cache',
'ad_userdn',
'ad_groupdn',
'ad_site',
'ad_dcname',
'ad_gcname',
'ad_kerberos_realm',
'ad_kerberos_principal',
'ad_timeout',
'ad_dns_timeout',
'ad_idmap_backend',
'ad_nss_info',
'ad_ldap_sasl_wrapping',
'ad_netbiosname_a',
'ad_netbiosalias',
];
isCustActionVisible(actionname: string) {
if (actionname === 'advanced_mode' && this.isBasicMode === false) {
return false;
} else if (actionname === 'basic_mode' && this.isBasicMode === true) {
return false;
} else if (actionname === 'edit_idmap' && this.isBasicMode === true) {
return false;
}
return true;
}
constructor(protected router: Router, protected route: ActivatedRoute,
protected rest: RestService, protected ws: WebSocketService,
protected _injector: Injector, protected _appRef: ApplicationRef,
protected systemGeneralService: SystemGeneralService,
private dialogservice: DialogService) {}
afterInit(entityEdit: any) {
this.rest.get("directoryservice/kerberosrealm", {}).subscribe((res) => {
this.ad_kerberos_realm = _.find(this.fieldConfig, {name : 'ad_kerberos_realm'});
res.data.forEach((item) => {
this.ad_kerberos_realm.options.push(
{label : item.krb_realm, value : item.id});
});
});
this.rest.get("directoryservice/kerberosprincipal", {}).subscribe((res) => {
this.ad_kerberos_principal = _.find(this.fieldConfig, {name : 'ad_kerberos_principal'});
res.data.forEach((item) => {
this.ad_kerberos_principal.options.push(
{label : item.principal_name, value : item.id});
});
});
this.systemGeneralService.getCA().subscribe((res) => { | this.ad_certificate.options.push(
{label : item.name, value : item.id});
});
});
this.ws.call('notifier.choices', ['LDAP_SSL_CHOICES']).subscribe((res) => {
this.ad_ssl = _.find(this.fieldConfig, {name : 'ad_ssl'});
res.forEach((item) => {
this.ad_ssl.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['IDMAP_CHOICES']).subscribe((res) => {
this.ad_idmap_backend = _.find(this.fieldConfig, {name : 'ad_idmap_backend'});
res.forEach((item) => {
this.ad_idmap_backend.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['NSS_INFO_CHOICES']).subscribe((res) => {
this.ad_nss_info = _.find(this.fieldConfig, {name : 'ad_nss_info'});
res.forEach((item) => {
this.ad_nss_info.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['LDAP_SASL_WRAPPING_CHOICES']).subscribe((res) => {
this.ad_ldap_sasl_wrapping = _.find(this.fieldConfig, {name : 'ad_ldap_sasl_wrapping'});
res.forEach((item) => {
this.ad_ldap_sasl_wrapping.options.push(
{label : item[1], value : item[0]});
});
});
entityEdit.formGroup.controls['ad_idmap_backend'].valueChanges.subscribe((res)=> {
this.idmapBacked = res;
})
}
} | this.ad_certificate = _.find(this.fieldConfig, {name : 'ad_certificate'});
res.forEach((item) => { | random_line_split |
activedirectory.component.ts | import {ApplicationRef, Component, Injector, OnInit} from '@angular/core';
import {ActivatedRoute, Router, RouterModule} from '@angular/router';
import * as _ from 'lodash';
import {Subscription} from 'rxjs/Subscription';
import {RestService, SystemGeneralService, WebSocketService} from '../../../services/';
import {FieldConfig} from '../../common/entity/entity-form/models/field-config.interface';
import { DialogService } from '../../../services/';
import { Validators } from '@angular/forms';
@Component({
selector : 'app-activedirectory',
template : '<entity-form [conf]="this"></entity-form>',
})
export class ActiveDirectoryComponent {
protected resource_name = 'directoryservice/activedirectory';
protected isBasicMode = true;
protected idmapBacked: any;
protected ad_certificate: any;
protected ad_kerberos_realm: any;
protected ad_kerberos_principal: any;
protected ad_ssl: any;
protected ad_idmap_backend: any;
protected ad_nss_info: any;
protected ad_ldap_sasl_wrapping: any;
public custActions: Array<any> = [
{
'id' : 'basic_mode',
'name' : 'Basic Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'advanced_mode',
'name' : 'Advanced Mode',
function : () => { this.isBasicMode = !this.isBasicMode; }
},
{
'id' : 'edit_idmap',
'name' : 'Edit Idmap',
function : () => {
this.router.navigate(new Array('').concat(['directoryservice','idmap', this.idmapBacked, 'activedirectory']));
}
},
{
'id' : 'ds_clearcache',
'name' : 'Rebuild Directory Service Cache',
function : async () => {
this.ws.call('notifier.ds_clearcache').subscribe((cache_status)=>{
this.dialogservice.Info("Active Directory", "The cache is being rebuilt.");
})
}
}
];
public fieldConfig: FieldConfig[] = [
{
type : 'input',
name : 'ad_domainname',
placeholder : 'Domain Name',
tooltip : 'Name of Active Directory domain (<i>example.com</i>)\
or child domain (<i>sales.example.com</i>). This setting is mandatory\
and the GUI refuses to save the settings if the domain controller\
for the specified domain cannot be found.',
required: true,
validation : [ Validators.required ]
},
{
type : 'input',
name : 'ad_bindname',
placeholder : 'Domain Account Name',
tooltip : 'Name of the Active Directory administrator account.\
This setting is mandatory and the GUI refuses to save the settings\
if it cannot connect to the domain controller using this account name.',
},
{
type : 'input',
name : 'ad_bindpw',
placeholder : 'Domain Account Password',
tooltip : 'Password for the Active Directory administrator\
account. This setting is mandatory and the GUI refuses to save the\
settings if it cannot connect to the domain controller using this\
password.',
inputType : 'password'
},
{
type : 'input',
name : 'ad_monitor_frequency',
placeholder : 'AD check connectivity frequency (seconds)',
tooltip : 'How often to verify that Active Directory services are\
active.',
},
{
type : 'input',
name : 'ad_recover_retry',
placeholder : 'How many recovery attempts',
tooltip : 'Number of times to attempt reconnecting to the Active\
directory server. Tries forever when set to <i>0</i>.',
},
{
type : 'checkbox',
name : 'ad_enable_monitor',
placeholder : 'Enable AD Monitoring',
tooltip : 'Restart Active Directory automatically if the service\
is disconnected.',
},
{
type : 'select',
name : 'ad_ssl',
placeholder : 'Encryption Mode',
tooltip : 'Choices are <i>Off, SSL</i> or <i>TLS</i>.',
options : []
},
{
type : 'select',
name : 'ad_certificate',
placeholder : 'Certificate',
tooltip : 'Select the certificate of the Active Directory server\
if SSL connections are used. If a certificate does not exist yet,\
create a <a href="http://doc.freenas.org/11/system.html#cas"\
target="_blank">CA</a>, then create a certificate on the Active\
Directory server and import it to the FreeNAS system with\
<a href="http://doc.freenas.org/11/system.html#certificates"\
target="_blank">Certificates</a>.',
options : []
},
{
type : 'checkbox',
name : 'ad_verbose_logging',
placeholder : 'Verbose logging',
tooltip : 'When checked, logs attempts to join the domain to\
<b>/var/log/messages</b>.',
},
{
type : 'checkbox',
name : 'ad_unix_extensions',
placeholder : 'UNIX extensions',
tooltip : '<b>Only</b> check this box if the AD server has been\
explicitly configured to map permissions for UNIX users. Checking this\
box provides persistent UIDs and GUIDs, otherwise, users and groups\
are mapped to the UID or GUID range configured in Samba.',
},
{
type : 'checkbox',
name : 'ad_allow_trusted_doms',
placeholder : 'Allow Trusted Domains',
tooltip : 'Should only be enabled if network has active <a\
href="https://technet.microsoft.com/en-us/library/cc757352(WS.10).aspx"\
target="_blank">domain/forest trusts</a> and you need to manage files\
on multiple domains. use with caution as it will generate more\
winbind traffic, slowing down the ability to filter through\
user/group info.',
},
{
type : 'checkbox',
name : 'ad_use_default_domain',
placeholder : 'Use Default Domain',
tooltip : 'When unchecked, the domain name is prepended to the\
username. If <b>Allow Trusted Domains</b> is checked and multiple\
domains use the same usernames, uncheck this box to prevent name\
collisions.',
},
{
type : 'checkbox',
name : 'ad_allow_dns_updates',
placeholder : 'Allow DNS updates',
tooltip : 'When unchecked, disables Samba from doing DNS updates\
when joining a domain.',
},
{
type : 'checkbox',
name : 'ad_disable_freenas_cache',
placeholder : 'Disable FreeNAS Cache',
tooltip : 'When checked, disables caching AD users and gorups.\
Useful if you cannot bind to a domain with a large number of users or\
groups.',
},
{
type : 'input',
name : 'ad_userdn',
placeholder : 'User Base',
tooltip : 'Distinguished name (DN) of the user container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_groupdn',
placeholder : 'Group Base',
tooltip : 'Distinguished name (DN) of the gorup container in\
Active Directory.',
},
{
type : 'input',
name : 'ad_site',
placeholder : 'Site Name',
tooltip : 'The relative distinguished name of the site object in\
Active Directory.',
},
{
type : 'input',
name : 'ad_dcname',
placeholder : 'Domain Controller',
tooltip : 'Will automatically be added to the SRV record for the\
domain and, when multiple controllers are specified, FreeNAS selects\
the closest DC which responds. Use a short form of the FQDN like\
<b>exampleserver</b>.',
},
{
type : 'input',
name : 'ad_gcname',
placeholder : 'Global Catalog Server',
tooltip : 'If the hostname of the global catalog server to use is\
specified, make sure it is resolvable.',
},
{
type : 'select',
name : 'ad_kerberos_realm',
placeholder : 'Kerberos Realm',
tooltip : 'Select the realm created using the instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-realms"\
target="_blank">Kerberos Realms</a>.',
options : []
},
{
type : 'select',
name : 'ad_kerberos_principal',
placeholder : 'Kerberos Principal',
tooltip : 'Browse to the location of the keytab created using the\
instructions in <a\
href="http://doc.freenas.org/11/directoryservice.html#kerberos-keytabs"\
target="_blank">Kerberos Keytabs</a>.',
options : []
},
{
type : 'input',
name : 'ad_timeout',
placeholder : 'AD Timeout',
tooltip : 'In seconds, increase if the AD service does not start\
after connecting to the domain.',
},
{
type : 'input',
name : 'ad_dns_timeout',
placeholder : 'DNS Timeout',
tooltip : 'In seconds, increase if AD DNS queries timeout.',
},
{
type : 'select',
name : 'ad_idmap_backend',
placeholder : 'Idmap backend',
tooltip : 'Select the backend to use to map Windows security\
identifiers (SIDs) to UNIX UIDs and GIDs. Click the <b>Edit</b> link\
to configure the editable options of that backend.',
options : []
},
{
type : 'select',
name : 'ad_nss_info',
placeholder : 'Winbind NSS Info',
tooltip : 'Defines the schema to use when querying AD for\
user/group info. <i>rfc2307</i> uses the RFC2307 schema support\
included in Windows 2003 R2, <i>sfu20</i> is for Services For Unix 3.0\
or 3.5, and <i>sfu</i> is for Services For Unix 2.0.',
options : []
},
{
type : 'select',
name : 'ad_ldap_sasl_wrapping',
placeholder : 'SASL wrapping',
tooltip : 'Defines how LDAP traffic is transmitted. Choices are\
<i>plain</i> (plain text), <i>sign</i> (signed only), or <i>seal</i>\
(signed and encrypted). Windows 2000 SP3 and higher can be configured\
to enforce signed LDAP connections.',
options : []
},
{
type : 'checkbox',
name : 'ad_enable',
placeholder : 'Enable',
tooltip : 'Enable the Active Directory service.',
},
{
type : 'input',
name : 'ad_netbiosname_a',
placeholder : 'Netbios Name',
tooltip : 'Limited to 15 characters. Automatically populated with\
the original host name of the system. It <b>must</b> be different from\
the <i>Workgroup</i> name.',
},
{
type : 'input',
name : 'ad_netbiosalias',
placeholder : 'NetBIOS alias',
tooltip : 'Limited to 15 characters.',
}
];
protected advanced_field: Array<any> = [
'ad_ssl',
'ad_certificate',
'ad_verbose_logging',
'ad_unix_extensions',
'ad_allow_trusted_doms',
'ad_use_default_domain',
'ad_allow_dns_updates',
'ad_disable_freenas_cache',
'ad_userdn',
'ad_groupdn',
'ad_site',
'ad_dcname',
'ad_gcname',
'ad_kerberos_realm',
'ad_kerberos_principal',
'ad_timeout',
'ad_dns_timeout',
'ad_idmap_backend',
'ad_nss_info',
'ad_ldap_sasl_wrapping',
'ad_netbiosname_a',
'ad_netbiosalias',
];
| (actionname: string) {
if (actionname === 'advanced_mode' && this.isBasicMode === false) {
return false;
} else if (actionname === 'basic_mode' && this.isBasicMode === true) {
return false;
} else if (actionname === 'edit_idmap' && this.isBasicMode === true) {
return false;
}
return true;
}
constructor(protected router: Router, protected route: ActivatedRoute,
protected rest: RestService, protected ws: WebSocketService,
protected _injector: Injector, protected _appRef: ApplicationRef,
protected systemGeneralService: SystemGeneralService,
private dialogservice: DialogService) {}
afterInit(entityEdit: any) {
this.rest.get("directoryservice/kerberosrealm", {}).subscribe((res) => {
this.ad_kerberos_realm = _.find(this.fieldConfig, {name : 'ad_kerberos_realm'});
res.data.forEach((item) => {
this.ad_kerberos_realm.options.push(
{label : item.krb_realm, value : item.id});
});
});
this.rest.get("directoryservice/kerberosprincipal", {}).subscribe((res) => {
this.ad_kerberos_principal = _.find(this.fieldConfig, {name : 'ad_kerberos_principal'});
res.data.forEach((item) => {
this.ad_kerberos_principal.options.push(
{label : item.principal_name, value : item.id});
});
});
this.systemGeneralService.getCA().subscribe((res) => {
this.ad_certificate = _.find(this.fieldConfig, {name : 'ad_certificate'});
res.forEach((item) => {
this.ad_certificate.options.push(
{label : item.name, value : item.id});
});
});
this.ws.call('notifier.choices', ['LDAP_SSL_CHOICES']).subscribe((res) => {
this.ad_ssl = _.find(this.fieldConfig, {name : 'ad_ssl'});
res.forEach((item) => {
this.ad_ssl.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['IDMAP_CHOICES']).subscribe((res) => {
this.ad_idmap_backend = _.find(this.fieldConfig, {name : 'ad_idmap_backend'});
res.forEach((item) => {
this.ad_idmap_backend.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['NSS_INFO_CHOICES']).subscribe((res) => {
this.ad_nss_info = _.find(this.fieldConfig, {name : 'ad_nss_info'});
res.forEach((item) => {
this.ad_nss_info.options.push(
{label : item[1], value : item[0]});
});
});
this.ws.call('notifier.choices', ['LDAP_SASL_WRAPPING_CHOICES']).subscribe((res) => {
this.ad_ldap_sasl_wrapping = _.find(this.fieldConfig, {name : 'ad_ldap_sasl_wrapping'});
res.forEach((item) => {
this.ad_ldap_sasl_wrapping.options.push(
{label : item[1], value : item[0]});
});
});
entityEdit.formGroup.controls['ad_idmap_backend'].valueChanges.subscribe((res)=> {
this.idmapBacked = res;
})
}
}
| isCustActionVisible | identifier_name |
parser.rs | use crate::error::Error;
use crate::resolve_import::resolve_import;
use std::sync::Arc;
use std::sync::Mutex;
use swc_common::comments::SingleThreadedComments;
use swc_common::errors::Diagnostic;
use swc_common::errors::DiagnosticBuilder;
use swc_common::errors::Emitter;
use swc_common::errors::Handler;
use swc_common::errors::HandlerFlags;
use swc_common::input::StringInput;
use swc_common::FileName;
use swc_common::SourceMap;
use swc_ecmascript::ast::Program;
use swc_ecmascript::dep_graph::analyze_dependencies;
use swc_ecmascript::dep_graph::DependencyKind;
use swc_ecmascript::parser::lexer::Lexer;
use swc_ecmascript::parser::EsConfig;
use swc_ecmascript::parser::JscTarget;
use swc_ecmascript::parser::Parser;
use swc_ecmascript::parser::Syntax;
use swc_ecmascript::parser::TsConfig;
use url::Url;
// Returns (deps, transpiled source code)
pub fn get_deps_and_transpile(
url: &Url,
source: &str,
content_type: &Option<String>,
) -> Result<(Vec<Url>, Option<String>), Error> {
let comments = SingleThreadedComments::default();
let source_map = SourceMap::default();
let source_file = source_map
.new_source_file(FileName::Custom(url.to_string()), source.to_string());
let input = StringInput::from(&*source_file);
let syntax = get_syntax(url, content_type);
let lexer = Lexer::new(syntax, JscTarget::Es2020, input, Some(&comments));
let mut parser = Parser::new_from(lexer);
let module = parser
.parse_module()
.map_err(|e| ParseError::new(e, &source_map))?;
let mut deps = Vec::new();
for import in analyze_dependencies(&module, &source_map, &comments) {
if (import.kind == DependencyKind::Import
|| import.kind == DependencyKind::Export)
&& import.is_dynamic == false
{
let specifier = import.specifier.to_string();
deps.push(resolve_import(&specifier, url.as_str())?);
}
}
// If the file is not jsx, ts, or tsx we do not need to transform it. In that
// case source == transformed.
if !syntax.jsx() && !syntax.typescript() {
return Ok((deps, None));
}
use swc_ecmascript::transforms::react;
let program = Program::Module(module);
let options = EmitOptions::default();
let source_map = std::rc::Rc::new(source_map);
let jsx_pass = react::react(
source_map.clone(),
Some(&comments),
react::Options {
pragma: options.jsx_factory.clone(),
pragma_frag: options.jsx_fragment_factory.clone(),
// this will use `Object.assign()` instead of the `_extends` helper
// when spreading props.
use_builtins: true,
..Default::default()
},
);
use swc_common::chain;
use swc_common::Globals;
use swc_ecmascript::transforms::fixer;
use swc_ecmascript::transforms::helpers;
use swc_ecmascript::transforms::pass::Optional;
use swc_ecmascript::transforms::proposals;
use swc_ecmascript::transforms::typescript;
use swc_ecmascript::visit::FoldWith;
let mut passes = chain!(
Optional::new(jsx_pass, options.transform_jsx),
proposals::decorators::decorators(proposals::decorators::Config {
legacy: true,
emit_metadata: options.emit_metadata
}),
helpers::inject_helpers(),
typescript::strip(),
fixer(Some(&comments)),
);
let program = swc_common::GLOBALS.set(&Globals::new(), || {
helpers::HELPERS.set(&helpers::Helpers::new(false), || {
program.fold_with(&mut passes)
})
});
use swc_ecmascript::codegen::text_writer::JsWriter;
use swc_ecmascript::codegen::Node;
let mut src_map_buf = vec![];
let mut buf = vec![];
{
let writer = Box::new(JsWriter::new(
source_map.clone(),
"\n",
&mut buf,
Some(&mut src_map_buf),
));
let config = swc_ecmascript::codegen::Config { minify: false };
let mut emitter = swc_ecmascript::codegen::Emitter {
cfg: config,
comments: Some(&comments),
cm: source_map.clone(),
wr: writer,
};
program
.emit_with(&mut emitter)
.map_err(|err| Error::Other(Box::new(err)))?;
}
let mut src =
String::from_utf8(buf).map_err(|err| Error::Other(Box::new(err)))?;
{
let mut buf = Vec::new();
source_map
.build_source_map_from(&mut src_map_buf, None)
.to_writer(&mut buf)
.map_err(|err| Error::Other(Box::new(err)))?;
src.push_str("//# sourceMappingURL=data:application/json;base64,");
let encoded_map = base64::encode(buf);
src.push_str(&encoded_map);
}
Ok((deps, Some(src)))
}
fn get_syntax(url: &Url, maybe_content_type: &Option<String>) -> Syntax {
fn get_es_config(jsx: bool) -> EsConfig {
EsConfig {
class_private_methods: true,
class_private_props: true,
class_props: true,
dynamic_import: true,
export_default_from: true,
export_namespace_from: true,
import_meta: true,
jsx,
nullish_coalescing: true,
num_sep: true,
optional_chaining: true,
top_level_await: true,
..EsConfig::default()
}
}
fn get_ts_config(tsx: bool, dts: bool) -> TsConfig {
TsConfig {
decorators: true,
dts,
dynamic_import: true,
tsx,
..TsConfig::default()
}
}
let maybe_extension = if let Some(content_type) = maybe_content_type {
match content_type
.split(";")
.next()
.unwrap()
.trim()
.to_lowercase()
.as_ref()
{
"application/typescript"
| "text/typescript"
| "video/vnd.dlna.mpeg-tts"
| "video/mp2t"
| "application/x-typescript" => Some("ts"),
"application/javascript"
| "text/javascript"
| "application/ecmascript"
| "text/ecmascript"
| "application/x-javascript"
| "application/node" => Some("js"),
"text/jsx" => Some("jsx"),
"text/tsx" => Some("tsx"),
_ => None,
}
} else {
None
};
let extension = if maybe_extension.is_some() {
maybe_extension
} else {
let parts: Vec<&str> = url.as_str().split('.').collect();
parts.last().copied()
};
match extension {
Some("js") => Syntax::Es(get_es_config(false)),
Some("jsx") => Syntax::Es(get_es_config(true)),
Some("ts") => Syntax::Typescript(get_ts_config(false, false)),
Some("tsx") => Syntax::Typescript(get_ts_config(true, false)),
_ => Syntax::Typescript(get_ts_config(false, false)),
}
}
pub struct ParseError {
lines: Vec<String>,
}
impl ParseError {
fn new(
err: swc_ecmascript::parser::error::Error,
source_map: &SourceMap,
) -> Self {
let error_buffer = ErrorBuffer::default();
let handler = Handler::with_emitter_and_flags(
Box::new(error_buffer.clone()),
HandlerFlags {
can_emit_warnings: true,
dont_buffer_diagnostics: true,
..HandlerFlags::default()
},
);
let mut diagnostic = err.into_diagnostic(&handler);
diagnostic.emit();
let v = error_buffer.0.lock().unwrap();
let lines = v
.iter()
.map(|d| {
if let Some(span) = d.span.primary_span() {
let loc = source_map.lookup_char_pos(span.lo);
let file_name = match &loc.file.name {
FileName::Custom(n) => n,
_ => unreachable!(),
};
format!(
"{} at {}:{}:{}",
d.message(),
file_name,
loc.line,
loc.col_display
)
} else {
d.message()
}
})
.collect::<Vec<_>>();
Self { lines }
}
}
impl std::error::Error for ParseError {}
impl std::fmt::Display for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for line in &self.lines {
writeln!(f, "{}", line)?;
}
Ok(())
}
}
impl std::fmt::Debug for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
/// A buffer for collecting errors from the AST parser.
#[derive(Debug, Clone, Default)]
pub struct ErrorBuffer(Arc<Mutex<Vec<Diagnostic>>>);
impl Emitter for ErrorBuffer {
fn emit(&mut self, db: &DiagnosticBuilder) {
self.0.lock().unwrap().push((**db).clone());
}
}
/// Options which can be adjusted when transpiling a module.
#[derive(Debug, Clone)]
pub struct EmitOptions {
/// Indicate if JavaScript is being checked/transformed as well, or if it is
/// only TypeScript.
pub check_js: bool,
/// When emitting a legacy decorator, also emit experimental decorator meta
/// data. Defaults to `false`.
pub emit_metadata: bool,
/// Should the source map be inlined in the emitted code file, or provided
/// as a separate file. Defaults to `true`.
pub inline_source_map: bool,
/// When transforming JSX, what value should be used for the JSX factory.
/// Defaults to `React.createElement`.
pub jsx_factory: String,
/// When transforming JSX, what value should be used for the JSX fragment
/// factory. Defaults to `React.Fragment`.
pub jsx_fragment_factory: String,
/// Should JSX be transformed or preserved. Defaults to `true`.
pub transform_jsx: bool,
}
impl Default for EmitOptions {
fn default() -> Self {
EmitOptions {
check_js: false,
emit_metadata: false,
inline_source_map: true,
jsx_factory: "h".into(),
jsx_fragment_factory: "Fragment".into(),
transform_jsx: true,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_syntax() {
// Prefer content-type over extension.
let url = Url::parse("https://deno.land/x/foo@0.1.0/bar.js").unwrap();
let content_type = Some("text/jsx".to_string());
let syntax = get_syntax(&url, &content_type);
assert!(syntax.jsx());
assert!(!syntax.typescript());
// Fallback to extension if content-type is unsupported.
let url = Url::parse("https://deno.land/x/foo@0.1.0/bar.tsx").unwrap();
let content_type = Some("text/unsupported".to_string());
let syntax = get_syntax(&url, &content_type);
assert!(syntax.jsx());
assert!(syntax.typescript());
}
#[test]
fn jsx() {
let url = Url::parse(
"https://deno.land/x/dext@0.10.3/example/pages/dynamic/%5Bname%5D.tsx",
)
.unwrap();
let source = r#"
import { Fragment, h } from "../../deps.ts";
import type { PageProps } from "../../deps.ts";
function UserPage(props: PageProps) {
const name = props.route?.name ?? "";
return (
<>
<h1>This is the page for {name}</h1>
<p> <a href="/">Go home</a> </p>
</>
);
}
export default UserPage;
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 1);
}
#[test]
#[ignore]
fn | () {
let url = Url::parse("https://deno.land/x/oak@v6.4.2/router.ts").unwrap();
let source = r#"
delete<P extends RouteParams = RP, S extends State = RS>(
name: string,
path: string,
...middleware: RouterMiddleware<P, S>[]
): Router<P extends RP ? P : (P & RP), S extends RS ? S : (S & RS)>;
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 0);
}
#[test]
#[ignore]
fn dynamic_import() {
let url = Url::parse("https://deno.land/x/oak@v6.4.2/router.ts").unwrap();
let source = r#"
await import("fs");
await import("https://deno.land/std/version.ts");
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 0);
}
}
| complex_types | identifier_name |
parser.rs | use crate::error::Error;
use crate::resolve_import::resolve_import;
use std::sync::Arc;
use std::sync::Mutex;
use swc_common::comments::SingleThreadedComments;
use swc_common::errors::Diagnostic;
use swc_common::errors::DiagnosticBuilder;
use swc_common::errors::Emitter;
use swc_common::errors::Handler;
use swc_common::errors::HandlerFlags;
use swc_common::input::StringInput;
use swc_common::FileName;
use swc_common::SourceMap;
use swc_ecmascript::ast::Program;
use swc_ecmascript::dep_graph::analyze_dependencies;
use swc_ecmascript::dep_graph::DependencyKind;
use swc_ecmascript::parser::lexer::Lexer;
use swc_ecmascript::parser::EsConfig;
use swc_ecmascript::parser::JscTarget;
use swc_ecmascript::parser::Parser;
use swc_ecmascript::parser::Syntax;
use swc_ecmascript::parser::TsConfig;
use url::Url;
// Returns (deps, transpiled source code)
pub fn get_deps_and_transpile(
url: &Url,
source: &str,
content_type: &Option<String>,
) -> Result<(Vec<Url>, Option<String>), Error> {
let comments = SingleThreadedComments::default();
let source_map = SourceMap::default();
let source_file = source_map
.new_source_file(FileName::Custom(url.to_string()), source.to_string());
let input = StringInput::from(&*source_file);
let syntax = get_syntax(url, content_type);
let lexer = Lexer::new(syntax, JscTarget::Es2020, input, Some(&comments));
let mut parser = Parser::new_from(lexer);
let module = parser
.parse_module()
.map_err(|e| ParseError::new(e, &source_map))?;
let mut deps = Vec::new();
for import in analyze_dependencies(&module, &source_map, &comments) {
if (import.kind == DependencyKind::Import
|| import.kind == DependencyKind::Export)
&& import.is_dynamic == false
{
let specifier = import.specifier.to_string();
deps.push(resolve_import(&specifier, url.as_str())?);
}
}
// If the file is not jsx, ts, or tsx we do not need to transform it. In that
// case source == transformed.
if !syntax.jsx() && !syntax.typescript() {
return Ok((deps, None));
}
use swc_ecmascript::transforms::react;
let program = Program::Module(module);
let options = EmitOptions::default();
let source_map = std::rc::Rc::new(source_map);
let jsx_pass = react::react(
source_map.clone(),
Some(&comments),
react::Options {
pragma: options.jsx_factory.clone(),
pragma_frag: options.jsx_fragment_factory.clone(),
// this will use `Object.assign()` instead of the `_extends` helper
// when spreading props.
use_builtins: true,
..Default::default()
},
);
use swc_common::chain;
use swc_common::Globals;
use swc_ecmascript::transforms::fixer;
use swc_ecmascript::transforms::helpers;
use swc_ecmascript::transforms::pass::Optional;
use swc_ecmascript::transforms::proposals;
use swc_ecmascript::transforms::typescript;
use swc_ecmascript::visit::FoldWith;
let mut passes = chain!(
Optional::new(jsx_pass, options.transform_jsx),
proposals::decorators::decorators(proposals::decorators::Config {
legacy: true,
emit_metadata: options.emit_metadata
}),
helpers::inject_helpers(),
typescript::strip(),
fixer(Some(&comments)),
);
let program = swc_common::GLOBALS.set(&Globals::new(), || {
helpers::HELPERS.set(&helpers::Helpers::new(false), || {
program.fold_with(&mut passes)
})
});
use swc_ecmascript::codegen::text_writer::JsWriter;
use swc_ecmascript::codegen::Node;
let mut src_map_buf = vec![];
let mut buf = vec![];
{
let writer = Box::new(JsWriter::new(
source_map.clone(),
"\n",
&mut buf,
Some(&mut src_map_buf),
));
let config = swc_ecmascript::codegen::Config { minify: false };
let mut emitter = swc_ecmascript::codegen::Emitter {
cfg: config,
comments: Some(&comments),
cm: source_map.clone(),
wr: writer,
};
program
.emit_with(&mut emitter)
.map_err(|err| Error::Other(Box::new(err)))?;
}
let mut src =
String::from_utf8(buf).map_err(|err| Error::Other(Box::new(err)))?;
{
let mut buf = Vec::new();
source_map
.build_source_map_from(&mut src_map_buf, None)
.to_writer(&mut buf)
.map_err(|err| Error::Other(Box::new(err)))?;
src.push_str("//# sourceMappingURL=data:application/json;base64,");
let encoded_map = base64::encode(buf);
src.push_str(&encoded_map);
}
Ok((deps, Some(src)))
}
fn get_syntax(url: &Url, maybe_content_type: &Option<String>) -> Syntax {
fn get_es_config(jsx: bool) -> EsConfig {
EsConfig {
class_private_methods: true,
class_private_props: true,
class_props: true,
dynamic_import: true,
export_default_from: true,
export_namespace_from: true,
import_meta: true,
jsx,
nullish_coalescing: true,
num_sep: true,
optional_chaining: true,
top_level_await: true,
..EsConfig::default()
}
}
fn get_ts_config(tsx: bool, dts: bool) -> TsConfig {
TsConfig {
decorators: true,
dts,
dynamic_import: true,
tsx,
..TsConfig::default()
}
}
let maybe_extension = if let Some(content_type) = maybe_content_type {
match content_type
.split(";")
.next()
.unwrap()
.trim()
.to_lowercase()
.as_ref()
{
"application/typescript"
| "text/typescript"
| "video/vnd.dlna.mpeg-tts"
| "video/mp2t"
| "application/x-typescript" => Some("ts"),
"application/javascript"
| "text/javascript"
| "application/ecmascript"
| "text/ecmascript"
| "application/x-javascript"
| "application/node" => Some("js"),
"text/jsx" => Some("jsx"),
"text/tsx" => Some("tsx"),
_ => None,
}
} else {
None
};
let extension = if maybe_extension.is_some() {
maybe_extension
} else {
let parts: Vec<&str> = url.as_str().split('.').collect();
parts.last().copied()
};
match extension {
Some("js") => Syntax::Es(get_es_config(false)),
Some("jsx") => Syntax::Es(get_es_config(true)),
Some("ts") => Syntax::Typescript(get_ts_config(false, false)),
Some("tsx") => Syntax::Typescript(get_ts_config(true, false)),
_ => Syntax::Typescript(get_ts_config(false, false)),
}
}
pub struct ParseError {
lines: Vec<String>,
}
impl ParseError {
fn new(
err: swc_ecmascript::parser::error::Error,
source_map: &SourceMap,
) -> Self {
let error_buffer = ErrorBuffer::default();
let handler = Handler::with_emitter_and_flags(
Box::new(error_buffer.clone()),
HandlerFlags {
can_emit_warnings: true,
dont_buffer_diagnostics: true,
..HandlerFlags::default()
},
);
let mut diagnostic = err.into_diagnostic(&handler);
diagnostic.emit();
let v = error_buffer.0.lock().unwrap();
let lines = v
.iter()
.map(|d| {
if let Some(span) = d.span.primary_span() {
let loc = source_map.lookup_char_pos(span.lo);
let file_name = match &loc.file.name {
FileName::Custom(n) => n,
_ => unreachable!(),
};
format!(
"{} at {}:{}:{}",
d.message(),
file_name,
loc.line,
loc.col_display
)
} else {
d.message()
}
})
.collect::<Vec<_>>();
Self { lines }
}
}
impl std::error::Error for ParseError {}
impl std::fmt::Display for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for line in &self.lines {
writeln!(f, "{}", line)?;
}
Ok(())
}
}
impl std::fmt::Debug for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
/// A buffer for collecting errors from the AST parser.
#[derive(Debug, Clone, Default)]
pub struct ErrorBuffer(Arc<Mutex<Vec<Diagnostic>>>);
impl Emitter for ErrorBuffer {
fn emit(&mut self, db: &DiagnosticBuilder) {
self.0.lock().unwrap().push((**db).clone());
}
}
/// Options which can be adjusted when transpiling a module.
#[derive(Debug, Clone)]
pub struct EmitOptions {
/// Indicate if JavaScript is being checked/transformed as well, or if it is
/// only TypeScript.
pub check_js: bool,
/// When emitting a legacy decorator, also emit experimental decorator meta
/// data. Defaults to `false`.
pub emit_metadata: bool,
/// Should the source map be inlined in the emitted code file, or provided
/// as a separate file. Defaults to `true`.
pub inline_source_map: bool,
/// When transforming JSX, what value should be used for the JSX factory.
/// Defaults to `React.createElement`.
pub jsx_factory: String,
/// When transforming JSX, what value should be used for the JSX fragment
/// factory. Defaults to `React.Fragment`.
pub jsx_fragment_factory: String,
/// Should JSX be transformed or preserved. Defaults to `true`.
pub transform_jsx: bool,
}
impl Default for EmitOptions {
fn default() -> Self {
EmitOptions {
check_js: false,
emit_metadata: false,
inline_source_map: true,
jsx_factory: "h".into(),
jsx_fragment_factory: "Fragment".into(),
transform_jsx: true,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_syntax() |
#[test]
fn jsx() {
let url = Url::parse(
"https://deno.land/x/dext@0.10.3/example/pages/dynamic/%5Bname%5D.tsx",
)
.unwrap();
let source = r#"
import { Fragment, h } from "../../deps.ts";
import type { PageProps } from "../../deps.ts";
function UserPage(props: PageProps) {
const name = props.route?.name ?? "";
return (
<>
<h1>This is the page for {name}</h1>
<p> <a href="/">Go home</a> </p>
</>
);
}
export default UserPage;
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 1);
}
#[test]
#[ignore]
fn complex_types() {
let url = Url::parse("https://deno.land/x/oak@v6.4.2/router.ts").unwrap();
let source = r#"
delete<P extends RouteParams = RP, S extends State = RS>(
name: string,
path: string,
...middleware: RouterMiddleware<P, S>[]
): Router<P extends RP ? P : (P & RP), S extends RS ? S : (S & RS)>;
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 0);
}
#[test]
#[ignore]
fn dynamic_import() {
let url = Url::parse("https://deno.land/x/oak@v6.4.2/router.ts").unwrap();
let source = r#"
await import("fs");
await import("https://deno.land/std/version.ts");
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 0);
}
}
| {
// Prefer content-type over extension.
let url = Url::parse("https://deno.land/x/foo@0.1.0/bar.js").unwrap();
let content_type = Some("text/jsx".to_string());
let syntax = get_syntax(&url, &content_type);
assert!(syntax.jsx());
assert!(!syntax.typescript());
// Fallback to extension if content-type is unsupported.
let url = Url::parse("https://deno.land/x/foo@0.1.0/bar.tsx").unwrap();
let content_type = Some("text/unsupported".to_string());
let syntax = get_syntax(&url, &content_type);
assert!(syntax.jsx());
assert!(syntax.typescript());
} | identifier_body |
parser.rs | use crate::error::Error;
use crate::resolve_import::resolve_import;
use std::sync::Arc;
use std::sync::Mutex;
use swc_common::comments::SingleThreadedComments;
use swc_common::errors::Diagnostic;
use swc_common::errors::DiagnosticBuilder;
use swc_common::errors::Emitter;
use swc_common::errors::Handler;
use swc_common::errors::HandlerFlags;
use swc_common::input::StringInput;
use swc_common::FileName;
use swc_common::SourceMap;
use swc_ecmascript::ast::Program;
use swc_ecmascript::dep_graph::analyze_dependencies;
use swc_ecmascript::dep_graph::DependencyKind;
use swc_ecmascript::parser::lexer::Lexer;
use swc_ecmascript::parser::EsConfig;
use swc_ecmascript::parser::JscTarget;
use swc_ecmascript::parser::Parser;
use swc_ecmascript::parser::Syntax;
use swc_ecmascript::parser::TsConfig;
use url::Url;
// Returns (deps, transpiled source code)
pub fn get_deps_and_transpile(
url: &Url,
source: &str,
content_type: &Option<String>,
) -> Result<(Vec<Url>, Option<String>), Error> {
let comments = SingleThreadedComments::default();
let source_map = SourceMap::default();
let source_file = source_map
.new_source_file(FileName::Custom(url.to_string()), source.to_string());
let input = StringInput::from(&*source_file);
let syntax = get_syntax(url, content_type);
let lexer = Lexer::new(syntax, JscTarget::Es2020, input, Some(&comments));
let mut parser = Parser::new_from(lexer);
let module = parser
.parse_module()
.map_err(|e| ParseError::new(e, &source_map))?;
let mut deps = Vec::new();
for import in analyze_dependencies(&module, &source_map, &comments) {
if (import.kind == DependencyKind::Import
|| import.kind == DependencyKind::Export)
&& import.is_dynamic == false
{
let specifier = import.specifier.to_string();
deps.push(resolve_import(&specifier, url.as_str())?);
}
}
// If the file is not jsx, ts, or tsx we do not need to transform it. In that
// case source == transformed.
if !syntax.jsx() && !syntax.typescript() {
return Ok((deps, None));
}
use swc_ecmascript::transforms::react;
let program = Program::Module(module);
let options = EmitOptions::default();
let source_map = std::rc::Rc::new(source_map);
let jsx_pass = react::react(
source_map.clone(),
Some(&comments),
react::Options {
pragma: options.jsx_factory.clone(),
pragma_frag: options.jsx_fragment_factory.clone(),
// this will use `Object.assign()` instead of the `_extends` helper
// when spreading props.
use_builtins: true,
..Default::default()
},
);
use swc_common::chain;
use swc_common::Globals;
use swc_ecmascript::transforms::fixer;
use swc_ecmascript::transforms::helpers;
use swc_ecmascript::transforms::pass::Optional;
use swc_ecmascript::transforms::proposals;
use swc_ecmascript::transforms::typescript;
use swc_ecmascript::visit::FoldWith;
let mut passes = chain!(
Optional::new(jsx_pass, options.transform_jsx),
proposals::decorators::decorators(proposals::decorators::Config {
legacy: true,
emit_metadata: options.emit_metadata
}),
helpers::inject_helpers(),
typescript::strip(),
fixer(Some(&comments)),
);
let program = swc_common::GLOBALS.set(&Globals::new(), || {
helpers::HELPERS.set(&helpers::Helpers::new(false), || {
program.fold_with(&mut passes)
})
});
use swc_ecmascript::codegen::text_writer::JsWriter;
use swc_ecmascript::codegen::Node;
let mut src_map_buf = vec![];
let mut buf = vec![];
{
let writer = Box::new(JsWriter::new(
source_map.clone(),
"\n",
&mut buf,
Some(&mut src_map_buf),
));
let config = swc_ecmascript::codegen::Config { minify: false };
let mut emitter = swc_ecmascript::codegen::Emitter {
cfg: config,
comments: Some(&comments),
cm: source_map.clone(),
wr: writer,
};
program
.emit_with(&mut emitter)
.map_err(|err| Error::Other(Box::new(err)))?;
}
let mut src =
String::from_utf8(buf).map_err(|err| Error::Other(Box::new(err)))?;
{
let mut buf = Vec::new();
source_map
.build_source_map_from(&mut src_map_buf, None)
.to_writer(&mut buf)
.map_err(|err| Error::Other(Box::new(err)))?;
src.push_str("//# sourceMappingURL=data:application/json;base64,");
let encoded_map = base64::encode(buf);
src.push_str(&encoded_map);
}
Ok((deps, Some(src)))
}
fn get_syntax(url: &Url, maybe_content_type: &Option<String>) -> Syntax {
fn get_es_config(jsx: bool) -> EsConfig {
EsConfig {
class_private_methods: true,
class_private_props: true,
class_props: true,
dynamic_import: true,
export_default_from: true,
export_namespace_from: true,
import_meta: true,
jsx,
nullish_coalescing: true,
num_sep: true,
optional_chaining: true,
top_level_await: true,
..EsConfig::default()
}
}
fn get_ts_config(tsx: bool, dts: bool) -> TsConfig {
TsConfig {
decorators: true,
dts,
dynamic_import: true,
tsx,
..TsConfig::default()
}
}
let maybe_extension = if let Some(content_type) = maybe_content_type {
match content_type
.split(";")
.next()
.unwrap()
.trim()
.to_lowercase()
.as_ref()
{
"application/typescript"
| "text/typescript"
| "video/vnd.dlna.mpeg-tts"
| "video/mp2t"
| "application/x-typescript" => Some("ts"),
"application/javascript"
| "text/javascript"
| "application/ecmascript"
| "text/ecmascript"
| "application/x-javascript"
| "application/node" => Some("js"),
"text/jsx" => Some("jsx"),
"text/tsx" => Some("tsx"),
_ => None,
}
} else {
None
};
let extension = if maybe_extension.is_some() {
maybe_extension
} else {
let parts: Vec<&str> = url.as_str().split('.').collect();
parts.last().copied()
};
match extension {
Some("js") => Syntax::Es(get_es_config(false)),
Some("jsx") => Syntax::Es(get_es_config(true)),
Some("ts") => Syntax::Typescript(get_ts_config(false, false)),
Some("tsx") => Syntax::Typescript(get_ts_config(true, false)),
_ => Syntax::Typescript(get_ts_config(false, false)),
}
}
pub struct ParseError {
lines: Vec<String>,
}
impl ParseError {
fn new(
err: swc_ecmascript::parser::error::Error,
source_map: &SourceMap,
) -> Self {
let error_buffer = ErrorBuffer::default();
let handler = Handler::with_emitter_and_flags(
Box::new(error_buffer.clone()),
HandlerFlags {
can_emit_warnings: true,
dont_buffer_diagnostics: true,
..HandlerFlags::default()
},
);
let mut diagnostic = err.into_diagnostic(&handler);
diagnostic.emit();
let v = error_buffer.0.lock().unwrap();
let lines = v
.iter()
.map(|d| {
if let Some(span) = d.span.primary_span() {
let loc = source_map.lookup_char_pos(span.lo);
let file_name = match &loc.file.name {
FileName::Custom(n) => n,
_ => unreachable!(),
};
format!(
"{} at {}:{}:{}",
d.message(),
file_name,
loc.line,
loc.col_display
)
} else {
d.message()
}
})
.collect::<Vec<_>>();
Self { lines }
}
}
impl std::error::Error for ParseError {}
impl std::fmt::Display for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for line in &self.lines {
writeln!(f, "{}", line)?;
}
Ok(())
}
}
impl std::fmt::Debug for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
/// A buffer for collecting errors from the AST parser.
#[derive(Debug, Clone, Default)]
pub struct ErrorBuffer(Arc<Mutex<Vec<Diagnostic>>>);
impl Emitter for ErrorBuffer {
fn emit(&mut self, db: &DiagnosticBuilder) {
self.0.lock().unwrap().push((**db).clone());
}
}
/// Options which can be adjusted when transpiling a module.
#[derive(Debug, Clone)]
pub struct EmitOptions {
/// Indicate if JavaScript is being checked/transformed as well, or if it is
/// only TypeScript.
pub check_js: bool,
/// When emitting a legacy decorator, also emit experimental decorator meta
/// data. Defaults to `false`.
pub emit_metadata: bool,
/// Should the source map be inlined in the emitted code file, or provided
/// as a separate file. Defaults to `true`.
pub inline_source_map: bool,
/// When transforming JSX, what value should be used for the JSX factory.
/// Defaults to `React.createElement`.
pub jsx_factory: String,
/// When transforming JSX, what value should be used for the JSX fragment
/// factory. Defaults to `React.Fragment`.
pub jsx_fragment_factory: String,
/// Should JSX be transformed or preserved. Defaults to `true`.
pub transform_jsx: bool,
}
impl Default for EmitOptions {
fn default() -> Self {
EmitOptions {
check_js: false,
emit_metadata: false,
inline_source_map: true,
jsx_factory: "h".into(),
jsx_fragment_factory: "Fragment".into(),
transform_jsx: true,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test] | let syntax = get_syntax(&url, &content_type);
assert!(syntax.jsx());
assert!(!syntax.typescript());
// Fallback to extension if content-type is unsupported.
let url = Url::parse("https://deno.land/x/foo@0.1.0/bar.tsx").unwrap();
let content_type = Some("text/unsupported".to_string());
let syntax = get_syntax(&url, &content_type);
assert!(syntax.jsx());
assert!(syntax.typescript());
}
#[test]
fn jsx() {
let url = Url::parse(
"https://deno.land/x/dext@0.10.3/example/pages/dynamic/%5Bname%5D.tsx",
)
.unwrap();
let source = r#"
import { Fragment, h } from "../../deps.ts";
import type { PageProps } from "../../deps.ts";
function UserPage(props: PageProps) {
const name = props.route?.name ?? "";
return (
<>
<h1>This is the page for {name}</h1>
<p> <a href="/">Go home</a> </p>
</>
);
}
export default UserPage;
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 1);
}
#[test]
#[ignore]
fn complex_types() {
let url = Url::parse("https://deno.land/x/oak@v6.4.2/router.ts").unwrap();
let source = r#"
delete<P extends RouteParams = RP, S extends State = RS>(
name: string,
path: string,
...middleware: RouterMiddleware<P, S>[]
): Router<P extends RP ? P : (P & RP), S extends RS ? S : (S & RS)>;
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 0);
}
#[test]
#[ignore]
fn dynamic_import() {
let url = Url::parse("https://deno.land/x/oak@v6.4.2/router.ts").unwrap();
let source = r#"
await import("fs");
await import("https://deno.land/std/version.ts");
"#;
let (deps, _transpiled) =
get_deps_and_transpile(&url, source, &None).unwrap();
assert_eq!(deps.len(), 0);
}
} | fn test_get_syntax() {
// Prefer content-type over extension.
let url = Url::parse("https://deno.land/x/foo@0.1.0/bar.js").unwrap();
let content_type = Some("text/jsx".to_string()); | random_line_split |
main.rs | use once_cell::sync::Lazy;
use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour};
use libp2p::identity;
use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent};
use libp2p::noise::{X25519Spec, Keypair, NoiseConfig};
use libp2p::tcp::TokioTcpConfig;
use libp2p::core::upgrade;
use libp2p::mdns::{TokioMdns, MdnsEvent};
use tokio::sync::mpsc;
use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess};
use tokio::io::AsyncBufReadExt;
use std::collections::HashSet;
use serde::{Serialize, Deserialize};
use log::{error, info};
const STORAGE_FILE_PATH: &str = "./recipes.json";
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync + 'static>>;
static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519());
static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public()));
static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes"));
type Recipes = Vec<Recipe>;
#[derive(Debug, Serialize, Deserialize)]
struct Recipe {
id: usize,
name: String,
ingredients: String,
instructions: String,
public: bool,
}
#[derive(Debug, Serialize, Deserialize)]
enum ListMode {
ALL,
One(String)
}
#[derive(Debug, Serialize, Deserialize)]
struct ListRequest {
mode: ListMode
}
#[derive(Debug, Serialize, Deserialize)]
struct ListResponse {
mode: ListMode,
data: Recipes,
receiver: String,
}
enum EventType {
Response(ListResponse),
Input(String)
}
#[derive(NetworkBehaviour)]
struct | {
floodsub: Floodsub,
mdns: TokioMdns,
#[behaviour(ignore)]
response_sender: mpsc::UnboundedSender<ListResponse>,
}
#[tokio::main]
async fn main() {
pretty_env_logger::init();
info!("Peer Id: {}", PEER_ID.clone());
let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel();
let auth_keys = Keypair::<X25519Spec>::new()
.into_authentic(&KEYS)
.expect("can create auth keys");
let transport = TokioTcpConfig::new()
.upgrade(upgrade::Version::V1)
.authenticate(NoiseConfig::xx(auth_keys).into_authenticated())
.multiplex(mplex::MplexConfig::new())
.boxed();
let mut behaviour = RecipeBehaviour {
floodsub: Floodsub::new(PEER_ID.clone()),
mdns: TokioMdns::new().expect("can create mdns"),
response_sender
};
behaviour.floodsub.subscribe(TOPIC.clone());
let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone())
.executor(Box::new(|fut| {
tokio::spawn(fut);
})).build();
Swarm::listen_on(
&mut swarm,
"/ip4/0.0.0.0/tcp/0"
.parse()
.expect("can get a local socket"))
.expect("swarm can be started");
let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines();
loop {
let evt = {
tokio::select! {
line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))),
event = swarm.next() => {
info!("Unhandled Swarm Event: {:?}", event);
None
},
response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists")))
}
};
if let Some(event) = evt {
match event {
EventType::Response(resp) => {
let json = serde_json::to_string(&resp).expect("can jsonify response");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
},
EventType::Input(line) => match line.as_str() {
"ls p" => handle_list_peers(&mut swarm).await,
cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await,
cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await,
cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await,
_ => error!("unknown command")
}
}
}
}
}
impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: MdnsEvent) {
match event {
MdnsEvent::Discovered(discovered_list) => {
for (peer, _addr) in discovered_list {
self.floodsub.add_node_to_partial_view(peer);
}
}
MdnsEvent::Expired(expired_list) => {
for (peer, _addr) in expired_list {
if !self.mdns.has_node(&peer) {
self.floodsub.remove_node_from_partial_view(&peer);
}
}
}
}
}
}
impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: FloodsubEvent) {
match event {
FloodsubEvent::Message(msg) => {
if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) {
if resp.receiver == PEER_ID.to_string() {
info!("response from {}:", msg.source);
resp.data.iter().for_each(|r| info!("{:?}", r));
}
} else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) {
match req.mode {
ListMode::ALL => {
info!("received ALL req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
ListMode::One(ref peer_id) => {
if peer_id == &PEER_ID.to_string() {
info!("received req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
}
}
}
},
_ => ()
}
}
}
async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) {
info!("Discovered Peers:");
let nodes = swarm.mdns.discovered_nodes();
let mut unique_peers = HashSet::new();
for peer in nodes {
unique_peers.insert(peer);
}
unique_peers.iter().for_each(|p| info!("{}", p));
}
async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) {
let rest = cmd.strip_prefix("ls r ");
match rest {
Some("all") => {
let req = ListRequest {
mode: ListMode::ALL
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
Some(recipes_peer_id) => {
let req = ListRequest {
mode: ListMode::One(recipes_peer_id.to_owned())
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
None => {
match read_local_recipes().await {
Ok(v) => {
info!("local recipes ({})", v.len());
v.iter().for_each(|r| info!("{:?}", r));
}
Err(e) => error!("error fetching local recipes: {}", e)
}
}
}
}
async fn handle_create_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("create r") {
let elements: Vec<&str> = rest.split("|").collect();
if elements.len() < 3 {
info!("too few arguments - Format: name|ingredients|instructions");
} else {
let name = elements.get(0).expect("name is there");
let ingredients = elements.get(1).expect("ingredients is there");
let instruments = elements.get(2).expect("instruments is there");
if let Err(e) = create_new_recipe(name, ingredients, instruments).await {
error!("error creating recipe: {}", e);
}
}
}
}
fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) {
tokio::spawn(async move {
match read_local_recipes().await {
Ok(recipes) => {
let resp = ListResponse {
mode: ListMode::ALL,
receiver,
data: recipes.into_iter().filter(|r| r.public).collect()
};
if let Err(e) = sender.send(resp) {
error!("error sending response via channel, {}", e);
}
}
Err(e) => error!("error fetching local recipes to answer ALL request, {}", e)
}
});
}
async fn handle_publish_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("publish r") {
match rest.trim().parse::<usize>() {
Ok(id) => {
if let Err(e) = publish_recipe(id).await {
info!("error publishing recipe with id {}, {}", id, e);
} else {
info!("published recipe with id: {}", id);
}
}
Err(e) => error!("invalid id: {}, {}", rest.trim(), e)
}
}
}
async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
let new_id = match local_recipes.iter().max_by_key(|r|r.id) {
Some(v) => v.id + 1,
None => 0
};
local_recipes.push(Recipe {
id: new_id,
name: name.to_owned(),
ingredients: ingredients.to_owned(),
instructions: instructions.to_owned(),
public: false
});
write_local_recipes(&local_recipes).await?;
info!("create recipe:");
info!("name: {}", name);
info!("ingredients: {}", ingredients);
info!("instruments: {}", instructions);
Ok(())
}
async fn publish_recipe(id: usize) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
local_recipes
.iter_mut()
.filter(|r|r.id == id)
.for_each(|r| r.public = true);
write_local_recipes(&local_recipes).await?;
Ok(())
}
async fn read_local_recipes() -> Result<Recipes> {
let content = tokio::fs::read(STORAGE_FILE_PATH).await?;
let result = serde_json::from_slice(&content)?;
Ok(result)
}
async fn write_local_recipes(recipes: &Recipes) -> Result<()> {
let json = serde_json::to_string(&recipes)?;
tokio::fs::write(STORAGE_FILE_PATH, &json).await?;
Ok(())
} | RecipeBehaviour | identifier_name |
main.rs | use once_cell::sync::Lazy;
use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour};
use libp2p::identity;
use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent};
use libp2p::noise::{X25519Spec, Keypair, NoiseConfig};
use libp2p::tcp::TokioTcpConfig;
use libp2p::core::upgrade;
use libp2p::mdns::{TokioMdns, MdnsEvent};
use tokio::sync::mpsc;
use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess};
use tokio::io::AsyncBufReadExt;
use std::collections::HashSet;
use serde::{Serialize, Deserialize};
use log::{error, info};
const STORAGE_FILE_PATH: &str = "./recipes.json";
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync + 'static>>;
static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519());
static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public()));
static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes"));
type Recipes = Vec<Recipe>;
#[derive(Debug, Serialize, Deserialize)]
struct Recipe {
id: usize,
name: String,
ingredients: String,
instructions: String,
public: bool,
}
#[derive(Debug, Serialize, Deserialize)]
enum ListMode {
ALL,
One(String)
}
#[derive(Debug, Serialize, Deserialize)]
struct ListRequest {
mode: ListMode
}
#[derive(Debug, Serialize, Deserialize)]
struct ListResponse {
mode: ListMode,
data: Recipes,
receiver: String,
}
enum EventType {
Response(ListResponse),
Input(String)
}
#[derive(NetworkBehaviour)]
struct RecipeBehaviour {
floodsub: Floodsub,
mdns: TokioMdns,
#[behaviour(ignore)]
response_sender: mpsc::UnboundedSender<ListResponse>,
}
#[tokio::main]
async fn main() {
pretty_env_logger::init();
info!("Peer Id: {}", PEER_ID.clone());
let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel();
let auth_keys = Keypair::<X25519Spec>::new()
.into_authentic(&KEYS)
.expect("can create auth keys");
let transport = TokioTcpConfig::new()
.upgrade(upgrade::Version::V1)
.authenticate(NoiseConfig::xx(auth_keys).into_authenticated())
.multiplex(mplex::MplexConfig::new())
.boxed();
let mut behaviour = RecipeBehaviour {
floodsub: Floodsub::new(PEER_ID.clone()),
mdns: TokioMdns::new().expect("can create mdns"),
response_sender
};
behaviour.floodsub.subscribe(TOPIC.clone());
let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone())
.executor(Box::new(|fut| {
tokio::spawn(fut);
})).build();
Swarm::listen_on(
&mut swarm,
"/ip4/0.0.0.0/tcp/0"
.parse()
.expect("can get a local socket"))
.expect("swarm can be started");
let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines();
loop {
let evt = {
tokio::select! {
line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))),
event = swarm.next() => {
info!("Unhandled Swarm Event: {:?}", event);
None
},
response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists")))
}
};
if let Some(event) = evt {
match event {
EventType::Response(resp) => {
let json = serde_json::to_string(&resp).expect("can jsonify response");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
},
EventType::Input(line) => match line.as_str() {
"ls p" => handle_list_peers(&mut swarm).await,
cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await,
cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await,
cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await,
_ => error!("unknown command")
}
}
}
}
}
impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: MdnsEvent) {
match event {
MdnsEvent::Discovered(discovered_list) => {
for (peer, _addr) in discovered_list {
self.floodsub.add_node_to_partial_view(peer);
}
}
MdnsEvent::Expired(expired_list) => {
for (peer, _addr) in expired_list {
if !self.mdns.has_node(&peer) {
self.floodsub.remove_node_from_partial_view(&peer);
}
}
}
}
}
}
impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: FloodsubEvent) {
match event {
FloodsubEvent::Message(msg) => {
if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) {
if resp.receiver == PEER_ID.to_string() {
info!("response from {}:", msg.source);
resp.data.iter().for_each(|r| info!("{:?}", r));
}
} else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) {
match req.mode {
ListMode::ALL => {
info!("received ALL req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
ListMode::One(ref peer_id) => {
if peer_id == &PEER_ID.to_string() {
info!("received req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
}
}
}
},
_ => ()
}
}
}
async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) {
info!("Discovered Peers:");
let nodes = swarm.mdns.discovered_nodes();
let mut unique_peers = HashSet::new();
for peer in nodes {
unique_peers.insert(peer);
}
unique_peers.iter().for_each(|p| info!("{}", p));
}
async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) {
let rest = cmd.strip_prefix("ls r ");
match rest {
Some("all") => {
let req = ListRequest {
mode: ListMode::ALL
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
Some(recipes_peer_id) => {
let req = ListRequest {
mode: ListMode::One(recipes_peer_id.to_owned())
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
None => {
match read_local_recipes().await {
Ok(v) => {
info!("local recipes ({})", v.len());
v.iter().for_each(|r| info!("{:?}", r));
}
Err(e) => error!("error fetching local recipes: {}", e)
}
}
}
}
async fn handle_create_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("create r") {
let elements: Vec<&str> = rest.split("|").collect();
if elements.len() < 3 {
info!("too few arguments - Format: name|ingredients|instructions");
} else {
let name = elements.get(0).expect("name is there");
let ingredients = elements.get(1).expect("ingredients is there");
let instruments = elements.get(2).expect("instruments is there");
if let Err(e) = create_new_recipe(name, ingredients, instruments).await {
error!("error creating recipe: {}", e);
}
}
}
}
fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) {
tokio::spawn(async move {
match read_local_recipes().await {
Ok(recipes) => {
let resp = ListResponse {
mode: ListMode::ALL,
receiver,
data: recipes.into_iter().filter(|r| r.public).collect()
};
if let Err(e) = sender.send(resp) {
error!("error sending response via channel, {}", e);
}
}
Err(e) => error!("error fetching local recipes to answer ALL request, {}", e)
}
});
}
async fn handle_publish_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("publish r") {
match rest.trim().parse::<usize>() {
Ok(id) => {
if let Err(e) = publish_recipe(id).await {
info!("error publishing recipe with id {}, {}", id, e);
} else {
info!("published recipe with id: {}", id);
}
}
Err(e) => error!("invalid id: {}, {}", rest.trim(), e)
}
}
}
async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
let new_id = match local_recipes.iter().max_by_key(|r|r.id) {
Some(v) => v.id + 1,
None => 0
};
local_recipes.push(Recipe { | });
write_local_recipes(&local_recipes).await?;
info!("create recipe:");
info!("name: {}", name);
info!("ingredients: {}", ingredients);
info!("instruments: {}", instructions);
Ok(())
}
async fn publish_recipe(id: usize) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
local_recipes
.iter_mut()
.filter(|r|r.id == id)
.for_each(|r| r.public = true);
write_local_recipes(&local_recipes).await?;
Ok(())
}
async fn read_local_recipes() -> Result<Recipes> {
let content = tokio::fs::read(STORAGE_FILE_PATH).await?;
let result = serde_json::from_slice(&content)?;
Ok(result)
}
async fn write_local_recipes(recipes: &Recipes) -> Result<()> {
let json = serde_json::to_string(&recipes)?;
tokio::fs::write(STORAGE_FILE_PATH, &json).await?;
Ok(())
} | id: new_id,
name: name.to_owned(),
ingredients: ingredients.to_owned(),
instructions: instructions.to_owned(),
public: false | random_line_split |
main.rs | use once_cell::sync::Lazy;
use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour};
use libp2p::identity;
use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent};
use libp2p::noise::{X25519Spec, Keypair, NoiseConfig};
use libp2p::tcp::TokioTcpConfig;
use libp2p::core::upgrade;
use libp2p::mdns::{TokioMdns, MdnsEvent};
use tokio::sync::mpsc;
use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess};
use tokio::io::AsyncBufReadExt;
use std::collections::HashSet;
use serde::{Serialize, Deserialize};
use log::{error, info};
const STORAGE_FILE_PATH: &str = "./recipes.json";
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync + 'static>>;
static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519());
static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public()));
static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes"));
type Recipes = Vec<Recipe>;
#[derive(Debug, Serialize, Deserialize)]
struct Recipe {
id: usize,
name: String,
ingredients: String,
instructions: String,
public: bool,
}
#[derive(Debug, Serialize, Deserialize)]
enum ListMode {
ALL,
One(String)
}
#[derive(Debug, Serialize, Deserialize)]
struct ListRequest {
mode: ListMode
}
#[derive(Debug, Serialize, Deserialize)]
struct ListResponse {
mode: ListMode,
data: Recipes,
receiver: String,
}
enum EventType {
Response(ListResponse),
Input(String)
}
#[derive(NetworkBehaviour)]
struct RecipeBehaviour {
floodsub: Floodsub,
mdns: TokioMdns,
#[behaviour(ignore)]
response_sender: mpsc::UnboundedSender<ListResponse>,
}
#[tokio::main]
async fn main() {
pretty_env_logger::init();
info!("Peer Id: {}", PEER_ID.clone());
let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel();
let auth_keys = Keypair::<X25519Spec>::new()
.into_authentic(&KEYS)
.expect("can create auth keys");
let transport = TokioTcpConfig::new()
.upgrade(upgrade::Version::V1)
.authenticate(NoiseConfig::xx(auth_keys).into_authenticated())
.multiplex(mplex::MplexConfig::new())
.boxed();
let mut behaviour = RecipeBehaviour {
floodsub: Floodsub::new(PEER_ID.clone()),
mdns: TokioMdns::new().expect("can create mdns"),
response_sender
};
behaviour.floodsub.subscribe(TOPIC.clone());
let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone())
.executor(Box::new(|fut| {
tokio::spawn(fut);
})).build();
Swarm::listen_on(
&mut swarm,
"/ip4/0.0.0.0/tcp/0"
.parse()
.expect("can get a local socket"))
.expect("swarm can be started");
let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines();
loop {
let evt = {
tokio::select! {
line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))),
event = swarm.next() => {
info!("Unhandled Swarm Event: {:?}", event);
None
},
response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists")))
}
};
if let Some(event) = evt {
match event {
EventType::Response(resp) => {
let json = serde_json::to_string(&resp).expect("can jsonify response");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
},
EventType::Input(line) => match line.as_str() {
"ls p" => handle_list_peers(&mut swarm).await,
cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await,
cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await,
cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await,
_ => error!("unknown command")
}
}
}
}
}
impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: MdnsEvent) {
match event {
MdnsEvent::Discovered(discovered_list) => {
for (peer, _addr) in discovered_list {
self.floodsub.add_node_to_partial_view(peer);
}
}
MdnsEvent::Expired(expired_list) => {
for (peer, _addr) in expired_list {
if !self.mdns.has_node(&peer) {
self.floodsub.remove_node_from_partial_view(&peer);
}
}
}
}
}
}
impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: FloodsubEvent) {
match event {
FloodsubEvent::Message(msg) => {
if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) {
if resp.receiver == PEER_ID.to_string() {
info!("response from {}:", msg.source);
resp.data.iter().for_each(|r| info!("{:?}", r));
}
} else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) {
match req.mode {
ListMode::ALL => {
info!("received ALL req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
ListMode::One(ref peer_id) => {
if peer_id == &PEER_ID.to_string() {
info!("received req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
}
}
}
},
_ => ()
}
}
}
async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) {
info!("Discovered Peers:");
let nodes = swarm.mdns.discovered_nodes();
let mut unique_peers = HashSet::new();
for peer in nodes {
unique_peers.insert(peer);
}
unique_peers.iter().for_each(|p| info!("{}", p));
}
async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) {
let rest = cmd.strip_prefix("ls r ");
match rest {
Some("all") => {
let req = ListRequest {
mode: ListMode::ALL
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
Some(recipes_peer_id) => {
let req = ListRequest {
mode: ListMode::One(recipes_peer_id.to_owned())
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
None => {
match read_local_recipes().await {
Ok(v) => {
info!("local recipes ({})", v.len());
v.iter().for_each(|r| info!("{:?}", r));
}
Err(e) => error!("error fetching local recipes: {}", e)
}
}
}
}
async fn handle_create_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("create r") {
let elements: Vec<&str> = rest.split("|").collect();
if elements.len() < 3 {
info!("too few arguments - Format: name|ingredients|instructions");
} else {
let name = elements.get(0).expect("name is there");
let ingredients = elements.get(1).expect("ingredients is there");
let instruments = elements.get(2).expect("instruments is there");
if let Err(e) = create_new_recipe(name, ingredients, instruments).await |
}
}
}
fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) {
tokio::spawn(async move {
match read_local_recipes().await {
Ok(recipes) => {
let resp = ListResponse {
mode: ListMode::ALL,
receiver,
data: recipes.into_iter().filter(|r| r.public).collect()
};
if let Err(e) = sender.send(resp) {
error!("error sending response via channel, {}", e);
}
}
Err(e) => error!("error fetching local recipes to answer ALL request, {}", e)
}
});
}
async fn handle_publish_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("publish r") {
match rest.trim().parse::<usize>() {
Ok(id) => {
if let Err(e) = publish_recipe(id).await {
info!("error publishing recipe with id {}, {}", id, e);
} else {
info!("published recipe with id: {}", id);
}
}
Err(e) => error!("invalid id: {}, {}", rest.trim(), e)
}
}
}
async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
let new_id = match local_recipes.iter().max_by_key(|r|r.id) {
Some(v) => v.id + 1,
None => 0
};
local_recipes.push(Recipe {
id: new_id,
name: name.to_owned(),
ingredients: ingredients.to_owned(),
instructions: instructions.to_owned(),
public: false
});
write_local_recipes(&local_recipes).await?;
info!("create recipe:");
info!("name: {}", name);
info!("ingredients: {}", ingredients);
info!("instruments: {}", instructions);
Ok(())
}
async fn publish_recipe(id: usize) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
local_recipes
.iter_mut()
.filter(|r|r.id == id)
.for_each(|r| r.public = true);
write_local_recipes(&local_recipes).await?;
Ok(())
}
async fn read_local_recipes() -> Result<Recipes> {
let content = tokio::fs::read(STORAGE_FILE_PATH).await?;
let result = serde_json::from_slice(&content)?;
Ok(result)
}
async fn write_local_recipes(recipes: &Recipes) -> Result<()> {
let json = serde_json::to_string(&recipes)?;
tokio::fs::write(STORAGE_FILE_PATH, &json).await?;
Ok(())
} | {
error!("error creating recipe: {}", e);
} | conditional_block |
main.rs | use once_cell::sync::Lazy;
use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour};
use libp2p::identity;
use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent};
use libp2p::noise::{X25519Spec, Keypair, NoiseConfig};
use libp2p::tcp::TokioTcpConfig;
use libp2p::core::upgrade;
use libp2p::mdns::{TokioMdns, MdnsEvent};
use tokio::sync::mpsc;
use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess};
use tokio::io::AsyncBufReadExt;
use std::collections::HashSet;
use serde::{Serialize, Deserialize};
use log::{error, info};
const STORAGE_FILE_PATH: &str = "./recipes.json";
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync + 'static>>;
static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519());
static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public()));
static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes"));
type Recipes = Vec<Recipe>;
#[derive(Debug, Serialize, Deserialize)]
struct Recipe {
id: usize,
name: String,
ingredients: String,
instructions: String,
public: bool,
}
#[derive(Debug, Serialize, Deserialize)]
enum ListMode {
ALL,
One(String)
}
#[derive(Debug, Serialize, Deserialize)]
struct ListRequest {
mode: ListMode
}
#[derive(Debug, Serialize, Deserialize)]
struct ListResponse {
mode: ListMode,
data: Recipes,
receiver: String,
}
enum EventType {
Response(ListResponse),
Input(String)
}
#[derive(NetworkBehaviour)]
struct RecipeBehaviour {
floodsub: Floodsub,
mdns: TokioMdns,
#[behaviour(ignore)]
response_sender: mpsc::UnboundedSender<ListResponse>,
}
#[tokio::main]
async fn main() {
pretty_env_logger::init();
info!("Peer Id: {}", PEER_ID.clone());
let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel();
let auth_keys = Keypair::<X25519Spec>::new()
.into_authentic(&KEYS)
.expect("can create auth keys");
let transport = TokioTcpConfig::new()
.upgrade(upgrade::Version::V1)
.authenticate(NoiseConfig::xx(auth_keys).into_authenticated())
.multiplex(mplex::MplexConfig::new())
.boxed();
let mut behaviour = RecipeBehaviour {
floodsub: Floodsub::new(PEER_ID.clone()),
mdns: TokioMdns::new().expect("can create mdns"),
response_sender
};
behaviour.floodsub.subscribe(TOPIC.clone());
let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone())
.executor(Box::new(|fut| {
tokio::spawn(fut);
})).build();
Swarm::listen_on(
&mut swarm,
"/ip4/0.0.0.0/tcp/0"
.parse()
.expect("can get a local socket"))
.expect("swarm can be started");
let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines();
loop {
let evt = {
tokio::select! {
line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))),
event = swarm.next() => {
info!("Unhandled Swarm Event: {:?}", event);
None
},
response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists")))
}
};
if let Some(event) = evt {
match event {
EventType::Response(resp) => {
let json = serde_json::to_string(&resp).expect("can jsonify response");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
},
EventType::Input(line) => match line.as_str() {
"ls p" => handle_list_peers(&mut swarm).await,
cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await,
cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await,
cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await,
_ => error!("unknown command")
}
}
}
}
}
impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: MdnsEvent) {
match event {
MdnsEvent::Discovered(discovered_list) => {
for (peer, _addr) in discovered_list {
self.floodsub.add_node_to_partial_view(peer);
}
}
MdnsEvent::Expired(expired_list) => {
for (peer, _addr) in expired_list {
if !self.mdns.has_node(&peer) {
self.floodsub.remove_node_from_partial_view(&peer);
}
}
}
}
}
}
impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour {
fn inject_event(&mut self, event: FloodsubEvent) {
match event {
FloodsubEvent::Message(msg) => {
if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) {
if resp.receiver == PEER_ID.to_string() {
info!("response from {}:", msg.source);
resp.data.iter().for_each(|r| info!("{:?}", r));
}
} else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) {
match req.mode {
ListMode::ALL => {
info!("received ALL req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
ListMode::One(ref peer_id) => {
if peer_id == &PEER_ID.to_string() {
info!("received req: {:?} from {:?}", req, msg.source);
respond_with_publish_recipes(
self.response_sender.clone(),
msg.source.to_string()
);
}
}
}
}
},
_ => ()
}
}
}
async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) {
info!("Discovered Peers:");
let nodes = swarm.mdns.discovered_nodes();
let mut unique_peers = HashSet::new();
for peer in nodes {
unique_peers.insert(peer);
}
unique_peers.iter().for_each(|p| info!("{}", p));
}
async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) {
let rest = cmd.strip_prefix("ls r ");
match rest {
Some("all") => {
let req = ListRequest {
mode: ListMode::ALL
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
Some(recipes_peer_id) => {
let req = ListRequest {
mode: ListMode::One(recipes_peer_id.to_owned())
};
let json = serde_json::to_string(&req).expect("can jsonify request");
swarm.floodsub.publish(TOPIC.clone(), json.as_bytes());
}
None => {
match read_local_recipes().await {
Ok(v) => {
info!("local recipes ({})", v.len());
v.iter().for_each(|r| info!("{:?}", r));
}
Err(e) => error!("error fetching local recipes: {}", e)
}
}
}
}
async fn handle_create_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("create r") {
let elements: Vec<&str> = rest.split("|").collect();
if elements.len() < 3 {
info!("too few arguments - Format: name|ingredients|instructions");
} else {
let name = elements.get(0).expect("name is there");
let ingredients = elements.get(1).expect("ingredients is there");
let instruments = elements.get(2).expect("instruments is there");
if let Err(e) = create_new_recipe(name, ingredients, instruments).await {
error!("error creating recipe: {}", e);
}
}
}
}
fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) {
tokio::spawn(async move {
match read_local_recipes().await {
Ok(recipes) => {
let resp = ListResponse {
mode: ListMode::ALL,
receiver,
data: recipes.into_iter().filter(|r| r.public).collect()
};
if let Err(e) = sender.send(resp) {
error!("error sending response via channel, {}", e);
}
}
Err(e) => error!("error fetching local recipes to answer ALL request, {}", e)
}
});
}
async fn handle_publish_recipe(cmd: &str) {
if let Some(rest) = cmd.strip_prefix("publish r") {
match rest.trim().parse::<usize>() {
Ok(id) => {
if let Err(e) = publish_recipe(id).await {
info!("error publishing recipe with id {}, {}", id, e);
} else {
info!("published recipe with id: {}", id);
}
}
Err(e) => error!("invalid id: {}, {}", rest.trim(), e)
}
}
}
async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
let new_id = match local_recipes.iter().max_by_key(|r|r.id) {
Some(v) => v.id + 1,
None => 0
};
local_recipes.push(Recipe {
id: new_id,
name: name.to_owned(),
ingredients: ingredients.to_owned(),
instructions: instructions.to_owned(),
public: false
});
write_local_recipes(&local_recipes).await?;
info!("create recipe:");
info!("name: {}", name);
info!("ingredients: {}", ingredients);
info!("instruments: {}", instructions);
Ok(())
}
async fn publish_recipe(id: usize) -> Result<()> {
let mut local_recipes = read_local_recipes().await?;
local_recipes
.iter_mut()
.filter(|r|r.id == id)
.for_each(|r| r.public = true);
write_local_recipes(&local_recipes).await?;
Ok(())
}
async fn read_local_recipes() -> Result<Recipes> |
async fn write_local_recipes(recipes: &Recipes) -> Result<()> {
let json = serde_json::to_string(&recipes)?;
tokio::fs::write(STORAGE_FILE_PATH, &json).await?;
Ok(())
} | {
let content = tokio::fs::read(STORAGE_FILE_PATH).await?;
let result = serde_json::from_slice(&content)?;
Ok(result)
} | identifier_body |
main.js | var sensorServices = [];
var actuatorServices = [];
var sensor_types = "http://webinos.org/api/sensors/*";
var actuator_types = "http://webinos.org/api/actuators/*";
var idObject_to_identify = {};
var sensor = {};
var actuator = {};
var registered = false;
var toFind = true;
var toRemoveFromFile = false;
var operations = {
"ADD" : false,
"REMOVE" : true
}
//necessary for graph google component
google.load("visualization", "1", {packages:["corechart"]});
//onLoad is the ready function!
$(document).ready(onLoad);
//graphic to show on display
var graphic;
//actuator component to show on display
var actComponent;
var eventListenerFunction = function(event){
if(graphic.type == "thermometer"){
graphic.setVal(event.sensorValues[0]);
$("#content_chart").show();
}else if(graphic.type == "line-chart"){
var time=new Date(event.timestamp);
time=(time.getUTCHours()+2)+ ":"+time.getUTCMinutes()+":"+time.getUTCSeconds();
var arrayTMP = new Array();
arrayTMP[0] = time;
arrayTMP[1] = parseInt(event.sensorValues[0]);
var dimGraphData = graphic.graphData.addRow(arrayTMP);
graphic.numberOfValues++;
graphic.chart.draw(graphic.graphData, graphic.options);
if(graphic.numberOfValues>150){
graphic.graphData.removeRow(0);
}
$("#content_chart").show();
}else{
console.log("Error!");
}
$("#values").text(event.sensorValues[0]);
};
var video, canvas, context, imageData, detector;
function onLoad(){
video = document.getElementById("video");
canvas = document.getElementById("canvas");
context = canvas.getContext("2d");
canvas.width = parseInt(canvas.style.width);
canvas.height = parseInt(canvas.style.height);
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
if (navigator.getUserMedia){
function successCallback(stream){
if (window.webkitURL) {
video.src = window.webkitURL.createObjectURL(stream);
} else if (video.mozSrcObject !== undefined) {
video.mozSrcObject = stream;
} else {
video.src = stream;
}
}
function errorCallback(error){
}
navigator.getUserMedia({video: true}, successCallback, errorCallback);
detector = new AR.Detector();
requestAnimationFrame(tick);
discoverFileSystem();
//discoverActuators();
//discoverSensors();
$("#back").text("Back");
$("#back").on("click", function(){
hideOverlay();
});
$("#sensorButton").on("click", function(){
listenSensor();
});
}
}
function tick(){
requestAnimationFrame(tick);
if (video.readyState === video.HAVE_ENOUGH_DATA){
snapshot();
var markers = detector.detect(imageData);
read(markers);
}
}
function snapshot(){
context.drawImage(video, 0, 0, canvas.width, canvas.height);
imageData = context.getImageData(0, 0, canvas.width, canvas.height);
}
function read(marker){
for(var i = 0; i !== marker.length; ++i){
var id = idObject_to_identify[marker[i].id];
//update grafich
$("#new_marker").text(marker[i].id);
if( id === undefined){
//console.log("Error! This marker is not associated to any sensor/actuator");
drawCorners(marker);
drawId(marker);
}
else{
//console.log("Marker correctly readed");
if(Object.keys(sensor).length == 0 && toFind && Object.keys(actuator).length == 0)
find(id);
}
}
}
function drawCorners(markers){
var corners, corner, i, j;
context.lineWidth = 3;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
context.strokeStyle = "red";
context.beginPath();
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
context.moveTo(corner.x, corner.y);
corner = corners[(j + 1) % corners.length];
context.lineTo(corner.x, corner.y);
}
context.stroke();
context.closePath();
context.strokeStyle = "green";
context.strokeRect(corners[0].x - 2, corners[0].y - 2, 4, 4);
}
}
function drawId(markers){
var corners, corner, x, y, i, j;
context.strokeStyle = "blue";
context.lineWidth = 1;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
x = Infinity;
y = Infinity;
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
x = Math.min(x, corner.x);
y = Math.min(y, corner.y);
}
context.strokeText(markers[i].id, x, y)
}
}
function find(id){
toFind = false;
//sensor
for (var i = 0; i < sensorServices.length; i++) {
if(sensorServices[i].id == id){
sensorServices[i].bind({onBind:function(){
sensorServices[i].configureSensor({rate: 1000, eventFireMode: "fixedinterval"},
function(){
sensor = sensorServices[i];
toFind = true;
showOverlay();
},
function (){
sensor = undefined;
console.error('Error configuring Sensor ' + service.api);
});
}
});
return;
}
}
//actuator
for (i = 0; i < actuatorServices.length; i++) {
if (actuatorServices[i].id == id) {
actuatorServices[i].bind({onBind:function(){
actuator = actuatorServices[i];
toFind = true;
showOverlay();
}
});
return;
}
}
//DA VERIFICAREEEEEEEEEEEEE - TODO
//toFind = true;
}
function showOverlay() |
function hideOverlay() {
$("#overlay").hide();
$("#content_chart").empty();
$("#content_actuator").empty();
$("#values").empty();
if(Object.keys(sensor).length != 0) {
listenSensor(operations.REMOVE);
sensor = {};
graphic = undefined;
}
else if(Object.keys(actuator).length != 0) {
actuator = {};
actComponent = undefined;
}
}
function discoverSensors() {
var serviceType = new ServiceType(sensor_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
sensorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function discoverActuators() {
var serviceType = new ServiceType(actuator_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
actuatorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function insertNewRowInTable(id, description){
var found = false;
var html = "";
html += '<tr id="'+id+'">';
html += '<td>'+description+'</td>';
for(var markerID in idObject_to_identify){
if(idObject_to_identify[markerID] == id){
html += '<td id="markerID_'+id+'">'+markerID+'</td>';
found = true;
break;
}
}
if(!found)
html += '<td id="markerID_'+id+'"></td>';
html += '<td><div id="remove_'+id+'"><img width="15px" height="15px" src="assets/img/x_min.png" style="float:right;"></div></td>';
html += '</tr>';
$("#example").append(html);
$("#"+id).on("click", function(){
if(toRemoveFromFile == operations.ADD){
var saID = this.id;
var markerID = $("#new_marker").text();
if(markerID.length != 0){
//IF THERE IS another marker with the same sensor ID, I have to remove this!
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == saID)
delete idObject_to_identify[i];
}
idObject_to_identify[markerID] = saID;
save_file(idObject_to_identify, file_name_aruco);
//var idElem = "#markerID_"+saID;
//$(idElem).text(markerID);
}
}
updateGUIMarkerMatching();
toRemoveFromFile = operations.ADD;
});
$("#remove_"+id).on("click", function(){
var splittingString = this.id.split("_");
var markerID;
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == splittingString[1]){
markerID = i;
}
}
delete idObject_to_identify[markerID];
save_file(idObject_to_identify, file_name_aruco);
toRemoveFromFile = operations.REMOVE;
});
}
function updateGUIMarkerMatching(){
//sensors
for(var j in sensorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == sensorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_"+sensorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
//actuators
for(var j in actuatorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == actuatorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_" + actuatorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
}
function listenSensor(staticRegistered){
if(staticRegistered !== undefined)
registered = staticRegistered;
if(!registered){
if(sensor.api.indexOf("temperature") !== -1 && graphic === undefined)
graphic = new Thermometer(sensor.id);
else if(graphic === undefined)
graphic = new LineChart(sensor.id);
sensor.addEventListener('sensor', eventListenerFunction, true);
registered = true;
$("#sensorButton").text("Stop");
}
else{
sensor.removeEventListener('sensor', eventListenerFunction, true);
registered = false;
$("#sensorButton").text("Start");
}
}
function showActuator(){
if(actuator.range[0].length == 2 && actComponent === undefined){
if(actuator.range[0][0] == 0 && actuator.range[0][1] == 1)
actComponent = new Switch(actuator.id, 0, 1);
else
actComponent = new Slider(actuator.id, actuator.range[0][0], actuator.range[0][1]);
}
else if(actComponent === undefined)
actComponent = new InputBox(actuator.id);
}
| {
$("#overlay").show();
if(Object.keys(sensor).length != 0) {
$("#sensorButton").show();
$("#serviceName").text(sensor.displayName);
$("#serviceDescription").text(sensor.description);
listenSensor();
}
else if(Object.keys(actuator).length != 0) {
$("#sensorButton").hide();
$("#serviceName").text(actuator.displayName);
$("#serviceDescription").text(actuator.description);
showActuator();
}
} | identifier_body |
main.js | var sensorServices = [];
var actuatorServices = [];
var sensor_types = "http://webinos.org/api/sensors/*";
var actuator_types = "http://webinos.org/api/actuators/*";
var idObject_to_identify = {};
var sensor = {};
var actuator = {};
var registered = false;
var toFind = true;
var toRemoveFromFile = false;
var operations = {
"ADD" : false,
"REMOVE" : true
}
//necessary for graph google component
google.load("visualization", "1", {packages:["corechart"]});
//onLoad is the ready function!
$(document).ready(onLoad);
//graphic to show on display
var graphic;
//actuator component to show on display
var actComponent;
var eventListenerFunction = function(event){
if(graphic.type == "thermometer"){
graphic.setVal(event.sensorValues[0]);
$("#content_chart").show();
}else if(graphic.type == "line-chart"){
var time=new Date(event.timestamp);
time=(time.getUTCHours()+2)+ ":"+time.getUTCMinutes()+":"+time.getUTCSeconds();
var arrayTMP = new Array();
arrayTMP[0] = time;
arrayTMP[1] = parseInt(event.sensorValues[0]);
var dimGraphData = graphic.graphData.addRow(arrayTMP);
graphic.numberOfValues++;
graphic.chart.draw(graphic.graphData, graphic.options);
if(graphic.numberOfValues>150){
graphic.graphData.removeRow(0);
}
$("#content_chart").show();
}else{
console.log("Error!");
}
$("#values").text(event.sensorValues[0]);
};
var video, canvas, context, imageData, detector;
function onLoad(){
video = document.getElementById("video");
canvas = document.getElementById("canvas");
context = canvas.getContext("2d");
canvas.width = parseInt(canvas.style.width);
canvas.height = parseInt(canvas.style.height);
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
if (navigator.getUserMedia){
function successCallback(stream){
if (window.webkitURL) {
video.src = window.webkitURL.createObjectURL(stream);
} else if (video.mozSrcObject !== undefined) {
video.mozSrcObject = stream;
} else {
video.src = stream;
}
}
function errorCallback(error){
}
navigator.getUserMedia({video: true}, successCallback, errorCallback);
detector = new AR.Detector();
requestAnimationFrame(tick);
discoverFileSystem();
//discoverActuators();
//discoverSensors();
$("#back").text("Back");
$("#back").on("click", function(){
hideOverlay();
});
$("#sensorButton").on("click", function(){
listenSensor();
});
}
}
function tick(){
requestAnimationFrame(tick);
if (video.readyState === video.HAVE_ENOUGH_DATA){
snapshot();
var markers = detector.detect(imageData);
read(markers);
}
}
function snapshot(){
context.drawImage(video, 0, 0, canvas.width, canvas.height);
imageData = context.getImageData(0, 0, canvas.width, canvas.height);
}
function read(marker){
for(var i = 0; i !== marker.length; ++i){
var id = idObject_to_identify[marker[i].id];
//update grafich
$("#new_marker").text(marker[i].id);
if( id === undefined){
//console.log("Error! This marker is not associated to any sensor/actuator");
drawCorners(marker);
drawId(marker);
}
else{
//console.log("Marker correctly readed");
if(Object.keys(sensor).length == 0 && toFind && Object.keys(actuator).length == 0)
find(id);
}
}
}
function drawCorners(markers){
var corners, corner, i, j;
context.lineWidth = 3;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
context.strokeStyle = "red";
context.beginPath();
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
context.moveTo(corner.x, corner.y);
corner = corners[(j + 1) % corners.length];
context.lineTo(corner.x, corner.y);
}
context.stroke();
context.closePath();
context.strokeStyle = "green";
context.strokeRect(corners[0].x - 2, corners[0].y - 2, 4, 4);
}
}
function drawId(markers){
var corners, corner, x, y, i, j;
context.strokeStyle = "blue";
context.lineWidth = 1;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
x = Infinity;
y = Infinity;
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
x = Math.min(x, corner.x);
y = Math.min(y, corner.y);
}
context.strokeText(markers[i].id, x, y)
}
}
function find(id){
toFind = false;
//sensor
for (var i = 0; i < sensorServices.length; i++) |
//actuator
for (i = 0; i < actuatorServices.length; i++) {
if (actuatorServices[i].id == id) {
actuatorServices[i].bind({onBind:function(){
actuator = actuatorServices[i];
toFind = true;
showOverlay();
}
});
return;
}
}
//DA VERIFICAREEEEEEEEEEEEE - TODO
//toFind = true;
}
function showOverlay() {
$("#overlay").show();
if(Object.keys(sensor).length != 0) {
$("#sensorButton").show();
$("#serviceName").text(sensor.displayName);
$("#serviceDescription").text(sensor.description);
listenSensor();
}
else if(Object.keys(actuator).length != 0) {
$("#sensorButton").hide();
$("#serviceName").text(actuator.displayName);
$("#serviceDescription").text(actuator.description);
showActuator();
}
}
function hideOverlay() {
$("#overlay").hide();
$("#content_chart").empty();
$("#content_actuator").empty();
$("#values").empty();
if(Object.keys(sensor).length != 0) {
listenSensor(operations.REMOVE);
sensor = {};
graphic = undefined;
}
else if(Object.keys(actuator).length != 0) {
actuator = {};
actComponent = undefined;
}
}
function discoverSensors() {
var serviceType = new ServiceType(sensor_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
sensorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function discoverActuators() {
var serviceType = new ServiceType(actuator_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
actuatorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function insertNewRowInTable(id, description){
var found = false;
var html = "";
html += '<tr id="'+id+'">';
html += '<td>'+description+'</td>';
for(var markerID in idObject_to_identify){
if(idObject_to_identify[markerID] == id){
html += '<td id="markerID_'+id+'">'+markerID+'</td>';
found = true;
break;
}
}
if(!found)
html += '<td id="markerID_'+id+'"></td>';
html += '<td><div id="remove_'+id+'"><img width="15px" height="15px" src="assets/img/x_min.png" style="float:right;"></div></td>';
html += '</tr>';
$("#example").append(html);
$("#"+id).on("click", function(){
if(toRemoveFromFile == operations.ADD){
var saID = this.id;
var markerID = $("#new_marker").text();
if(markerID.length != 0){
//IF THERE IS another marker with the same sensor ID, I have to remove this!
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == saID)
delete idObject_to_identify[i];
}
idObject_to_identify[markerID] = saID;
save_file(idObject_to_identify, file_name_aruco);
//var idElem = "#markerID_"+saID;
//$(idElem).text(markerID);
}
}
updateGUIMarkerMatching();
toRemoveFromFile = operations.ADD;
});
$("#remove_"+id).on("click", function(){
var splittingString = this.id.split("_");
var markerID;
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == splittingString[1]){
markerID = i;
}
}
delete idObject_to_identify[markerID];
save_file(idObject_to_identify, file_name_aruco);
toRemoveFromFile = operations.REMOVE;
});
}
function updateGUIMarkerMatching(){
//sensors
for(var j in sensorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == sensorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_"+sensorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
//actuators
for(var j in actuatorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == actuatorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_" + actuatorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
}
function listenSensor(staticRegistered){
if(staticRegistered !== undefined)
registered = staticRegistered;
if(!registered){
if(sensor.api.indexOf("temperature") !== -1 && graphic === undefined)
graphic = new Thermometer(sensor.id);
else if(graphic === undefined)
graphic = new LineChart(sensor.id);
sensor.addEventListener('sensor', eventListenerFunction, true);
registered = true;
$("#sensorButton").text("Stop");
}
else{
sensor.removeEventListener('sensor', eventListenerFunction, true);
registered = false;
$("#sensorButton").text("Start");
}
}
function showActuator(){
if(actuator.range[0].length == 2 && actComponent === undefined){
if(actuator.range[0][0] == 0 && actuator.range[0][1] == 1)
actComponent = new Switch(actuator.id, 0, 1);
else
actComponent = new Slider(actuator.id, actuator.range[0][0], actuator.range[0][1]);
}
else if(actComponent === undefined)
actComponent = new InputBox(actuator.id);
}
| {
if(sensorServices[i].id == id){
sensorServices[i].bind({onBind:function(){
sensorServices[i].configureSensor({rate: 1000, eventFireMode: "fixedinterval"},
function(){
sensor = sensorServices[i];
toFind = true;
showOverlay();
},
function (){
sensor = undefined;
console.error('Error configuring Sensor ' + service.api);
});
}
});
return;
}
} | conditional_block |
main.js | var sensorServices = [];
var actuatorServices = [];
var sensor_types = "http://webinos.org/api/sensors/*";
var actuator_types = "http://webinos.org/api/actuators/*";
var idObject_to_identify = {};
var sensor = {};
var actuator = {};
var registered = false;
var toFind = true;
var toRemoveFromFile = false;
var operations = {
"ADD" : false,
"REMOVE" : true
}
//necessary for graph google component
google.load("visualization", "1", {packages:["corechart"]});
//onLoad is the ready function!
$(document).ready(onLoad);
//graphic to show on display
var graphic;
//actuator component to show on display
var actComponent;
var eventListenerFunction = function(event){
if(graphic.type == "thermometer"){
graphic.setVal(event.sensorValues[0]);
$("#content_chart").show();
}else if(graphic.type == "line-chart"){
var time=new Date(event.timestamp);
time=(time.getUTCHours()+2)+ ":"+time.getUTCMinutes()+":"+time.getUTCSeconds();
var arrayTMP = new Array();
arrayTMP[0] = time;
arrayTMP[1] = parseInt(event.sensorValues[0]);
var dimGraphData = graphic.graphData.addRow(arrayTMP);
graphic.numberOfValues++;
graphic.chart.draw(graphic.graphData, graphic.options);
if(graphic.numberOfValues>150){
graphic.graphData.removeRow(0);
}
$("#content_chart").show();
}else{
console.log("Error!");
}
$("#values").text(event.sensorValues[0]);
};
var video, canvas, context, imageData, detector;
function onLoad(){
video = document.getElementById("video");
canvas = document.getElementById("canvas");
context = canvas.getContext("2d");
canvas.width = parseInt(canvas.style.width);
canvas.height = parseInt(canvas.style.height);
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
if (navigator.getUserMedia){
function successCallback(stream){
if (window.webkitURL) {
video.src = window.webkitURL.createObjectURL(stream);
} else if (video.mozSrcObject !== undefined) {
video.mozSrcObject = stream;
} else {
video.src = stream;
}
}
function errorCallback(error){
}
navigator.getUserMedia({video: true}, successCallback, errorCallback);
detector = new AR.Detector();
requestAnimationFrame(tick);
discoverFileSystem();
//discoverActuators();
//discoverSensors();
$("#back").text("Back");
$("#back").on("click", function(){
hideOverlay();
});
$("#sensorButton").on("click", function(){
listenSensor();
});
}
}
function tick(){
requestAnimationFrame(tick);
if (video.readyState === video.HAVE_ENOUGH_DATA){
snapshot();
var markers = detector.detect(imageData);
read(markers);
}
}
function snapshot(){
context.drawImage(video, 0, 0, canvas.width, canvas.height);
imageData = context.getImageData(0, 0, canvas.width, canvas.height);
}
function read(marker){
for(var i = 0; i !== marker.length; ++i){
var id = idObject_to_identify[marker[i].id];
//update grafich
$("#new_marker").text(marker[i].id);
if( id === undefined){
//console.log("Error! This marker is not associated to any sensor/actuator");
drawCorners(marker);
drawId(marker);
}
else{
//console.log("Marker correctly readed");
if(Object.keys(sensor).length == 0 && toFind && Object.keys(actuator).length == 0)
find(id);
}
}
}
function drawCorners(markers){
var corners, corner, i, j;
context.lineWidth = 3;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
context.strokeStyle = "red";
context.beginPath();
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
context.moveTo(corner.x, corner.y);
corner = corners[(j + 1) % corners.length];
context.lineTo(corner.x, corner.y);
}
context.stroke();
context.closePath();
context.strokeStyle = "green";
context.strokeRect(corners[0].x - 2, corners[0].y - 2, 4, 4);
}
}
function drawId(markers){
var corners, corner, x, y, i, j;
context.strokeStyle = "blue";
context.lineWidth = 1;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
x = Infinity;
y = Infinity;
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
x = Math.min(x, corner.x);
y = Math.min(y, corner.y);
}
context.strokeText(markers[i].id, x, y)
}
}
function find(id){
toFind = false;
//sensor
for (var i = 0; i < sensorServices.length; i++) {
if(sensorServices[i].id == id){
sensorServices[i].bind({onBind:function(){
sensorServices[i].configureSensor({rate: 1000, eventFireMode: "fixedinterval"},
function(){
sensor = sensorServices[i];
toFind = true;
showOverlay();
},
function (){
sensor = undefined;
console.error('Error configuring Sensor ' + service.api);
});
}
});
return;
}
}
//actuator
for (i = 0; i < actuatorServices.length; i++) {
if (actuatorServices[i].id == id) {
actuatorServices[i].bind({onBind:function(){
actuator = actuatorServices[i];
toFind = true;
showOverlay();
}
}); | //toFind = true;
}
function showOverlay() {
$("#overlay").show();
if(Object.keys(sensor).length != 0) {
$("#sensorButton").show();
$("#serviceName").text(sensor.displayName);
$("#serviceDescription").text(sensor.description);
listenSensor();
}
else if(Object.keys(actuator).length != 0) {
$("#sensorButton").hide();
$("#serviceName").text(actuator.displayName);
$("#serviceDescription").text(actuator.description);
showActuator();
}
}
function hideOverlay() {
$("#overlay").hide();
$("#content_chart").empty();
$("#content_actuator").empty();
$("#values").empty();
if(Object.keys(sensor).length != 0) {
listenSensor(operations.REMOVE);
sensor = {};
graphic = undefined;
}
else if(Object.keys(actuator).length != 0) {
actuator = {};
actComponent = undefined;
}
}
function discoverSensors() {
var serviceType = new ServiceType(sensor_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
sensorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function discoverActuators() {
var serviceType = new ServiceType(actuator_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
actuatorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function insertNewRowInTable(id, description){
var found = false;
var html = "";
html += '<tr id="'+id+'">';
html += '<td>'+description+'</td>';
for(var markerID in idObject_to_identify){
if(idObject_to_identify[markerID] == id){
html += '<td id="markerID_'+id+'">'+markerID+'</td>';
found = true;
break;
}
}
if(!found)
html += '<td id="markerID_'+id+'"></td>';
html += '<td><div id="remove_'+id+'"><img width="15px" height="15px" src="assets/img/x_min.png" style="float:right;"></div></td>';
html += '</tr>';
$("#example").append(html);
$("#"+id).on("click", function(){
if(toRemoveFromFile == operations.ADD){
var saID = this.id;
var markerID = $("#new_marker").text();
if(markerID.length != 0){
//IF THERE IS another marker with the same sensor ID, I have to remove this!
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == saID)
delete idObject_to_identify[i];
}
idObject_to_identify[markerID] = saID;
save_file(idObject_to_identify, file_name_aruco);
//var idElem = "#markerID_"+saID;
//$(idElem).text(markerID);
}
}
updateGUIMarkerMatching();
toRemoveFromFile = operations.ADD;
});
$("#remove_"+id).on("click", function(){
var splittingString = this.id.split("_");
var markerID;
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == splittingString[1]){
markerID = i;
}
}
delete idObject_to_identify[markerID];
save_file(idObject_to_identify, file_name_aruco);
toRemoveFromFile = operations.REMOVE;
});
}
function updateGUIMarkerMatching(){
//sensors
for(var j in sensorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == sensorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_"+sensorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
//actuators
for(var j in actuatorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == actuatorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_" + actuatorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
}
function listenSensor(staticRegistered){
if(staticRegistered !== undefined)
registered = staticRegistered;
if(!registered){
if(sensor.api.indexOf("temperature") !== -1 && graphic === undefined)
graphic = new Thermometer(sensor.id);
else if(graphic === undefined)
graphic = new LineChart(sensor.id);
sensor.addEventListener('sensor', eventListenerFunction, true);
registered = true;
$("#sensorButton").text("Stop");
}
else{
sensor.removeEventListener('sensor', eventListenerFunction, true);
registered = false;
$("#sensorButton").text("Start");
}
}
function showActuator(){
if(actuator.range[0].length == 2 && actComponent === undefined){
if(actuator.range[0][0] == 0 && actuator.range[0][1] == 1)
actComponent = new Switch(actuator.id, 0, 1);
else
actComponent = new Slider(actuator.id, actuator.range[0][0], actuator.range[0][1]);
}
else if(actComponent === undefined)
actComponent = new InputBox(actuator.id);
} | return;
}
}
//DA VERIFICAREEEEEEEEEEEEE - TODO | random_line_split |
main.js | var sensorServices = [];
var actuatorServices = [];
var sensor_types = "http://webinos.org/api/sensors/*";
var actuator_types = "http://webinos.org/api/actuators/*";
var idObject_to_identify = {};
var sensor = {};
var actuator = {};
var registered = false;
var toFind = true;
var toRemoveFromFile = false;
var operations = {
"ADD" : false,
"REMOVE" : true
}
//necessary for graph google component
google.load("visualization", "1", {packages:["corechart"]});
//onLoad is the ready function!
$(document).ready(onLoad);
//graphic to show on display
var graphic;
//actuator component to show on display
var actComponent;
var eventListenerFunction = function(event){
if(graphic.type == "thermometer"){
graphic.setVal(event.sensorValues[0]);
$("#content_chart").show();
}else if(graphic.type == "line-chart"){
var time=new Date(event.timestamp);
time=(time.getUTCHours()+2)+ ":"+time.getUTCMinutes()+":"+time.getUTCSeconds();
var arrayTMP = new Array();
arrayTMP[0] = time;
arrayTMP[1] = parseInt(event.sensorValues[0]);
var dimGraphData = graphic.graphData.addRow(arrayTMP);
graphic.numberOfValues++;
graphic.chart.draw(graphic.graphData, graphic.options);
if(graphic.numberOfValues>150){
graphic.graphData.removeRow(0);
}
$("#content_chart").show();
}else{
console.log("Error!");
}
$("#values").text(event.sensorValues[0]);
};
var video, canvas, context, imageData, detector;
function onLoad(){
video = document.getElementById("video");
canvas = document.getElementById("canvas");
context = canvas.getContext("2d");
canvas.width = parseInt(canvas.style.width);
canvas.height = parseInt(canvas.style.height);
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
if (navigator.getUserMedia){
function successCallback(stream){
if (window.webkitURL) {
video.src = window.webkitURL.createObjectURL(stream);
} else if (video.mozSrcObject !== undefined) {
video.mozSrcObject = stream;
} else {
video.src = stream;
}
}
function | (error){
}
navigator.getUserMedia({video: true}, successCallback, errorCallback);
detector = new AR.Detector();
requestAnimationFrame(tick);
discoverFileSystem();
//discoverActuators();
//discoverSensors();
$("#back").text("Back");
$("#back").on("click", function(){
hideOverlay();
});
$("#sensorButton").on("click", function(){
listenSensor();
});
}
}
function tick(){
requestAnimationFrame(tick);
if (video.readyState === video.HAVE_ENOUGH_DATA){
snapshot();
var markers = detector.detect(imageData);
read(markers);
}
}
function snapshot(){
context.drawImage(video, 0, 0, canvas.width, canvas.height);
imageData = context.getImageData(0, 0, canvas.width, canvas.height);
}
function read(marker){
for(var i = 0; i !== marker.length; ++i){
var id = idObject_to_identify[marker[i].id];
//update grafich
$("#new_marker").text(marker[i].id);
if( id === undefined){
//console.log("Error! This marker is not associated to any sensor/actuator");
drawCorners(marker);
drawId(marker);
}
else{
//console.log("Marker correctly readed");
if(Object.keys(sensor).length == 0 && toFind && Object.keys(actuator).length == 0)
find(id);
}
}
}
function drawCorners(markers){
var corners, corner, i, j;
context.lineWidth = 3;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
context.strokeStyle = "red";
context.beginPath();
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
context.moveTo(corner.x, corner.y);
corner = corners[(j + 1) % corners.length];
context.lineTo(corner.x, corner.y);
}
context.stroke();
context.closePath();
context.strokeStyle = "green";
context.strokeRect(corners[0].x - 2, corners[0].y - 2, 4, 4);
}
}
function drawId(markers){
var corners, corner, x, y, i, j;
context.strokeStyle = "blue";
context.lineWidth = 1;
for (i = 0; i !== markers.length; ++ i){
corners = markers[i].corners;
x = Infinity;
y = Infinity;
for (j = 0; j !== corners.length; ++ j){
corner = corners[j];
x = Math.min(x, corner.x);
y = Math.min(y, corner.y);
}
context.strokeText(markers[i].id, x, y)
}
}
function find(id){
toFind = false;
//sensor
for (var i = 0; i < sensorServices.length; i++) {
if(sensorServices[i].id == id){
sensorServices[i].bind({onBind:function(){
sensorServices[i].configureSensor({rate: 1000, eventFireMode: "fixedinterval"},
function(){
sensor = sensorServices[i];
toFind = true;
showOverlay();
},
function (){
sensor = undefined;
console.error('Error configuring Sensor ' + service.api);
});
}
});
return;
}
}
//actuator
for (i = 0; i < actuatorServices.length; i++) {
if (actuatorServices[i].id == id) {
actuatorServices[i].bind({onBind:function(){
actuator = actuatorServices[i];
toFind = true;
showOverlay();
}
});
return;
}
}
//DA VERIFICAREEEEEEEEEEEEE - TODO
//toFind = true;
}
function showOverlay() {
$("#overlay").show();
if(Object.keys(sensor).length != 0) {
$("#sensorButton").show();
$("#serviceName").text(sensor.displayName);
$("#serviceDescription").text(sensor.description);
listenSensor();
}
else if(Object.keys(actuator).length != 0) {
$("#sensorButton").hide();
$("#serviceName").text(actuator.displayName);
$("#serviceDescription").text(actuator.description);
showActuator();
}
}
function hideOverlay() {
$("#overlay").hide();
$("#content_chart").empty();
$("#content_actuator").empty();
$("#values").empty();
if(Object.keys(sensor).length != 0) {
listenSensor(operations.REMOVE);
sensor = {};
graphic = undefined;
}
else if(Object.keys(actuator).length != 0) {
actuator = {};
actComponent = undefined;
}
}
function discoverSensors() {
var serviceType = new ServiceType(sensor_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
sensorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function discoverActuators() {
var serviceType = new ServiceType(actuator_types);
webinos.discovery.findServices(serviceType, {
onFound: function (service) {
actuatorServices.push(service);
insertNewRowInTable(service.id,service.description);
},
onLost: function(service){
},
onError: function(error){
}
});
}
function insertNewRowInTable(id, description){
var found = false;
var html = "";
html += '<tr id="'+id+'">';
html += '<td>'+description+'</td>';
for(var markerID in idObject_to_identify){
if(idObject_to_identify[markerID] == id){
html += '<td id="markerID_'+id+'">'+markerID+'</td>';
found = true;
break;
}
}
if(!found)
html += '<td id="markerID_'+id+'"></td>';
html += '<td><div id="remove_'+id+'"><img width="15px" height="15px" src="assets/img/x_min.png" style="float:right;"></div></td>';
html += '</tr>';
$("#example").append(html);
$("#"+id).on("click", function(){
if(toRemoveFromFile == operations.ADD){
var saID = this.id;
var markerID = $("#new_marker").text();
if(markerID.length != 0){
//IF THERE IS another marker with the same sensor ID, I have to remove this!
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == saID)
delete idObject_to_identify[i];
}
idObject_to_identify[markerID] = saID;
save_file(idObject_to_identify, file_name_aruco);
//var idElem = "#markerID_"+saID;
//$(idElem).text(markerID);
}
}
updateGUIMarkerMatching();
toRemoveFromFile = operations.ADD;
});
$("#remove_"+id).on("click", function(){
var splittingString = this.id.split("_");
var markerID;
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == splittingString[1]){
markerID = i;
}
}
delete idObject_to_identify[markerID];
save_file(idObject_to_identify, file_name_aruco);
toRemoveFromFile = operations.REMOVE;
});
}
function updateGUIMarkerMatching(){
//sensors
for(var j in sensorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == sensorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_"+sensorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
//actuators
for(var j in actuatorServices){
var isThereMarkerMatched = "";
for(var i in idObject_to_identify){
if(idObject_to_identify[i] == actuatorServices[j].id){
isThereMarkerMatched = i;
break;
}
}
var idElem = "#markerID_" + actuatorServices[j].id;
$(idElem).text("");
$(idElem).text(isThereMarkerMatched);
}
}
function listenSensor(staticRegistered){
if(staticRegistered !== undefined)
registered = staticRegistered;
if(!registered){
if(sensor.api.indexOf("temperature") !== -1 && graphic === undefined)
graphic = new Thermometer(sensor.id);
else if(graphic === undefined)
graphic = new LineChart(sensor.id);
sensor.addEventListener('sensor', eventListenerFunction, true);
registered = true;
$("#sensorButton").text("Stop");
}
else{
sensor.removeEventListener('sensor', eventListenerFunction, true);
registered = false;
$("#sensorButton").text("Start");
}
}
function showActuator(){
if(actuator.range[0].length == 2 && actComponent === undefined){
if(actuator.range[0][0] == 0 && actuator.range[0][1] == 1)
actComponent = new Switch(actuator.id, 0, 1);
else
actComponent = new Slider(actuator.id, actuator.range[0][0], actuator.range[0][1]);
}
else if(actComponent === undefined)
actComponent = new InputBox(actuator.id);
}
| errorCallback | identifier_name |
scheduler.rs | //! Scheduler is responsible for allocating containers on cluster nodes according to the currently
//! submitted jobs and concurrency levels. It is only used on the master node.
use crate::executor::*;
use actix::fut::wrap_future;
use actix::prelude::*;
use actix::registry::SystemService;
use actix::spawn;
use actix_web::{client, HttpMessage};
use failure::{err_msg, Error};
use futures::future::{join_all, Future};
use rand::distributions::WeightedIndex;
use rand::prelude::*;
use serde_json;
use shiplift::builder::{ContainerOptions, ContainerOptionsBuilder};
use std::collections::HashMap;
use std::fs;
use std::net::{SocketAddr, ToSocketAddrs};
use std::path::PathBuf;
use std::time::Duration;
use uuid::Uuid;
pub type AllocationId = String;
pub type NodeId = String;
pub type JobId = String;
pub type ServiceName = String;
const RESOURCE_REFRESH_INTERVAL: Duration = Duration::from_secs(5);
/// A job specification (in docker-compose format)
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct JobSpec {
pub services: HashMap<String, ServiceSpec>,
}
/// A service element within the job spec
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct ServiceSpec {
pub image: String,
pub command: Option<String>,
pub entrypoint: Option<String>,
#[serde(default)]
pub ports: Vec<String>,
#[serde(default)]
pub volumes: Vec<String>,
#[serde(default)]
pub environment: Vec<String>,
}
impl ServiceSpec {
/// Create ContainerOptions based on this service spec
pub fn build_container_options(&self) -> Result<ContainerOptionsBuilder, Error> {
let mut opt = ContainerOptions::builder(&*self.image);
opt.volumes(self.volumes.iter().map(|i| &**i).collect())
.env(self.environment.iter().map(|i| &**i).collect());
if let Some(cmd) = &self.command {
opt.cmd(vec![&*cmd]);
}
if let Some(entrypoint) = &self.entrypoint {
opt.entrypoint(entrypoint);
}
for port in &self.ports {
let mut port = port.split(':');
let host_port = port.next().unwrap().parse()?;
let container_port = port.next().unwrap().parse()?;
opt.expose(container_port, "tcp", host_port);
}
Ok(opt)
}
}
/// Describes the state of the cluster including all jobs and nodes.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)]
pub struct ClusterState {
pub jobs: HashMap<JobId, JobSpec>,
pub services: HashMap<JobId, HashMap<ServiceName, ServiceConfig>>,
pub nodes: HashMap<NodeId, Node>,
pub allocations: HashMap<AllocationId, Allocation>,
pub master_node: Option<NodeId>,
}
impl ClusterState {
/// Get a reference to the current master node
pub fn master_node(&self) -> Option<&Node> {
match &self.master_node {
Some(master_id) => Some(&self.nodes[master_id]),
None => None,
}
}
}
/// Element of cluster state assignming a job's task to a node.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Allocation {
pub allocation_id: AllocationId,
pub node_id: NodeId,
pub job_id: JobId,
pub service_name: ServiceName,
}
/// Runtime configuration of job services (including concurrency level)
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct ServiceConfig {
pub scale: usize,
}
/// Element of cluster state used to describe a member of the cluster.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Node {
pub node_id: NodeId,
pub cluster_address: SocketAddr,
}
impl Node {
pub fn new<S, T>(node_id: T, cluster_address: S) -> Node
where
S: ToSocketAddrs,
T: Into<NodeId>,
{
Node {
cluster_address: cluster_address.to_socket_addrs().unwrap().next().unwrap(),
node_id: node_id.into(),
}
}
}
/// Updates the cluster state to match requested jobs / concurrency levels.
#[derive(Default)]
pub struct Scheduler {
state: ClusterState,
node_resources: HashMap<NodeId, NodeResources>,
state_path: Option<PathBuf>,
}
impl Scheduler {
/// Set this node as master in an empty cluster.
fn bootstrap(&mut self, node: Node) -> Result<(), Error> {
if self.state.master_node.is_some() || !self.state.nodes.is_empty() {
return Err(err_msg("Cannot bootstrap a cluster with existing nodes."));
}
info!("Bootstrapping cluster as node {}", node.node_id);
self.state.master_node = Some(node.node_id.clone());
self.state.nodes.insert(node.node_id.clone(), node);
Executor::from_registry().do_send(ExecutorCommand::UpdateState(self.state.clone()));
Ok(())
}
/// Add / remove allocations based on current jobs and concurrency requirements.
fn update_schedule(&mut self) {
// Build a map of service to existing allocations
let mut service_allocs: HashMap<_, Vec<AllocationId>> = HashMap::new();
for alloc in self.state.allocations.values() {
service_allocs
.entry((&alloc.job_id, &alloc.service_name))
.or_default()
.push(alloc.allocation_id.clone());
}
// Put the changes we need to make here, since we can't modify self.state.allocations while
// borrowed
let mut to_remove = Vec::new();
let mut to_add = Vec::new();
// Used for weighted random node selection
let nodes: Vec<_> = self.state.nodes.keys().collect();
let node_index = WeightedIndex::new(nodes.iter().map(|id| {
self.node_resources
.get(*id)
.map(|resources| resources.total_memory - resources.used_memory)
.unwrap_or(1)
}))
.unwrap();
// Compare the existing allocations with the desired concurrency of each service
for (job_id, job_services) in &self.state.services {
for (service_name, service) in job_services {
let existing = service_allocs
.remove(&(&job_id, &service_name))
.unwrap_or_default();
let diff = service.scale as isize - existing.len() as isize;
debug!("Scheduling {}.{} -> {}", job_id, service_name, diff);
if diff > 0 {
// Create new allocations
for node in node_index
.sample_iter(&mut thread_rng())
.take(diff as usize)
{
to_add.push(Allocation {
allocation_id: Uuid::new_v4().to_hyphenated().to_string(),
node_id: nodes[node].clone(),
job_id: job_id.clone(),
service_name: service_name.clone(),
});
}
} else {
to_remove.extend(existing.iter().take(diff.abs() as usize).cloned());
}
}
}
// Remove any allocations that don't correspond to any service
for allocs in service_allocs.values() {
to_remove.extend(allocs.iter().cloned());
}
// Now we drop the index service_allocs and we can mutate the state
for alloc_id in to_remove {
self.state.allocations.remove(&alloc_id);
}
for alloc in to_add.drain(..) {
self.state
.allocations
.insert(alloc.allocation_id.clone(), alloc);
}
self.save_state();
spawn(
self.update_nodes()
.then(|res| check_err("Update nodes", res)),
);
}
/// Send the latest state to each node.
fn update_nodes(&self) -> impl Future<Item = (), Error = Error> {
let update_fut: Vec<_> = self
.state
.nodes
.values()
.map(|node| {
client::post(format!("http://{}/node/state", node.cluster_address))
.json(&self.state)
.unwrap()
.send()
})
.collect();
join_all(update_fut)
.from_err()
.map(|results| info!("Sent updated state to {} node(s)", results.len()))
}
fn load_state(&mut self) -> Result<(), Error> {
if let Some(path) = &self.state_path {
info!("Loading state from: {:?}", path);
let raw_state = fs::File::open(path)?;
self.state = serde_json::from_reader(raw_state)?;
self.update_schedule();
}
Ok(())
}
fn save_state(&mut self) {
if let Some(path) = &self.state_path {
info!("Saving state to: {:?}", path);
match serde_json::to_string(&self.state) {
Ok(serialized) => match fs::write(path, serialized) {
Ok(_) => {}
Err(e) => error!("Failed to write state: {:?}", e),
},
Err(e) => error!("Failed to serialize state: {:?}", e),
}
}
}
}
impl Actor for Scheduler {
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// Poll node resource usage (so we don't need to request it each time we reschedule)
ctx.run_interval(RESOURCE_REFRESH_INTERVAL, |_, ctx| {
let update_fut = wrap_future::<_, Self>(ctx.address().send(GetClusterResources))
.map_err(|e, _, _| error!("Failed request resource refresh: {:?}", e))
.map(|res, scheduler, _| match res {
Ok(res) => scheduler.node_resources = res,
Err(e) => error!("Failed to refresh node resources: {:?}", e),
});
ctx.spawn(update_fut);
});
}
}
impl Supervised for Scheduler {}
impl SystemService for Scheduler {}
/// Fire-and-forget type commands for the scheduler
#[derive(Clone, Debug)]
pub enum SchedulerCommand {
CreateJob(JobId, JobSpec),
DeleteJob(JobId),
UpdateService(JobId, ServiceName, ServiceConfig),
BootstrapNode(Node),
RegisterNode(Node),
SetStatePath(PathBuf),
}
impl Message for SchedulerCommand {
type Result = Result<(), Error>;
}
impl Handler<SchedulerCommand> for Scheduler {
type Result = Result<(), Error>;
fn handle(&mut self, cmd: SchedulerCommand, _: &mut Context<Self>) -> Self::Result {
debug!("Scheduler handling command: {:?}", cmd);
match cmd {
SchedulerCommand::CreateJob(job_id, job) => {
job.services.keys().for_each(|service_name| {
self.state
.services
.entry(job_id.clone())
.or_default()
.insert(service_name.clone(), ServiceConfig { scale: 1 });
});
self.state.jobs.insert(job_id, job);
self.update_schedule();
Ok(())
}
SchedulerCommand::UpdateService(job_id, service_name, service_config) => {
let result = self
.state
.services
.get_mut(&job_id)
.and_then(|services| services.get_mut(&service_name))
.map(|service| {
*service = service_config;
{}
})
.ok_or_else(|| err_msg("Error does not exist"));
self.update_schedule();
result
}
SchedulerCommand::DeleteJob(job_id) => {
self.state.jobs.remove(&job_id);
self.state.services.remove(&job_id);
self.update_schedule();
Ok(())
}
SchedulerCommand::BootstrapNode(node) => self.bootstrap(node),
SchedulerCommand::RegisterNode(node) => {
self.state.nodes.insert(node.node_id.clone(), node);
spawn(
self.update_nodes()
.map_err(|e| error!("Failed to update new node: {}", e)),
);
Ok(())
}
SchedulerCommand::SetStatePath(path) => {
self.state_path = Some(path);
self.load_state()
}
}
}
}
/// Message type for requesting resource usage of all nodes
pub struct GetClusterResources;
impl Message for GetClusterResources {
type Result = Result<HashMap<String, NodeResources>, Error>;
}
impl Handler<GetClusterResources> for Scheduler {
type Result = ResponseFuture<HashMap<String, NodeResources>, Error>;
fn handle(&mut self, _: GetClusterResources, _: &mut Context<Self>) -> Self::Result {
let node_queries: Vec<_> = self
.state
.nodes
.values()
.map(|node| {
let node_id = node.node_id.clone();
client::get(format!("http://{}/node/resources", node.cluster_address))
.finish()
.unwrap()
.send()
.map_err(Error::from)
.and_then(|res| res.json().from_err())
.then(move |res| {
Ok::<_, Error>(match res {
Ok(ok) => Some((node_id, ok)),
Err(_) => None,
})
})
})
.collect();
Box::new(
join_all(node_queries).map(|mut res| res.drain(..).filter_map(|res| res).collect()),
)
}
}
/// Message type for requesting the current list of jobs
pub struct ListJobs;
impl Message for ListJobs {
type Result = Result<HashMap<String, JobSpec>, Error>;
}
impl Handler<ListJobs> for Scheduler {
type Result = Result<HashMap<String, JobSpec>, Error>;
fn handle(&mut self, _: ListJobs, _: &mut Context<Self>) -> Self::Result {
Ok(self.state.jobs.clone())
}
}
/// Messsage requesting a list of allocations
pub struct ListAllocations;
impl Message for ListAllocations {
type Result = Result<Vec<Allocation>, Error>;
}
impl Handler<ListAllocations> for Scheduler {
type Result = Result<Vec<Allocation>, Error>;
fn handle(&mut self, _: ListAllocations, _: &mut Context<Self>) -> Self::Result {
Ok(self.state.allocations.values().cloned().collect())
}
}
#[cfg(test)]
mod test {
use crate::scheduler::*;
use crate::test_support::*;
use serde_yaml;
#[test]
fn | () {
let job: JobSpec =
serde_yaml::from_str(TEST_JOB_SPEC).expect("Failed to parse sample job spec");
with_bootstrap_node(|| {
Scheduler::from_registry()
.send(SchedulerCommand::CreateJob(String::from("test-job"), job))
.and_then(move |res| {
assert!(res.is_ok());
Scheduler::from_registry().send(ListJobs)
})
.map(|res| {
assert_eq!(res.expect("List jobs failed").len(), 1);
})
});
}
}
| test_create_job | identifier_name |
scheduler.rs | //! Scheduler is responsible for allocating containers on cluster nodes according to the currently
//! submitted jobs and concurrency levels. It is only used on the master node.
use crate::executor::*;
use actix::fut::wrap_future;
use actix::prelude::*;
use actix::registry::SystemService;
use actix::spawn;
use actix_web::{client, HttpMessage};
use failure::{err_msg, Error};
use futures::future::{join_all, Future};
use rand::distributions::WeightedIndex;
use rand::prelude::*;
use serde_json;
use shiplift::builder::{ContainerOptions, ContainerOptionsBuilder};
use std::collections::HashMap;
use std::fs;
use std::net::{SocketAddr, ToSocketAddrs};
use std::path::PathBuf;
use std::time::Duration;
use uuid::Uuid;
pub type AllocationId = String;
pub type NodeId = String;
pub type JobId = String;
pub type ServiceName = String;
const RESOURCE_REFRESH_INTERVAL: Duration = Duration::from_secs(5);
/// A job specification (in docker-compose format)
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct JobSpec {
pub services: HashMap<String, ServiceSpec>,
}
/// A service element within the job spec
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct ServiceSpec {
pub image: String,
pub command: Option<String>,
pub entrypoint: Option<String>,
#[serde(default)]
pub ports: Vec<String>,
#[serde(default)]
pub volumes: Vec<String>,
#[serde(default)]
pub environment: Vec<String>,
}
impl ServiceSpec {
/// Create ContainerOptions based on this service spec
pub fn build_container_options(&self) -> Result<ContainerOptionsBuilder, Error> {
let mut opt = ContainerOptions::builder(&*self.image);
opt.volumes(self.volumes.iter().map(|i| &**i).collect())
.env(self.environment.iter().map(|i| &**i).collect());
if let Some(cmd) = &self.command {
opt.cmd(vec![&*cmd]);
}
if let Some(entrypoint) = &self.entrypoint {
opt.entrypoint(entrypoint);
}
for port in &self.ports {
let mut port = port.split(':');
let host_port = port.next().unwrap().parse()?;
let container_port = port.next().unwrap().parse()?;
opt.expose(container_port, "tcp", host_port);
}
Ok(opt)
}
}
/// Describes the state of the cluster including all jobs and nodes.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)]
pub struct ClusterState {
pub jobs: HashMap<JobId, JobSpec>,
pub services: HashMap<JobId, HashMap<ServiceName, ServiceConfig>>,
pub nodes: HashMap<NodeId, Node>,
pub allocations: HashMap<AllocationId, Allocation>,
pub master_node: Option<NodeId>,
}
impl ClusterState {
/// Get a reference to the current master node
pub fn master_node(&self) -> Option<&Node> {
match &self.master_node {
Some(master_id) => Some(&self.nodes[master_id]),
None => None,
}
}
}
/// Element of cluster state assignming a job's task to a node.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Allocation {
pub allocation_id: AllocationId,
pub node_id: NodeId,
pub job_id: JobId,
pub service_name: ServiceName,
}
/// Runtime configuration of job services (including concurrency level)
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct ServiceConfig {
pub scale: usize,
}
/// Element of cluster state used to describe a member of the cluster.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Node {
pub node_id: NodeId,
pub cluster_address: SocketAddr,
}
impl Node {
pub fn new<S, T>(node_id: T, cluster_address: S) -> Node
where
S: ToSocketAddrs,
T: Into<NodeId>,
{
Node {
cluster_address: cluster_address.to_socket_addrs().unwrap().next().unwrap(),
node_id: node_id.into(),
}
}
}
/// Updates the cluster state to match requested jobs / concurrency levels.
#[derive(Default)]
pub struct Scheduler {
state: ClusterState,
node_resources: HashMap<NodeId, NodeResources>,
state_path: Option<PathBuf>,
}
impl Scheduler {
/// Set this node as master in an empty cluster.
fn bootstrap(&mut self, node: Node) -> Result<(), Error> {
if self.state.master_node.is_some() || !self.state.nodes.is_empty() {
return Err(err_msg("Cannot bootstrap a cluster with existing nodes."));
}
info!("Bootstrapping cluster as node {}", node.node_id);
self.state.master_node = Some(node.node_id.clone());
self.state.nodes.insert(node.node_id.clone(), node);
Executor::from_registry().do_send(ExecutorCommand::UpdateState(self.state.clone()));
Ok(())
}
/// Add / remove allocations based on current jobs and concurrency requirements.
fn update_schedule(&mut self) {
// Build a map of service to existing allocations
let mut service_allocs: HashMap<_, Vec<AllocationId>> = HashMap::new();
for alloc in self.state.allocations.values() {
service_allocs
.entry((&alloc.job_id, &alloc.service_name))
.or_default()
.push(alloc.allocation_id.clone());
}
// Put the changes we need to make here, since we can't modify self.state.allocations while
// borrowed
let mut to_remove = Vec::new();
let mut to_add = Vec::new();
// Used for weighted random node selection
let nodes: Vec<_> = self.state.nodes.keys().collect();
let node_index = WeightedIndex::new(nodes.iter().map(|id| {
self.node_resources
.get(*id)
.map(|resources| resources.total_memory - resources.used_memory)
.unwrap_or(1)
}))
.unwrap();
// Compare the existing allocations with the desired concurrency of each service
for (job_id, job_services) in &self.state.services {
for (service_name, service) in job_services {
let existing = service_allocs
.remove(&(&job_id, &service_name))
.unwrap_or_default();
let diff = service.scale as isize - existing.len() as isize;
debug!("Scheduling {}.{} -> {}", job_id, service_name, diff);
if diff > 0 {
// Create new allocations
for node in node_index
.sample_iter(&mut thread_rng())
.take(diff as usize)
{
to_add.push(Allocation {
allocation_id: Uuid::new_v4().to_hyphenated().to_string(),
node_id: nodes[node].clone(),
job_id: job_id.clone(),
service_name: service_name.clone(),
});
}
} else { | }
// Remove any allocations that don't correspond to any service
for allocs in service_allocs.values() {
to_remove.extend(allocs.iter().cloned());
}
// Now we drop the index service_allocs and we can mutate the state
for alloc_id in to_remove {
self.state.allocations.remove(&alloc_id);
}
for alloc in to_add.drain(..) {
self.state
.allocations
.insert(alloc.allocation_id.clone(), alloc);
}
self.save_state();
spawn(
self.update_nodes()
.then(|res| check_err("Update nodes", res)),
);
}
/// Send the latest state to each node.
fn update_nodes(&self) -> impl Future<Item = (), Error = Error> {
let update_fut: Vec<_> = self
.state
.nodes
.values()
.map(|node| {
client::post(format!("http://{}/node/state", node.cluster_address))
.json(&self.state)
.unwrap()
.send()
})
.collect();
join_all(update_fut)
.from_err()
.map(|results| info!("Sent updated state to {} node(s)", results.len()))
}
fn load_state(&mut self) -> Result<(), Error> {
if let Some(path) = &self.state_path {
info!("Loading state from: {:?}", path);
let raw_state = fs::File::open(path)?;
self.state = serde_json::from_reader(raw_state)?;
self.update_schedule();
}
Ok(())
}
fn save_state(&mut self) {
if let Some(path) = &self.state_path {
info!("Saving state to: {:?}", path);
match serde_json::to_string(&self.state) {
Ok(serialized) => match fs::write(path, serialized) {
Ok(_) => {}
Err(e) => error!("Failed to write state: {:?}", e),
},
Err(e) => error!("Failed to serialize state: {:?}", e),
}
}
}
}
impl Actor for Scheduler {
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// Poll node resource usage (so we don't need to request it each time we reschedule)
ctx.run_interval(RESOURCE_REFRESH_INTERVAL, |_, ctx| {
let update_fut = wrap_future::<_, Self>(ctx.address().send(GetClusterResources))
.map_err(|e, _, _| error!("Failed request resource refresh: {:?}", e))
.map(|res, scheduler, _| match res {
Ok(res) => scheduler.node_resources = res,
Err(e) => error!("Failed to refresh node resources: {:?}", e),
});
ctx.spawn(update_fut);
});
}
}
impl Supervised for Scheduler {}
impl SystemService for Scheduler {}
/// Fire-and-forget type commands for the scheduler
#[derive(Clone, Debug)]
pub enum SchedulerCommand {
CreateJob(JobId, JobSpec),
DeleteJob(JobId),
UpdateService(JobId, ServiceName, ServiceConfig),
BootstrapNode(Node),
RegisterNode(Node),
SetStatePath(PathBuf),
}
impl Message for SchedulerCommand {
type Result = Result<(), Error>;
}
impl Handler<SchedulerCommand> for Scheduler {
type Result = Result<(), Error>;
fn handle(&mut self, cmd: SchedulerCommand, _: &mut Context<Self>) -> Self::Result {
debug!("Scheduler handling command: {:?}", cmd);
match cmd {
SchedulerCommand::CreateJob(job_id, job) => {
job.services.keys().for_each(|service_name| {
self.state
.services
.entry(job_id.clone())
.or_default()
.insert(service_name.clone(), ServiceConfig { scale: 1 });
});
self.state.jobs.insert(job_id, job);
self.update_schedule();
Ok(())
}
SchedulerCommand::UpdateService(job_id, service_name, service_config) => {
let result = self
.state
.services
.get_mut(&job_id)
.and_then(|services| services.get_mut(&service_name))
.map(|service| {
*service = service_config;
{}
})
.ok_or_else(|| err_msg("Error does not exist"));
self.update_schedule();
result
}
SchedulerCommand::DeleteJob(job_id) => {
self.state.jobs.remove(&job_id);
self.state.services.remove(&job_id);
self.update_schedule();
Ok(())
}
SchedulerCommand::BootstrapNode(node) => self.bootstrap(node),
SchedulerCommand::RegisterNode(node) => {
self.state.nodes.insert(node.node_id.clone(), node);
spawn(
self.update_nodes()
.map_err(|e| error!("Failed to update new node: {}", e)),
);
Ok(())
}
SchedulerCommand::SetStatePath(path) => {
self.state_path = Some(path);
self.load_state()
}
}
}
}
/// Message type for requesting resource usage of all nodes
pub struct GetClusterResources;
impl Message for GetClusterResources {
type Result = Result<HashMap<String, NodeResources>, Error>;
}
impl Handler<GetClusterResources> for Scheduler {
type Result = ResponseFuture<HashMap<String, NodeResources>, Error>;
fn handle(&mut self, _: GetClusterResources, _: &mut Context<Self>) -> Self::Result {
let node_queries: Vec<_> = self
.state
.nodes
.values()
.map(|node| {
let node_id = node.node_id.clone();
client::get(format!("http://{}/node/resources", node.cluster_address))
.finish()
.unwrap()
.send()
.map_err(Error::from)
.and_then(|res| res.json().from_err())
.then(move |res| {
Ok::<_, Error>(match res {
Ok(ok) => Some((node_id, ok)),
Err(_) => None,
})
})
})
.collect();
Box::new(
join_all(node_queries).map(|mut res| res.drain(..).filter_map(|res| res).collect()),
)
}
}
/// Message type for requesting the current list of jobs
pub struct ListJobs;
impl Message for ListJobs {
type Result = Result<HashMap<String, JobSpec>, Error>;
}
impl Handler<ListJobs> for Scheduler {
type Result = Result<HashMap<String, JobSpec>, Error>;
fn handle(&mut self, _: ListJobs, _: &mut Context<Self>) -> Self::Result {
Ok(self.state.jobs.clone())
}
}
/// Messsage requesting a list of allocations
pub struct ListAllocations;
impl Message for ListAllocations {
type Result = Result<Vec<Allocation>, Error>;
}
impl Handler<ListAllocations> for Scheduler {
type Result = Result<Vec<Allocation>, Error>;
fn handle(&mut self, _: ListAllocations, _: &mut Context<Self>) -> Self::Result {
Ok(self.state.allocations.values().cloned().collect())
}
}
#[cfg(test)]
mod test {
use crate::scheduler::*;
use crate::test_support::*;
use serde_yaml;
#[test]
fn test_create_job() {
let job: JobSpec =
serde_yaml::from_str(TEST_JOB_SPEC).expect("Failed to parse sample job spec");
with_bootstrap_node(|| {
Scheduler::from_registry()
.send(SchedulerCommand::CreateJob(String::from("test-job"), job))
.and_then(move |res| {
assert!(res.is_ok());
Scheduler::from_registry().send(ListJobs)
})
.map(|res| {
assert_eq!(res.expect("List jobs failed").len(), 1);
})
});
}
} | to_remove.extend(existing.iter().take(diff.abs() as usize).cloned());
}
} | random_line_split |
scheduler.rs | //! Scheduler is responsible for allocating containers on cluster nodes according to the currently
//! submitted jobs and concurrency levels. It is only used on the master node.
use crate::executor::*;
use actix::fut::wrap_future;
use actix::prelude::*;
use actix::registry::SystemService;
use actix::spawn;
use actix_web::{client, HttpMessage};
use failure::{err_msg, Error};
use futures::future::{join_all, Future};
use rand::distributions::WeightedIndex;
use rand::prelude::*;
use serde_json;
use shiplift::builder::{ContainerOptions, ContainerOptionsBuilder};
use std::collections::HashMap;
use std::fs;
use std::net::{SocketAddr, ToSocketAddrs};
use std::path::PathBuf;
use std::time::Duration;
use uuid::Uuid;
pub type AllocationId = String;
pub type NodeId = String;
pub type JobId = String;
pub type ServiceName = String;
const RESOURCE_REFRESH_INTERVAL: Duration = Duration::from_secs(5);
/// A job specification (in docker-compose format)
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct JobSpec {
pub services: HashMap<String, ServiceSpec>,
}
/// A service element within the job spec
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct ServiceSpec {
pub image: String,
pub command: Option<String>,
pub entrypoint: Option<String>,
#[serde(default)]
pub ports: Vec<String>,
#[serde(default)]
pub volumes: Vec<String>,
#[serde(default)]
pub environment: Vec<String>,
}
impl ServiceSpec {
/// Create ContainerOptions based on this service spec
pub fn build_container_options(&self) -> Result<ContainerOptionsBuilder, Error> {
let mut opt = ContainerOptions::builder(&*self.image);
opt.volumes(self.volumes.iter().map(|i| &**i).collect())
.env(self.environment.iter().map(|i| &**i).collect());
if let Some(cmd) = &self.command {
opt.cmd(vec![&*cmd]);
}
if let Some(entrypoint) = &self.entrypoint {
opt.entrypoint(entrypoint);
}
for port in &self.ports {
let mut port = port.split(':');
let host_port = port.next().unwrap().parse()?;
let container_port = port.next().unwrap().parse()?;
opt.expose(container_port, "tcp", host_port);
}
Ok(opt)
}
}
/// Describes the state of the cluster including all jobs and nodes.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)]
pub struct ClusterState {
pub jobs: HashMap<JobId, JobSpec>,
pub services: HashMap<JobId, HashMap<ServiceName, ServiceConfig>>,
pub nodes: HashMap<NodeId, Node>,
pub allocations: HashMap<AllocationId, Allocation>,
pub master_node: Option<NodeId>,
}
impl ClusterState {
/// Get a reference to the current master node
pub fn master_node(&self) -> Option<&Node> {
match &self.master_node {
Some(master_id) => Some(&self.nodes[master_id]),
None => None,
}
}
}
/// Element of cluster state assignming a job's task to a node.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Allocation {
pub allocation_id: AllocationId,
pub node_id: NodeId,
pub job_id: JobId,
pub service_name: ServiceName,
}
/// Runtime configuration of job services (including concurrency level)
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct ServiceConfig {
pub scale: usize,
}
/// Element of cluster state used to describe a member of the cluster.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct Node {
pub node_id: NodeId,
pub cluster_address: SocketAddr,
}
impl Node {
pub fn new<S, T>(node_id: T, cluster_address: S) -> Node
where
S: ToSocketAddrs,
T: Into<NodeId>,
{
Node {
cluster_address: cluster_address.to_socket_addrs().unwrap().next().unwrap(),
node_id: node_id.into(),
}
}
}
/// Updates the cluster state to match requested jobs / concurrency levels.
#[derive(Default)]
pub struct Scheduler {
state: ClusterState,
node_resources: HashMap<NodeId, NodeResources>,
state_path: Option<PathBuf>,
}
impl Scheduler {
/// Set this node as master in an empty cluster.
fn bootstrap(&mut self, node: Node) -> Result<(), Error> {
if self.state.master_node.is_some() || !self.state.nodes.is_empty() {
return Err(err_msg("Cannot bootstrap a cluster with existing nodes."));
}
info!("Bootstrapping cluster as node {}", node.node_id);
self.state.master_node = Some(node.node_id.clone());
self.state.nodes.insert(node.node_id.clone(), node);
Executor::from_registry().do_send(ExecutorCommand::UpdateState(self.state.clone()));
Ok(())
}
/// Add / remove allocations based on current jobs and concurrency requirements.
fn update_schedule(&mut self) {
// Build a map of service to existing allocations
let mut service_allocs: HashMap<_, Vec<AllocationId>> = HashMap::new();
for alloc in self.state.allocations.values() {
service_allocs
.entry((&alloc.job_id, &alloc.service_name))
.or_default()
.push(alloc.allocation_id.clone());
}
// Put the changes we need to make here, since we can't modify self.state.allocations while
// borrowed
let mut to_remove = Vec::new();
let mut to_add = Vec::new();
// Used for weighted random node selection
let nodes: Vec<_> = self.state.nodes.keys().collect();
let node_index = WeightedIndex::new(nodes.iter().map(|id| {
self.node_resources
.get(*id)
.map(|resources| resources.total_memory - resources.used_memory)
.unwrap_or(1)
}))
.unwrap();
// Compare the existing allocations with the desired concurrency of each service
for (job_id, job_services) in &self.state.services {
for (service_name, service) in job_services {
let existing = service_allocs
.remove(&(&job_id, &service_name))
.unwrap_or_default();
let diff = service.scale as isize - existing.len() as isize;
debug!("Scheduling {}.{} -> {}", job_id, service_name, diff);
if diff > 0 {
// Create new allocations
for node in node_index
.sample_iter(&mut thread_rng())
.take(diff as usize)
{
to_add.push(Allocation {
allocation_id: Uuid::new_v4().to_hyphenated().to_string(),
node_id: nodes[node].clone(),
job_id: job_id.clone(),
service_name: service_name.clone(),
});
}
} else {
to_remove.extend(existing.iter().take(diff.abs() as usize).cloned());
}
}
}
// Remove any allocations that don't correspond to any service
for allocs in service_allocs.values() {
to_remove.extend(allocs.iter().cloned());
}
// Now we drop the index service_allocs and we can mutate the state
for alloc_id in to_remove {
self.state.allocations.remove(&alloc_id);
}
for alloc in to_add.drain(..) {
self.state
.allocations
.insert(alloc.allocation_id.clone(), alloc);
}
self.save_state();
spawn(
self.update_nodes()
.then(|res| check_err("Update nodes", res)),
);
}
/// Send the latest state to each node.
fn update_nodes(&self) -> impl Future<Item = (), Error = Error> {
let update_fut: Vec<_> = self
.state
.nodes
.values()
.map(|node| {
client::post(format!("http://{}/node/state", node.cluster_address))
.json(&self.state)
.unwrap()
.send()
})
.collect();
join_all(update_fut)
.from_err()
.map(|results| info!("Sent updated state to {} node(s)", results.len()))
}
fn load_state(&mut self) -> Result<(), Error> {
if let Some(path) = &self.state_path {
info!("Loading state from: {:?}", path);
let raw_state = fs::File::open(path)?;
self.state = serde_json::from_reader(raw_state)?;
self.update_schedule();
}
Ok(())
}
fn save_state(&mut self) {
if let Some(path) = &self.state_path {
info!("Saving state to: {:?}", path);
match serde_json::to_string(&self.state) {
Ok(serialized) => match fs::write(path, serialized) {
Ok(_) => {}
Err(e) => error!("Failed to write state: {:?}", e),
},
Err(e) => error!("Failed to serialize state: {:?}", e),
}
}
}
}
impl Actor for Scheduler {
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Context<Self>) {
// Poll node resource usage (so we don't need to request it each time we reschedule)
ctx.run_interval(RESOURCE_REFRESH_INTERVAL, |_, ctx| {
let update_fut = wrap_future::<_, Self>(ctx.address().send(GetClusterResources))
.map_err(|e, _, _| error!("Failed request resource refresh: {:?}", e))
.map(|res, scheduler, _| match res {
Ok(res) => scheduler.node_resources = res,
Err(e) => error!("Failed to refresh node resources: {:?}", e),
});
ctx.spawn(update_fut);
});
}
}
impl Supervised for Scheduler {}
impl SystemService for Scheduler {}
/// Fire-and-forget type commands for the scheduler
#[derive(Clone, Debug)]
pub enum SchedulerCommand {
CreateJob(JobId, JobSpec),
DeleteJob(JobId),
UpdateService(JobId, ServiceName, ServiceConfig),
BootstrapNode(Node),
RegisterNode(Node),
SetStatePath(PathBuf),
}
impl Message for SchedulerCommand {
type Result = Result<(), Error>;
}
impl Handler<SchedulerCommand> for Scheduler {
type Result = Result<(), Error>;
fn handle(&mut self, cmd: SchedulerCommand, _: &mut Context<Self>) -> Self::Result {
debug!("Scheduler handling command: {:?}", cmd);
match cmd {
SchedulerCommand::CreateJob(job_id, job) => {
job.services.keys().for_each(|service_name| {
self.state
.services
.entry(job_id.clone())
.or_default()
.insert(service_name.clone(), ServiceConfig { scale: 1 });
});
self.state.jobs.insert(job_id, job);
self.update_schedule();
Ok(())
}
SchedulerCommand::UpdateService(job_id, service_name, service_config) => {
let result = self
.state
.services
.get_mut(&job_id)
.and_then(|services| services.get_mut(&service_name))
.map(|service| {
*service = service_config;
{}
})
.ok_or_else(|| err_msg("Error does not exist"));
self.update_schedule();
result
}
SchedulerCommand::DeleteJob(job_id) => {
self.state.jobs.remove(&job_id);
self.state.services.remove(&job_id);
self.update_schedule();
Ok(())
}
SchedulerCommand::BootstrapNode(node) => self.bootstrap(node),
SchedulerCommand::RegisterNode(node) => {
self.state.nodes.insert(node.node_id.clone(), node);
spawn(
self.update_nodes()
.map_err(|e| error!("Failed to update new node: {}", e)),
);
Ok(())
}
SchedulerCommand::SetStatePath(path) => {
self.state_path = Some(path);
self.load_state()
}
}
}
}
/// Message type for requesting resource usage of all nodes
pub struct GetClusterResources;
impl Message for GetClusterResources {
type Result = Result<HashMap<String, NodeResources>, Error>;
}
impl Handler<GetClusterResources> for Scheduler {
type Result = ResponseFuture<HashMap<String, NodeResources>, Error>;
fn handle(&mut self, _: GetClusterResources, _: &mut Context<Self>) -> Self::Result {
let node_queries: Vec<_> = self
.state
.nodes
.values()
.map(|node| {
let node_id = node.node_id.clone();
client::get(format!("http://{}/node/resources", node.cluster_address))
.finish()
.unwrap()
.send()
.map_err(Error::from)
.and_then(|res| res.json().from_err())
.then(move |res| {
Ok::<_, Error>(match res {
Ok(ok) => Some((node_id, ok)),
Err(_) => None,
})
})
})
.collect();
Box::new(
join_all(node_queries).map(|mut res| res.drain(..).filter_map(|res| res).collect()),
)
}
}
/// Message type for requesting the current list of jobs
pub struct ListJobs;
impl Message for ListJobs {
type Result = Result<HashMap<String, JobSpec>, Error>;
}
impl Handler<ListJobs> for Scheduler {
type Result = Result<HashMap<String, JobSpec>, Error>;
fn handle(&mut self, _: ListJobs, _: &mut Context<Self>) -> Self::Result {
Ok(self.state.jobs.clone())
}
}
/// Messsage requesting a list of allocations
pub struct ListAllocations;
impl Message for ListAllocations {
type Result = Result<Vec<Allocation>, Error>;
}
impl Handler<ListAllocations> for Scheduler {
type Result = Result<Vec<Allocation>, Error>;
fn handle(&mut self, _: ListAllocations, _: &mut Context<Self>) -> Self::Result {
Ok(self.state.allocations.values().cloned().collect())
}
}
#[cfg(test)]
mod test {
use crate::scheduler::*;
use crate::test_support::*;
use serde_yaml;
#[test]
fn test_create_job() |
}
| {
let job: JobSpec =
serde_yaml::from_str(TEST_JOB_SPEC).expect("Failed to parse sample job spec");
with_bootstrap_node(|| {
Scheduler::from_registry()
.send(SchedulerCommand::CreateJob(String::from("test-job"), job))
.and_then(move |res| {
assert!(res.is_ok());
Scheduler::from_registry().send(ListJobs)
})
.map(|res| {
assert_eq!(res.expect("List jobs failed").len(), 1);
})
});
} | identifier_body |
lib.rs | //! Write your own tests and benchmarks that look and behave like built-in tests!
//!
//! This is a simple and small test harness that mimics the original `libtest`
//! (used by `cargo test`/`rustc --test`). That means: all output looks pretty
//! much like `cargo test` and most CLI arguments are understood and used. With
//! that plumbing work out of the way, your test runner can focus on the actual
//! testing.
//!
//! For a small real world example, see [`examples/tidy.rs`][1].
//!
//! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs
//!
//! # Usage
//!
//! To use this, you most likely want to add a manual `[[test]]` section to
//! `Cargo.toml` and set `harness = false`. For example:
//!
//! ```toml
//! [[test]]
//! name = "mytest"
//! path = "tests/mytest.rs"
//! harness = false
//! ```
//!
//! And in `tests/mytest.rs` you would call [`run`] in the `main` function:
//!
//! ```no_run
//! use libtest_mimic::{Arguments, Trial};
//!
//!
//! // Parse command line arguments
//! let args = Arguments::from_args();
//!
//! // Create a list of tests and/or benchmarks (in this case: two dummy tests).
//! let tests = vec![
//! Trial::test("succeeding_test", move || Ok(())),
//! Trial::test("failing_test", move || Err("Woops".into())),
//! ];
//!
//! // Run all tests and exit the application appropriatly.
//! libtest_mimic::run(&args, tests).exit();
//! ```
//!
//! Instead of returning `Ok` or `Err` directly, you want to actually perform
//! your tests, of course. See [`Trial::test`] for more information on how to
//! define a test. You can of course list all your tests manually. But in many
//! cases it is useful to generate one test per file in a directory, for
//! example.
//!
//! You can then run `cargo test --test mytest` to run it. To see the CLI
//! arguments supported by this crate, run `cargo test --test mytest -- -h`.
//!
//!
//! # Known limitations and differences to the official test harness
//!
//! `libtest-mimic` works on a best-effort basis: it tries to be as close to
//! `libtest` as possible, but there are differences for a variety of reasons.
//! For example, some rarely used features might not be implemented, some
//! features are extremely difficult to implement, and removing minor,
//! unimportant differences is just not worth the hassle.
//!
//! Some of the notable differences:
//!
//! - Output capture and `--nocapture`: simply not supported. The official
//! `libtest` uses internal `std` functions to temporarily redirect output.
//! `libtest-mimic` cannot use those. See [this issue][capture] for more
//! information.
//! - `--format=json|junit`
//!
//! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9
#![forbid(unsafe_code)]
use std::{process, sync::mpsc, fmt, time::Instant};
mod args;
mod printer;
use printer::Printer;
use threadpool::ThreadPool;
pub use crate::args::{Arguments, ColorSetting, FormatSetting};
/// A single test or benchmark.
///
/// The original `libtest` often calls benchmarks "tests", which is a bit
/// confusing. So in this library, it is called "trial".
///
/// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's
/// `name` is printed and used for filtering. The `runner` is called when the
/// test/benchmark is executed to determine its outcome. If `runner` panics,
/// the trial is considered "failed". If you need the behavior of
/// `#[should_panic]` you need to catch the panic yourself. You likely want to
/// compare the panic payload to an expected value anyway.
pub struct Trial {
runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
info: TestInfo,
}
impl Trial {
/// Creates a (non-benchmark) test with the given name and runner.
///
/// The runner returning `Ok(())` is interpreted as the test passing. If the
/// runner returns `Err(_)`, the test is considered failed.
pub fn test<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce() -> Result<(), Failed> + Send + 'static,
{
Self {
runner: Box::new(move |_test_mode| match runner() {
Ok(()) => Outcome::Passed,
Err(failed) => Outcome::Failed(failed),
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: false,
},
}
}
/// Creates a benchmark with the given name and runner.
///
/// If the runner's parameter `test_mode` is `true`, the runner function
/// should run all code just once, without measuring, just to make sure it
/// does not panic. If the parameter is `false`, it should perform the
/// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`,
/// but if it's `false`, you have to return a `Measurement`, or else the
/// benchmark is considered a failure.
///
/// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
/// `false` when `--bench` is set. If `--test` is set, benchmarks are not
/// ran at all, and both flags cannot be set at the same time.
pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
{
Self {
runner: Box::new(move |test_mode| match runner(test_mode) {
Err(failed) => Outcome::Failed(failed),
Ok(_) if test_mode => Outcome::Passed,
Ok(Some(measurement)) => Outcome::Measured(measurement),
Ok(None)
=> Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()),
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: true,
},
}
}
/// Sets the "kind" of this test/benchmark. If this string is not
/// empty, it is printed in brackets before the test name (e.g.
/// `test [my-kind] test_name`). (Default: *empty*)
///
/// This is the only extension to the original libtest.
pub fn with_kind(self, kind: impl Into<String>) -> Self {
Self {
info: TestInfo {
kind: kind.into(),
..self.info
},
..self
}
}
/// Sets whether or not this test is considered "ignored". (Default: `false`)
///
/// With the built-in test suite, you can annotate `#[ignore]` on tests to
/// not execute them by default (for example because they take a long time
/// or require a special environment). If the `--ignored` flag is set,
/// ignored tests are executed, too.
pub fn with_ignored_flag(self, is_ignored: bool) -> Self {
Self {
info: TestInfo {
is_ignored,
..self.info
},
..self
}
}
/// Returns the name of this trial.
pub fn name(&self) -> &str {
&self.info.name
}
/// Returns the kind of this trial. If you have not set a kind, this is an
/// empty string.
pub fn kind(&self) -> &str {
&self.info.kind
}
/// Returns whether this trial has been marked as *ignored*.
pub fn has_ignored_flag(&self) -> bool {
self.info.is_ignored
}
/// Returns `true` iff this trial is a test (as opposed to a benchmark).
pub fn is_test(&self) -> bool {
!self.info.is_bench
}
/// Returns `true` iff this trial is a benchmark (as opposed to a test).
pub fn is_bench(&self) -> bool {
self.info.is_bench
}
}
impl fmt::Debug for Trial {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct OpaqueRunner;
impl fmt::Debug for OpaqueRunner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<runner>")
}
}
f.debug_struct("Test")
.field("runner", &OpaqueRunner)
.field("name", &self.info.name)
.field("kind", &self.info.kind)
.field("is_ignored", &self.info.is_ignored)
.field("is_bench", &self.info.is_bench)
.finish()
}
}
#[derive(Debug)]
struct TestInfo {
name: String,
kind: String,
is_ignored: bool,
is_bench: bool,
}
/// Output of a benchmark.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Measurement {
/// Average time in ns.
pub avg: u64,
/// Variance in ns.
pub variance: u64,
}
/// Indicates that a test/benchmark has failed. Optionally carries a message.
///
/// You usually want to use the `From` impl of this type, which allows you to
/// convert any `T: fmt::Display` (e.g. `String`, `&str`, ...) into `Failed`.
#[derive(Debug, Clone)]
pub struct Failed {
msg: Option<String>,
}
impl Failed {
/// Creates an instance without message.
pub fn without_message() -> Self {
Self { msg: None }
}
/// Returns the message of this instance.
pub fn message(&self) -> Option<&str> {
self.msg.as_deref()
}
}
impl<M: std::fmt::Display> From<M> for Failed {
fn from(msg: M) -> Self {
Self {
msg: Some(msg.to_string())
}
}
}
/// The outcome of performing a test/benchmark.
#[derive(Debug, Clone)]
enum Outcome {
/// The test passed.
Passed,
/// The test or benchmark failed.
Failed(Failed),
/// The test or benchmark was ignored.
Ignored,
/// The benchmark was successfully run.
Measured(Measurement),
}
/// Contains information about the entire test run. Is returned by[`run`].
///
/// This type is marked as `#[must_use]`. Usually, you just call
/// [`exit()`][Conclusion::exit] on the result of `run` to exit the application
/// with the correct exit code. But you can also store this value and inspect
/// its data.
#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"]
pub struct Conclusion {
/// Number of tests and benchmarks that were filtered out (either by the
/// filter-in pattern or by `--skip` arguments).
pub num_filtered_out: u64,
/// Number of passed tests.
pub num_passed: u64,
/// Number of failed tests and benchmarks.
pub num_failed: u64,
/// Number of ignored tests and benchmarks.
pub num_ignored: u64,
/// Number of benchmarks that successfully ran.
pub num_measured: u64,
}
impl Conclusion {
/// Exits the application with an appropriate error code (0 if all tests
/// have passed, 101 if there have been failures).
pub fn exit(&self) -> ! {
self.exit_if_failed();
process::exit(0);
}
/// Exits the application with error code 101 if there were any failures.
/// Otherwise, returns normally.
pub fn exit_if_failed(&self) {
if self.has_failed() {
process::exit(101)
}
}
/// Returns whether there have been any failures.
pub fn has_failed(&self) -> bool {
self.num_failed > 0
}
fn empty() -> Self {
Self {
num_filtered_out: 0,
num_passed: 0,
num_failed: 0,
num_ignored: 0,
num_measured: 0,
}
}
}
impl Arguments {
/// Returns `true` if the given test should be ignored.
fn is_ignored(&self, test: &Trial) -> bool {
(test.info.is_ignored && !self.ignored && !self.include_ignored)
|| (test.info.is_bench && self.test)
|| (!test.info.is_bench && self.bench)
}
fn is_filtered_out(&self, test: &Trial) -> bool {
let test_name = &test.info.name;
// If a filter was specified, apply this
if let Some(filter) = &self.filter {
match self.exact {
true if test_name != filter => return true,
false if !test_name.contains(filter) => return true, | for skip_filter in &self.skip {
match self.exact {
true if test_name == skip_filter => return true,
false if test_name.contains(skip_filter) => return true,
_ => {}
}
}
if self.ignored && !test.info.is_ignored {
return true;
}
false
}
}
/// Runs all given trials (tests & benchmarks).
///
/// This is the central function of this crate. It provides the framework for
/// the testing harness. It does all the printing and house keeping.
///
/// The returned value contains a couple of useful information. See
/// [`Conclusion`] for more information. If `--list` was specified, a list is
/// printed and a dummy `Conclusion` is returned.
pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
let start_instant = Instant::now();
let mut conclusion = Conclusion::empty();
// Apply filtering
if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
let len_before = tests.len() as u64;
tests.retain(|test| !args.is_filtered_out(test));
conclusion.num_filtered_out = len_before - tests.len() as u64;
}
let tests = tests;
// Create printer which is used for all output.
let mut printer = printer::Printer::new(args, &tests);
// If `--list` is specified, just print the list and return.
if args.list {
printer.print_list(&tests, args.ignored);
return Conclusion::empty();
}
// Print number of tests
printer.print_title(tests.len() as u64);
let mut failed_tests = Vec::new();
let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| {
printer.print_single_outcome(&outcome);
// Handle outcome
match outcome {
Outcome::Passed => conclusion.num_passed += 1,
Outcome::Failed(failed) => {
failed_tests.push((test, failed.msg));
conclusion.num_failed += 1;
},
Outcome::Ignored => conclusion.num_ignored += 1,
Outcome::Measured(_) => conclusion.num_measured += 1,
}
};
// Execute all tests.
let test_mode = !args.bench;
if args.test_threads == Some(1) {
// Run test sequentially in main thread
for test in tests {
// Print `test foo ...`, run the test, then print the outcome in
// the same line.
printer.print_test(&test.info);
let outcome = if args.is_ignored(&test) {
Outcome::Ignored
} else {
run_single(test.runner, test_mode)
};
handle_outcome(outcome, test.info, &mut printer);
}
} else {
// Run test in thread pool.
let pool = match args.test_threads {
Some(num_threads) => ThreadPool::new(num_threads),
None => ThreadPool::default()
};
let (sender, receiver) = mpsc::channel();
let num_tests = tests.len();
for test in tests {
if args.is_ignored(&test) {
sender.send((Outcome::Ignored, test.info)).unwrap();
} else {
let sender = sender.clone();
pool.execute(move || {
// It's fine to ignore the result of sending. If the
// receiver has hung up, everything will wind down soon
// anyway.
let outcome = run_single(test.runner, test_mode);
let _ = sender.send((outcome, test.info));
});
}
}
for (outcome, test_info) in receiver.iter().take(num_tests) {
// In multithreaded mode, we do only print the start of the line
// after the test ran, as otherwise it would lead to terribly
// interleaved output.
printer.print_test(&test_info);
handle_outcome(outcome, test_info, &mut printer);
}
}
// Print failures if there were any, and the final summary.
if !failed_tests.is_empty() {
printer.print_failures(&failed_tests);
}
printer.print_summary(&conclusion, start_instant.elapsed());
conclusion
}
/// Runs the given runner, catching any panics and treating them as a failed test.
fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
use std::panic::{catch_unwind, AssertUnwindSafe};
catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
// The `panic` information is just an `Any` object representing the
// value the panic was invoked with. For most panics (which use
// `panic!` like `println!`), this is either `&str` or `String`.
let payload = e.downcast_ref::<String>()
.map(|s| s.as_str())
.or(e.downcast_ref::<&str>().map(|s| *s));
let msg = match payload {
Some(payload) => format!("test panicked: {payload}"),
None => format!("test panicked"),
};
Outcome::Failed(msg.into())
})
} | _ => {}
};
}
// If any skip pattern were specified, test for all patterns. | random_line_split |
lib.rs | //! Write your own tests and benchmarks that look and behave like built-in tests!
//!
//! This is a simple and small test harness that mimics the original `libtest`
//! (used by `cargo test`/`rustc --test`). That means: all output looks pretty
//! much like `cargo test` and most CLI arguments are understood and used. With
//! that plumbing work out of the way, your test runner can focus on the actual
//! testing.
//!
//! For a small real world example, see [`examples/tidy.rs`][1].
//!
//! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs
//!
//! # Usage
//!
//! To use this, you most likely want to add a manual `[[test]]` section to
//! `Cargo.toml` and set `harness = false`. For example:
//!
//! ```toml
//! [[test]]
//! name = "mytest"
//! path = "tests/mytest.rs"
//! harness = false
//! ```
//!
//! And in `tests/mytest.rs` you would call [`run`] in the `main` function:
//!
//! ```no_run
//! use libtest_mimic::{Arguments, Trial};
//!
//!
//! // Parse command line arguments
//! let args = Arguments::from_args();
//!
//! // Create a list of tests and/or benchmarks (in this case: two dummy tests).
//! let tests = vec![
//! Trial::test("succeeding_test", move || Ok(())),
//! Trial::test("failing_test", move || Err("Woops".into())),
//! ];
//!
//! // Run all tests and exit the application appropriatly.
//! libtest_mimic::run(&args, tests).exit();
//! ```
//!
//! Instead of returning `Ok` or `Err` directly, you want to actually perform
//! your tests, of course. See [`Trial::test`] for more information on how to
//! define a test. You can of course list all your tests manually. But in many
//! cases it is useful to generate one test per file in a directory, for
//! example.
//!
//! You can then run `cargo test --test mytest` to run it. To see the CLI
//! arguments supported by this crate, run `cargo test --test mytest -- -h`.
//!
//!
//! # Known limitations and differences to the official test harness
//!
//! `libtest-mimic` works on a best-effort basis: it tries to be as close to
//! `libtest` as possible, but there are differences for a variety of reasons.
//! For example, some rarely used features might not be implemented, some
//! features are extremely difficult to implement, and removing minor,
//! unimportant differences is just not worth the hassle.
//!
//! Some of the notable differences:
//!
//! - Output capture and `--nocapture`: simply not supported. The official
//! `libtest` uses internal `std` functions to temporarily redirect output.
//! `libtest-mimic` cannot use those. See [this issue][capture] for more
//! information.
//! - `--format=json|junit`
//!
//! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9
#![forbid(unsafe_code)]
use std::{process, sync::mpsc, fmt, time::Instant};
mod args;
mod printer;
use printer::Printer;
use threadpool::ThreadPool;
pub use crate::args::{Arguments, ColorSetting, FormatSetting};
/// A single test or benchmark.
///
/// The original `libtest` often calls benchmarks "tests", which is a bit
/// confusing. So in this library, it is called "trial".
///
/// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's
/// `name` is printed and used for filtering. The `runner` is called when the
/// test/benchmark is executed to determine its outcome. If `runner` panics,
/// the trial is considered "failed". If you need the behavior of
/// `#[should_panic]` you need to catch the panic yourself. You likely want to
/// compare the panic payload to an expected value anyway.
pub struct Trial {
runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
info: TestInfo,
}
impl Trial {
/// Creates a (non-benchmark) test with the given name and runner.
///
/// The runner returning `Ok(())` is interpreted as the test passing. If the
/// runner returns `Err(_)`, the test is considered failed.
pub fn test<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce() -> Result<(), Failed> + Send + 'static,
{
Self {
runner: Box::new(move |_test_mode| match runner() {
Ok(()) => Outcome::Passed,
Err(failed) => Outcome::Failed(failed),
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: false,
},
}
}
/// Creates a benchmark with the given name and runner.
///
/// If the runner's parameter `test_mode` is `true`, the runner function
/// should run all code just once, without measuring, just to make sure it
/// does not panic. If the parameter is `false`, it should perform the
/// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`,
/// but if it's `false`, you have to return a `Measurement`, or else the
/// benchmark is considered a failure.
///
/// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
/// `false` when `--bench` is set. If `--test` is set, benchmarks are not
/// ran at all, and both flags cannot be set at the same time.
pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
{
Self {
runner: Box::new(move |test_mode| match runner(test_mode) {
Err(failed) => Outcome::Failed(failed),
Ok(_) if test_mode => Outcome::Passed,
Ok(Some(measurement)) => Outcome::Measured(measurement),
Ok(None)
=> Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()),
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: true,
},
}
}
/// Sets the "kind" of this test/benchmark. If this string is not
/// empty, it is printed in brackets before the test name (e.g.
/// `test [my-kind] test_name`). (Default: *empty*)
///
/// This is the only extension to the original libtest.
pub fn with_kind(self, kind: impl Into<String>) -> Self {
Self {
info: TestInfo {
kind: kind.into(),
..self.info
},
..self
}
}
/// Sets whether or not this test is considered "ignored". (Default: `false`)
///
/// With the built-in test suite, you can annotate `#[ignore]` on tests to
/// not execute them by default (for example because they take a long time
/// or require a special environment). If the `--ignored` flag is set,
/// ignored tests are executed, too.
pub fn with_ignored_flag(self, is_ignored: bool) -> Self {
Self {
info: TestInfo {
is_ignored,
..self.info
},
..self
}
}
/// Returns the name of this trial.
pub fn name(&self) -> &str {
&self.info.name
}
/// Returns the kind of this trial. If you have not set a kind, this is an
/// empty string.
pub fn kind(&self) -> &str {
&self.info.kind
}
/// Returns whether this trial has been marked as *ignored*.
pub fn has_ignored_flag(&self) -> bool {
self.info.is_ignored
}
/// Returns `true` iff this trial is a test (as opposed to a benchmark).
pub fn is_test(&self) -> bool {
!self.info.is_bench
}
/// Returns `true` iff this trial is a benchmark (as opposed to a test).
pub fn is_bench(&self) -> bool {
self.info.is_bench
}
}
impl fmt::Debug for Trial {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct OpaqueRunner;
impl fmt::Debug for OpaqueRunner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<runner>")
}
}
f.debug_struct("Test")
.field("runner", &OpaqueRunner)
.field("name", &self.info.name)
.field("kind", &self.info.kind)
.field("is_ignored", &self.info.is_ignored)
.field("is_bench", &self.info.is_bench)
.finish()
}
}
#[derive(Debug)]
struct TestInfo {
name: String,
kind: String,
is_ignored: bool,
is_bench: bool,
}
/// Output of a benchmark.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Measurement {
/// Average time in ns.
pub avg: u64,
/// Variance in ns.
pub variance: u64,
}
/// Indicates that a test/benchmark has failed. Optionally carries a message.
///
/// You usually want to use the `From` impl of this type, which allows you to
/// convert any `T: fmt::Display` (e.g. `String`, `&str`, ...) into `Failed`.
#[derive(Debug, Clone)]
pub struct Failed {
msg: Option<String>,
}
impl Failed {
/// Creates an instance without message.
pub fn without_message() -> Self {
Self { msg: None }
}
/// Returns the message of this instance.
pub fn message(&self) -> Option<&str> {
self.msg.as_deref()
}
}
impl<M: std::fmt::Display> From<M> for Failed {
fn from(msg: M) -> Self {
Self {
msg: Some(msg.to_string())
}
}
}
/// The outcome of performing a test/benchmark.
#[derive(Debug, Clone)]
enum | {
/// The test passed.
Passed,
/// The test or benchmark failed.
Failed(Failed),
/// The test or benchmark was ignored.
Ignored,
/// The benchmark was successfully run.
Measured(Measurement),
}
/// Contains information about the entire test run. Is returned by[`run`].
///
/// This type is marked as `#[must_use]`. Usually, you just call
/// [`exit()`][Conclusion::exit] on the result of `run` to exit the application
/// with the correct exit code. But you can also store this value and inspect
/// its data.
#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"]
pub struct Conclusion {
/// Number of tests and benchmarks that were filtered out (either by the
/// filter-in pattern or by `--skip` arguments).
pub num_filtered_out: u64,
/// Number of passed tests.
pub num_passed: u64,
/// Number of failed tests and benchmarks.
pub num_failed: u64,
/// Number of ignored tests and benchmarks.
pub num_ignored: u64,
/// Number of benchmarks that successfully ran.
pub num_measured: u64,
}
impl Conclusion {
/// Exits the application with an appropriate error code (0 if all tests
/// have passed, 101 if there have been failures).
pub fn exit(&self) -> ! {
self.exit_if_failed();
process::exit(0);
}
/// Exits the application with error code 101 if there were any failures.
/// Otherwise, returns normally.
pub fn exit_if_failed(&self) {
if self.has_failed() {
process::exit(101)
}
}
/// Returns whether there have been any failures.
pub fn has_failed(&self) -> bool {
self.num_failed > 0
}
fn empty() -> Self {
Self {
num_filtered_out: 0,
num_passed: 0,
num_failed: 0,
num_ignored: 0,
num_measured: 0,
}
}
}
impl Arguments {
/// Returns `true` if the given test should be ignored.
fn is_ignored(&self, test: &Trial) -> bool {
(test.info.is_ignored && !self.ignored && !self.include_ignored)
|| (test.info.is_bench && self.test)
|| (!test.info.is_bench && self.bench)
}
fn is_filtered_out(&self, test: &Trial) -> bool {
let test_name = &test.info.name;
// If a filter was specified, apply this
if let Some(filter) = &self.filter {
match self.exact {
true if test_name != filter => return true,
false if !test_name.contains(filter) => return true,
_ => {}
};
}
// If any skip pattern were specified, test for all patterns.
for skip_filter in &self.skip {
match self.exact {
true if test_name == skip_filter => return true,
false if test_name.contains(skip_filter) => return true,
_ => {}
}
}
if self.ignored && !test.info.is_ignored {
return true;
}
false
}
}
/// Runs all given trials (tests & benchmarks).
///
/// This is the central function of this crate. It provides the framework for
/// the testing harness. It does all the printing and house keeping.
///
/// The returned value contains a couple of useful information. See
/// [`Conclusion`] for more information. If `--list` was specified, a list is
/// printed and a dummy `Conclusion` is returned.
pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
let start_instant = Instant::now();
let mut conclusion = Conclusion::empty();
// Apply filtering
if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
let len_before = tests.len() as u64;
tests.retain(|test| !args.is_filtered_out(test));
conclusion.num_filtered_out = len_before - tests.len() as u64;
}
let tests = tests;
// Create printer which is used for all output.
let mut printer = printer::Printer::new(args, &tests);
// If `--list` is specified, just print the list and return.
if args.list {
printer.print_list(&tests, args.ignored);
return Conclusion::empty();
}
// Print number of tests
printer.print_title(tests.len() as u64);
let mut failed_tests = Vec::new();
let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| {
printer.print_single_outcome(&outcome);
// Handle outcome
match outcome {
Outcome::Passed => conclusion.num_passed += 1,
Outcome::Failed(failed) => {
failed_tests.push((test, failed.msg));
conclusion.num_failed += 1;
},
Outcome::Ignored => conclusion.num_ignored += 1,
Outcome::Measured(_) => conclusion.num_measured += 1,
}
};
// Execute all tests.
let test_mode = !args.bench;
if args.test_threads == Some(1) {
// Run test sequentially in main thread
for test in tests {
// Print `test foo ...`, run the test, then print the outcome in
// the same line.
printer.print_test(&test.info);
let outcome = if args.is_ignored(&test) {
Outcome::Ignored
} else {
run_single(test.runner, test_mode)
};
handle_outcome(outcome, test.info, &mut printer);
}
} else {
// Run test in thread pool.
let pool = match args.test_threads {
Some(num_threads) => ThreadPool::new(num_threads),
None => ThreadPool::default()
};
let (sender, receiver) = mpsc::channel();
let num_tests = tests.len();
for test in tests {
if args.is_ignored(&test) {
sender.send((Outcome::Ignored, test.info)).unwrap();
} else {
let sender = sender.clone();
pool.execute(move || {
// It's fine to ignore the result of sending. If the
// receiver has hung up, everything will wind down soon
// anyway.
let outcome = run_single(test.runner, test_mode);
let _ = sender.send((outcome, test.info));
});
}
}
for (outcome, test_info) in receiver.iter().take(num_tests) {
// In multithreaded mode, we do only print the start of the line
// after the test ran, as otherwise it would lead to terribly
// interleaved output.
printer.print_test(&test_info);
handle_outcome(outcome, test_info, &mut printer);
}
}
// Print failures if there were any, and the final summary.
if !failed_tests.is_empty() {
printer.print_failures(&failed_tests);
}
printer.print_summary(&conclusion, start_instant.elapsed());
conclusion
}
/// Runs the given runner, catching any panics and treating them as a failed test.
fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
use std::panic::{catch_unwind, AssertUnwindSafe};
catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
// The `panic` information is just an `Any` object representing the
// value the panic was invoked with. For most panics (which use
// `panic!` like `println!`), this is either `&str` or `String`.
let payload = e.downcast_ref::<String>()
.map(|s| s.as_str())
.or(e.downcast_ref::<&str>().map(|s| *s));
let msg = match payload {
Some(payload) => format!("test panicked: {payload}"),
None => format!("test panicked"),
};
Outcome::Failed(msg.into())
})
}
| Outcome | identifier_name |
lib.rs | //! Write your own tests and benchmarks that look and behave like built-in tests!
//!
//! This is a simple and small test harness that mimics the original `libtest`
//! (used by `cargo test`/`rustc --test`). That means: all output looks pretty
//! much like `cargo test` and most CLI arguments are understood and used. With
//! that plumbing work out of the way, your test runner can focus on the actual
//! testing.
//!
//! For a small real world example, see [`examples/tidy.rs`][1].
//!
//! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs
//!
//! # Usage
//!
//! To use this, you most likely want to add a manual `[[test]]` section to
//! `Cargo.toml` and set `harness = false`. For example:
//!
//! ```toml
//! [[test]]
//! name = "mytest"
//! path = "tests/mytest.rs"
//! harness = false
//! ```
//!
//! And in `tests/mytest.rs` you would call [`run`] in the `main` function:
//!
//! ```no_run
//! use libtest_mimic::{Arguments, Trial};
//!
//!
//! // Parse command line arguments
//! let args = Arguments::from_args();
//!
//! // Create a list of tests and/or benchmarks (in this case: two dummy tests).
//! let tests = vec![
//! Trial::test("succeeding_test", move || Ok(())),
//! Trial::test("failing_test", move || Err("Woops".into())),
//! ];
//!
//! // Run all tests and exit the application appropriatly.
//! libtest_mimic::run(&args, tests).exit();
//! ```
//!
//! Instead of returning `Ok` or `Err` directly, you want to actually perform
//! your tests, of course. See [`Trial::test`] for more information on how to
//! define a test. You can of course list all your tests manually. But in many
//! cases it is useful to generate one test per file in a directory, for
//! example.
//!
//! You can then run `cargo test --test mytest` to run it. To see the CLI
//! arguments supported by this crate, run `cargo test --test mytest -- -h`.
//!
//!
//! # Known limitations and differences to the official test harness
//!
//! `libtest-mimic` works on a best-effort basis: it tries to be as close to
//! `libtest` as possible, but there are differences for a variety of reasons.
//! For example, some rarely used features might not be implemented, some
//! features are extremely difficult to implement, and removing minor,
//! unimportant differences is just not worth the hassle.
//!
//! Some of the notable differences:
//!
//! - Output capture and `--nocapture`: simply not supported. The official
//! `libtest` uses internal `std` functions to temporarily redirect output.
//! `libtest-mimic` cannot use those. See [this issue][capture] for more
//! information.
//! - `--format=json|junit`
//!
//! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9
#![forbid(unsafe_code)]
use std::{process, sync::mpsc, fmt, time::Instant};
mod args;
mod printer;
use printer::Printer;
use threadpool::ThreadPool;
pub use crate::args::{Arguments, ColorSetting, FormatSetting};
/// A single test or benchmark.
///
/// The original `libtest` often calls benchmarks "tests", which is a bit
/// confusing. So in this library, it is called "trial".
///
/// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's
/// `name` is printed and used for filtering. The `runner` is called when the
/// test/benchmark is executed to determine its outcome. If `runner` panics,
/// the trial is considered "failed". If you need the behavior of
/// `#[should_panic]` you need to catch the panic yourself. You likely want to
/// compare the panic payload to an expected value anyway.
pub struct Trial {
runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
info: TestInfo,
}
impl Trial {
/// Creates a (non-benchmark) test with the given name and runner.
///
/// The runner returning `Ok(())` is interpreted as the test passing. If the
/// runner returns `Err(_)`, the test is considered failed.
pub fn test<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce() -> Result<(), Failed> + Send + 'static,
|
/// Creates a benchmark with the given name and runner.
///
/// If the runner's parameter `test_mode` is `true`, the runner function
/// should run all code just once, without measuring, just to make sure it
/// does not panic. If the parameter is `false`, it should perform the
/// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`,
/// but if it's `false`, you have to return a `Measurement`, or else the
/// benchmark is considered a failure.
///
/// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
/// `false` when `--bench` is set. If `--test` is set, benchmarks are not
/// ran at all, and both flags cannot be set at the same time.
pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
where
R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
{
Self {
runner: Box::new(move |test_mode| match runner(test_mode) {
Err(failed) => Outcome::Failed(failed),
Ok(_) if test_mode => Outcome::Passed,
Ok(Some(measurement)) => Outcome::Measured(measurement),
Ok(None)
=> Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()),
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: true,
},
}
}
/// Sets the "kind" of this test/benchmark. If this string is not
/// empty, it is printed in brackets before the test name (e.g.
/// `test [my-kind] test_name`). (Default: *empty*)
///
/// This is the only extension to the original libtest.
pub fn with_kind(self, kind: impl Into<String>) -> Self {
Self {
info: TestInfo {
kind: kind.into(),
..self.info
},
..self
}
}
/// Sets whether or not this test is considered "ignored". (Default: `false`)
///
/// With the built-in test suite, you can annotate `#[ignore]` on tests to
/// not execute them by default (for example because they take a long time
/// or require a special environment). If the `--ignored` flag is set,
/// ignored tests are executed, too.
pub fn with_ignored_flag(self, is_ignored: bool) -> Self {
Self {
info: TestInfo {
is_ignored,
..self.info
},
..self
}
}
/// Returns the name of this trial.
pub fn name(&self) -> &str {
&self.info.name
}
/// Returns the kind of this trial. If you have not set a kind, this is an
/// empty string.
pub fn kind(&self) -> &str {
&self.info.kind
}
/// Returns whether this trial has been marked as *ignored*.
pub fn has_ignored_flag(&self) -> bool {
self.info.is_ignored
}
/// Returns `true` iff this trial is a test (as opposed to a benchmark).
pub fn is_test(&self) -> bool {
!self.info.is_bench
}
/// Returns `true` iff this trial is a benchmark (as opposed to a test).
pub fn is_bench(&self) -> bool {
self.info.is_bench
}
}
impl fmt::Debug for Trial {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
struct OpaqueRunner;
impl fmt::Debug for OpaqueRunner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<runner>")
}
}
f.debug_struct("Test")
.field("runner", &OpaqueRunner)
.field("name", &self.info.name)
.field("kind", &self.info.kind)
.field("is_ignored", &self.info.is_ignored)
.field("is_bench", &self.info.is_bench)
.finish()
}
}
#[derive(Debug)]
struct TestInfo {
name: String,
kind: String,
is_ignored: bool,
is_bench: bool,
}
/// Output of a benchmark.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Measurement {
/// Average time in ns.
pub avg: u64,
/// Variance in ns.
pub variance: u64,
}
/// Indicates that a test/benchmark has failed. Optionally carries a message.
///
/// You usually want to use the `From` impl of this type, which allows you to
/// convert any `T: fmt::Display` (e.g. `String`, `&str`, ...) into `Failed`.
#[derive(Debug, Clone)]
pub struct Failed {
msg: Option<String>,
}
impl Failed {
/// Creates an instance without message.
pub fn without_message() -> Self {
Self { msg: None }
}
/// Returns the message of this instance.
pub fn message(&self) -> Option<&str> {
self.msg.as_deref()
}
}
impl<M: std::fmt::Display> From<M> for Failed {
fn from(msg: M) -> Self {
Self {
msg: Some(msg.to_string())
}
}
}
/// The outcome of performing a test/benchmark.
#[derive(Debug, Clone)]
enum Outcome {
/// The test passed.
Passed,
/// The test or benchmark failed.
Failed(Failed),
/// The test or benchmark was ignored.
Ignored,
/// The benchmark was successfully run.
Measured(Measurement),
}
/// Contains information about the entire test run. Is returned by[`run`].
///
/// This type is marked as `#[must_use]`. Usually, you just call
/// [`exit()`][Conclusion::exit] on the result of `run` to exit the application
/// with the correct exit code. But you can also store this value and inspect
/// its data.
#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"]
pub struct Conclusion {
/// Number of tests and benchmarks that were filtered out (either by the
/// filter-in pattern or by `--skip` arguments).
pub num_filtered_out: u64,
/// Number of passed tests.
pub num_passed: u64,
/// Number of failed tests and benchmarks.
pub num_failed: u64,
/// Number of ignored tests and benchmarks.
pub num_ignored: u64,
/// Number of benchmarks that successfully ran.
pub num_measured: u64,
}
impl Conclusion {
/// Exits the application with an appropriate error code (0 if all tests
/// have passed, 101 if there have been failures).
pub fn exit(&self) -> ! {
self.exit_if_failed();
process::exit(0);
}
/// Exits the application with error code 101 if there were any failures.
/// Otherwise, returns normally.
pub fn exit_if_failed(&self) {
if self.has_failed() {
process::exit(101)
}
}
/// Returns whether there have been any failures.
pub fn has_failed(&self) -> bool {
self.num_failed > 0
}
fn empty() -> Self {
Self {
num_filtered_out: 0,
num_passed: 0,
num_failed: 0,
num_ignored: 0,
num_measured: 0,
}
}
}
impl Arguments {
/// Returns `true` if the given test should be ignored.
fn is_ignored(&self, test: &Trial) -> bool {
(test.info.is_ignored && !self.ignored && !self.include_ignored)
|| (test.info.is_bench && self.test)
|| (!test.info.is_bench && self.bench)
}
fn is_filtered_out(&self, test: &Trial) -> bool {
let test_name = &test.info.name;
// If a filter was specified, apply this
if let Some(filter) = &self.filter {
match self.exact {
true if test_name != filter => return true,
false if !test_name.contains(filter) => return true,
_ => {}
};
}
// If any skip pattern were specified, test for all patterns.
for skip_filter in &self.skip {
match self.exact {
true if test_name == skip_filter => return true,
false if test_name.contains(skip_filter) => return true,
_ => {}
}
}
if self.ignored && !test.info.is_ignored {
return true;
}
false
}
}
/// Runs all given trials (tests & benchmarks).
///
/// This is the central function of this crate. It provides the framework for
/// the testing harness. It does all the printing and house keeping.
///
/// The returned value contains a couple of useful information. See
/// [`Conclusion`] for more information. If `--list` was specified, a list is
/// printed and a dummy `Conclusion` is returned.
pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
let start_instant = Instant::now();
let mut conclusion = Conclusion::empty();
// Apply filtering
if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
let len_before = tests.len() as u64;
tests.retain(|test| !args.is_filtered_out(test));
conclusion.num_filtered_out = len_before - tests.len() as u64;
}
let tests = tests;
// Create printer which is used for all output.
let mut printer = printer::Printer::new(args, &tests);
// If `--list` is specified, just print the list and return.
if args.list {
printer.print_list(&tests, args.ignored);
return Conclusion::empty();
}
// Print number of tests
printer.print_title(tests.len() as u64);
let mut failed_tests = Vec::new();
let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| {
printer.print_single_outcome(&outcome);
// Handle outcome
match outcome {
Outcome::Passed => conclusion.num_passed += 1,
Outcome::Failed(failed) => {
failed_tests.push((test, failed.msg));
conclusion.num_failed += 1;
},
Outcome::Ignored => conclusion.num_ignored += 1,
Outcome::Measured(_) => conclusion.num_measured += 1,
}
};
// Execute all tests.
let test_mode = !args.bench;
if args.test_threads == Some(1) {
// Run test sequentially in main thread
for test in tests {
// Print `test foo ...`, run the test, then print the outcome in
// the same line.
printer.print_test(&test.info);
let outcome = if args.is_ignored(&test) {
Outcome::Ignored
} else {
run_single(test.runner, test_mode)
};
handle_outcome(outcome, test.info, &mut printer);
}
} else {
// Run test in thread pool.
let pool = match args.test_threads {
Some(num_threads) => ThreadPool::new(num_threads),
None => ThreadPool::default()
};
let (sender, receiver) = mpsc::channel();
let num_tests = tests.len();
for test in tests {
if args.is_ignored(&test) {
sender.send((Outcome::Ignored, test.info)).unwrap();
} else {
let sender = sender.clone();
pool.execute(move || {
// It's fine to ignore the result of sending. If the
// receiver has hung up, everything will wind down soon
// anyway.
let outcome = run_single(test.runner, test_mode);
let _ = sender.send((outcome, test.info));
});
}
}
for (outcome, test_info) in receiver.iter().take(num_tests) {
// In multithreaded mode, we do only print the start of the line
// after the test ran, as otherwise it would lead to terribly
// interleaved output.
printer.print_test(&test_info);
handle_outcome(outcome, test_info, &mut printer);
}
}
// Print failures if there were any, and the final summary.
if !failed_tests.is_empty() {
printer.print_failures(&failed_tests);
}
printer.print_summary(&conclusion, start_instant.elapsed());
conclusion
}
/// Runs the given runner, catching any panics and treating them as a failed test.
fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
use std::panic::{catch_unwind, AssertUnwindSafe};
catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
// The `panic` information is just an `Any` object representing the
// value the panic was invoked with. For most panics (which use
// `panic!` like `println!`), this is either `&str` or `String`.
let payload = e.downcast_ref::<String>()
.map(|s| s.as_str())
.or(e.downcast_ref::<&str>().map(|s| *s));
let msg = match payload {
Some(payload) => format!("test panicked: {payload}"),
None => format!("test panicked"),
};
Outcome::Failed(msg.into())
})
}
| {
Self {
runner: Box::new(move |_test_mode| match runner() {
Ok(()) => Outcome::Passed,
Err(failed) => Outcome::Failed(failed),
}),
info: TestInfo {
name: name.into(),
kind: String::new(),
is_ignored: false,
is_bench: false,
},
}
} | identifier_body |
websocket.rs | use serde_json;
use ws::{listen, Handler, Factory, Sender, Handshake, Request, Response as WsResponse, Message, CloseCode};
use ws::{Error as WsError, ErrorKind as WsErrorKind, Result as WsResult};
use graph::{PossibleErr as GraphErr, *};
use std::thread;
use std::thread::{JoinHandle};
use std::fmt;
use std::result;
enum PossibleErr{
Ws(WsError),
String(String),
GraphErr(GraphErr),
JsonErr(serde_json::Error),
Disp(Box<fmt::Display>),
None
}
impl fmt::Display for PossibleErr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::PossibleErr::*;
match *self{
Ws(ref w) => w.fmt(f),
String(ref s) => s.fmt(f),
GraphErr(ref g) => g.fmt(f),
JsonErr(ref j) => j.fmt(f),
Disp(ref d) => d.fmt(f),
None => (None).fmt(f)
}
}
}
type Result<T> = result::Result<T, PossibleErr>;
impl From<::std::option::NoneError> for PossibleErr{
fn from(_: ::std::option::NoneError) -> PossibleErr{
PossibleErr::None
}
}
impl From<WsError> for PossibleErr{
fn from(g: WsError) -> PossibleErr{
PossibleErr::Ws(g)
}
}
impl From<GraphErr> for PossibleErr{
fn from(g: GraphErr) -> PossibleErr{
PossibleErr::GraphErr(g)
}
}
impl From<serde_json::Error> for PossibleErr{
fn from(j: serde_json::Error) -> PossibleErr{
PossibleErr::JsonErr(j)
}
}
fn to_ws_err(e: PossibleErr, kind: WsErrorKind) -> WsError{
use self::GraphErr::Ws as GWs;
use self::PossibleErr::*;
match e{
Ws(w) | GraphErr(GWs(w)) => w,
_ => WsError::new(kind,
format!("{}", e))
}
}
fn to_ws(e: PossibleErr) -> WsError{
to_ws_err(e, WsErrorKind::Internal)
}
impl Into<WsError> for PossibleErr{
fn into(self) -> WsError{
to_ws(self)
}
}
fn decode_command(msg: Message) -> Result<Command>{
match msg{
Message::Text(t) => {
Ok(serde_json::from_str(&t[..])?)
},
Message::Binary(..) =>
Err(WsError::new(
WsErrorKind::Protocol,
format!("binary message received where expecting text JSON")).into())
}
}
fn encode_response(response: Response) -> WsResult<Message> |
fn encode_update<T: Into<Update>>(update: T) -> WsResult<Message>{
match serde_json::to_string(&update.into()){
Ok(s) => Ok(Message::Text(s)),
Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_update failed {:?}", e)))
}
}
struct ClientCommon;
impl ClientCommon{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId,
client_type: ClientType) -> Result<()>{
if let Ok(_) = store.attach(id, client_type, out.clone()){
trace!("Client supplied valid GraphId {}", id);
Ok(())
}
else{
let err = format!("GraphId {} does not exist", id);
out.send(encode_response(Response::Error(DataValue::from(err.clone())))?)?;
Ok(()) //Err(WsError::new(WsErrorKind::Protocol, err))
}
}
fn on_command(_out: &Sender, store: &GraphStore,
command: &Command, graph: GraphId,
_client_type: ClientType) -> Result<Option<Response>> {
use graph::Command::*;
let response = match *command{
AddLink{ source: ref from, target: ref to } => {
let graph = store.get(graph)?;
let result = graph.add_link(from, to);
match result{
Response::Ok => {
graph.repeat_to(ClientType::Both, encode_update(command.clone())?);
Response::Ok
}
_ => result
}
},
_ => {return Ok(None)}
};
Ok(Some(response))
}
}
#[derive(Copy,Clone)]
struct FrontendClient{
graph: GraphId
}
impl FrontendClient{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{
ClientCommon::on_open(out, store, id, ClientType::Frontend)?;
trace!("Frontend attached to GraphId {}", id);
out.send(
encode_update(
Command::SetGraph{
graph: store.get(id)?.data.clone()
}
)?
)?;
out.send(
encode_update(
Response::Warning("Test Warning".into())
)?
)?;
Ok(FrontendClient{ graph: id })
}
fn on_command(&self, out: &Sender, store: &GraphStore,
command: &Command) -> Result<Response> {
//use graph::Command::*;
if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, ClientType::Frontend)?{
return Ok(common);
}
match *command{
_ => Err(WsError::new(
WsErrorKind::Protocol,
format!("Expected Frontend command, got {:?}",
command)).into())
}
}
}
#[derive(Copy,Clone)]
struct BackendClient{
graph: GraphId
}
impl BackendClient{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{
ClientCommon::on_open(out, store, id, ClientType::Backend)?;
trace!("Backend attached to GraphId {}", id);
Ok(BackendClient{ graph: id })
}
fn on_command(&self, out: &Sender, store: &GraphStore,
command: &Command) -> Result<Response> {
use graph::Command::*;
let client_type = ClientType::Backend;
if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, client_type.clone())?{
return Ok(common);
}
match *command{
SetGraph{ ref graph } => Ok({
trace!("set graph {:?}", graph);
store.set_graph(self.graph, graph.clone())?;
store.repeat_to(self.graph, client_type.opposite(),
encode_update(command.clone())?)?;
Response::Ok
}),
SetData{ ref id, ref value } => Ok({
trace!("set data {:?} = {:?}", id, value);
//store.set_data(self.graph, id, value)?;
store.repeat_to(self.graph, client_type.opposite(),
encode_update(command.clone())?)?;
Response::Ok
}),
_ => Err(WsError::new(WsErrorKind::Protocol,
format!("Expected Backend command, got {:?}",
command)).into())
}
}
}
#[derive(Copy,Clone)]
enum ClientState{
Frontend(FrontendClient),
Backend(BackendClient),
AwaitingType
}
struct ServerHandler{
out: Sender,
store: GraphStore,
state: ClientState,
addr: String
}
impl ServerHandler{
fn on_open_inner(&mut self, hs: Handshake) -> Result<()>{
if let Some(ip_addr) = hs.peer_addr {
let ip_string = format!("{}", ip_addr);
info!("{:>20} - connection {:?} established", ip_string, self.out.token());
self.addr = ip_string;
}
else{
debug!("Connection without IP address?");
}
self.out.send(
serde_json::to_string(
&self.store.list())?
)?;
Ok(())
}
fn on_message_inner(&mut self, msg: Message) -> Result<()> {
use self::ClientState::*;
use graph::Command::{FrontendAttach, BackendAttach};
let command = decode_command(msg)?;
let response = match self.state.clone() {
Frontend(client) =>
client.on_command(&self.out, &self.store, &command)?,
Backend(client) => // different type for client than the above match
client.on_command(&self.out, &self.store, &command)?,
AwaitingType => {
let out = &self.out;
let store = &self.store;
let state = match command{
FrontendAttach{ id } =>
Frontend(FrontendClient::on_open(out, store, id)?),
BackendAttach { id } => {
let id = match id{
Some(id) if self.store.contains_key(id) => id,
Some(id) => self.store.empty_at(id),
None => self.store.new_empty()
};
Backend(BackendClient::on_open(out, store, id)?)
},
_ =>
return Err(WsError::new(WsErrorKind::Protocol,
"Expected FrontendAttach or BackendAttach, got something else").into())
};
self.state = state;
Response::Ok
}
};
if response != Response::Ok{ // don't generate Ok messages, they're pointless and hard to coordinate
self.out.send(encode_response(response)?)?
}
Ok(())
}
}
impl Handler for ServerHandler{
fn on_open(&mut self, hs: Handshake) -> WsResult<()>{
self.on_open_inner(hs).map_err(|e|e.into())
}
fn on_request(&mut self, req: &Request) -> WsResult<WsResponse> {
let mut res = WsResponse::from_request(req)?;
let protocol_name = "selenologist-node-editor";
res.set_protocol(protocol_name);
Ok(res)
}
fn on_message(&mut self, msg: Message) -> WsResult<()> {
self.on_message_inner(msg).map_err(|e|e.into())
}
fn on_close(&mut self, code: CloseCode, reason: &str){
use self::ClientState::*;
trace!("Closing connection {:?} because {:?} {}", self.addr, code, reason);
match self.state{
Backend(BackendClient{ graph }) |
Frontend(FrontendClient{ graph }) => {
self.store.remove_listener(graph, self.out.token().0)
.unwrap();
}
_ => {}
}
}
}
#[derive(Default)]
struct ServerFactory{
store: GraphStore,
}
impl Factory for ServerFactory{
type Handler = ServerHandler;
fn connection_made(&mut self, out: Sender) -> Self::Handler{
ServerHandler{
out,
store: self.store.clone(),
state: ClientState::AwaitingType,
addr: "0.0.0.0:0".into()
}
}
}
pub fn launch_thread()
-> JoinHandle<()>
{
thread::Builder::new()
.name("websocket".into())
.spawn(move || {
let mut factory = ServerFactory::default();
let listen_addr = "127.0.0.1:3001";
info!("Attempting to listen on {}", listen_addr);
listen(listen_addr, |out| factory.connection_made(out)).unwrap()
}).unwrap()
}
| {
match serde_json::to_string(&response){
Ok(s) => Ok(Message::Text(s)),
Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_response failed {:?}", e)))
}
} | identifier_body |
websocket.rs | use serde_json;
use ws::{listen, Handler, Factory, Sender, Handshake, Request, Response as WsResponse, Message, CloseCode};
use ws::{Error as WsError, ErrorKind as WsErrorKind, Result as WsResult};
use graph::{PossibleErr as GraphErr, *};
use std::thread;
use std::thread::{JoinHandle};
use std::fmt;
use std::result;
enum PossibleErr{
Ws(WsError),
String(String),
GraphErr(GraphErr),
JsonErr(serde_json::Error),
Disp(Box<fmt::Display>),
None
}
impl fmt::Display for PossibleErr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::PossibleErr::*;
match *self{
Ws(ref w) => w.fmt(f),
String(ref s) => s.fmt(f),
GraphErr(ref g) => g.fmt(f),
JsonErr(ref j) => j.fmt(f),
Disp(ref d) => d.fmt(f),
None => (None).fmt(f)
}
}
}
type Result<T> = result::Result<T, PossibleErr>;
impl From<::std::option::NoneError> for PossibleErr{
fn from(_: ::std::option::NoneError) -> PossibleErr{
PossibleErr::None
}
}
impl From<WsError> for PossibleErr{
fn from(g: WsError) -> PossibleErr{
PossibleErr::Ws(g)
}
}
impl From<GraphErr> for PossibleErr{
fn from(g: GraphErr) -> PossibleErr{
PossibleErr::GraphErr(g)
}
}
impl From<serde_json::Error> for PossibleErr{
fn from(j: serde_json::Error) -> PossibleErr{
PossibleErr::JsonErr(j)
}
}
fn to_ws_err(e: PossibleErr, kind: WsErrorKind) -> WsError{
use self::GraphErr::Ws as GWs;
use self::PossibleErr::*;
match e{
Ws(w) | GraphErr(GWs(w)) => w,
_ => WsError::new(kind,
format!("{}", e))
}
}
fn to_ws(e: PossibleErr) -> WsError{
to_ws_err(e, WsErrorKind::Internal)
}
impl Into<WsError> for PossibleErr{
fn into(self) -> WsError{
to_ws(self)
}
}
fn decode_command(msg: Message) -> Result<Command>{
match msg{
Message::Text(t) => {
Ok(serde_json::from_str(&t[..])?)
},
Message::Binary(..) =>
Err(WsError::new(
WsErrorKind::Protocol,
format!("binary message received where expecting text JSON")).into())
}
}
fn encode_response(response: Response) -> WsResult<Message>{
match serde_json::to_string(&response){
Ok(s) => Ok(Message::Text(s)),
Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_response failed {:?}", e)))
}
}
fn encode_update<T: Into<Update>>(update: T) -> WsResult<Message>{
match serde_json::to_string(&update.into()){
Ok(s) => Ok(Message::Text(s)),
Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_update failed {:?}", e)))
}
}
struct ClientCommon;
impl ClientCommon{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId,
client_type: ClientType) -> Result<()>{
if let Ok(_) = store.attach(id, client_type, out.clone()){
trace!("Client supplied valid GraphId {}", id);
Ok(())
}
else{
let err = format!("GraphId {} does not exist", id);
out.send(encode_response(Response::Error(DataValue::from(err.clone())))?)?;
Ok(()) //Err(WsError::new(WsErrorKind::Protocol, err))
}
}
fn on_command(_out: &Sender, store: &GraphStore,
command: &Command, graph: GraphId,
_client_type: ClientType) -> Result<Option<Response>> {
use graph::Command::*;
let response = match *command{
AddLink{ source: ref from, target: ref to } => {
let graph = store.get(graph)?;
let result = graph.add_link(from, to);
match result{
Response::Ok => {
graph.repeat_to(ClientType::Both, encode_update(command.clone())?);
Response::Ok
}
_ => result
}
},
_ => {return Ok(None)}
};
Ok(Some(response))
}
}
#[derive(Copy,Clone)]
struct FrontendClient{
graph: GraphId
}
impl FrontendClient{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{
ClientCommon::on_open(out, store, id, ClientType::Frontend)?;
trace!("Frontend attached to GraphId {}", id);
out.send(
encode_update(
Command::SetGraph{
graph: store.get(id)?.data.clone()
}
)?
)?;
out.send(
encode_update(
Response::Warning("Test Warning".into())
)?
)?;
Ok(FrontendClient{ graph: id })
}
fn on_command(&self, out: &Sender, store: &GraphStore,
command: &Command) -> Result<Response> {
//use graph::Command::*;
if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, ClientType::Frontend)?{
return Ok(common);
}
match *command{
_ => Err(WsError::new(
WsErrorKind::Protocol,
format!("Expected Frontend command, got {:?}",
command)).into())
}
}
}
#[derive(Copy,Clone)]
struct BackendClient{
graph: GraphId
}
impl BackendClient{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{
ClientCommon::on_open(out, store, id, ClientType::Backend)?;
trace!("Backend attached to GraphId {}", id);
Ok(BackendClient{ graph: id })
}
fn on_command(&self, out: &Sender, store: &GraphStore,
command: &Command) -> Result<Response> {
use graph::Command::*;
let client_type = ClientType::Backend;
if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, client_type.clone())?{
return Ok(common);
}
match *command{
SetGraph{ ref graph } => Ok({
trace!("set graph {:?}", graph);
store.set_graph(self.graph, graph.clone())?;
store.repeat_to(self.graph, client_type.opposite(),
encode_update(command.clone())?)?;
Response::Ok
}),
SetData{ ref id, ref value } => Ok({
trace!("set data {:?} = {:?}", id, value);
//store.set_data(self.graph, id, value)?;
store.repeat_to(self.graph, client_type.opposite(),
encode_update(command.clone())?)?;
Response::Ok
}),
_ => Err(WsError::new(WsErrorKind::Protocol,
format!("Expected Backend command, got {:?}",
command)).into())
}
}
}
#[derive(Copy,Clone)]
enum ClientState{
Frontend(FrontendClient),
Backend(BackendClient),
AwaitingType
}
struct ServerHandler{
out: Sender,
store: GraphStore,
state: ClientState,
addr: String
}
impl ServerHandler{
fn on_open_inner(&mut self, hs: Handshake) -> Result<()>{
if let Some(ip_addr) = hs.peer_addr {
let ip_string = format!("{}", ip_addr);
info!("{:>20} - connection {:?} established", ip_string, self.out.token());
self.addr = ip_string;
}
else{
debug!("Connection without IP address?");
}
self.out.send(
serde_json::to_string(
&self.store.list())?
)?;
Ok(())
}
fn on_message_inner(&mut self, msg: Message) -> Result<()> {
use self::ClientState::*;
use graph::Command::{FrontendAttach, BackendAttach};
let command = decode_command(msg)?;
let response = match self.state.clone() {
Frontend(client) =>
client.on_command(&self.out, &self.store, &command)?,
Backend(client) => // different type for client than the above match
client.on_command(&self.out, &self.store, &command)?,
AwaitingType => {
let out = &self.out;
let store = &self.store;
let state = match command{
FrontendAttach{ id } =>
Frontend(FrontendClient::on_open(out, store, id)?),
BackendAttach { id } => {
let id = match id{
Some(id) if self.store.contains_key(id) => id,
Some(id) => self.store.empty_at(id),
None => self.store.new_empty()
};
Backend(BackendClient::on_open(out, store, id)?)
},
_ =>
return Err(WsError::new(WsErrorKind::Protocol,
"Expected FrontendAttach or BackendAttach, got something else").into())
};
self.state = state;
Response::Ok
}
};
if response != Response::Ok{ // don't generate Ok messages, they're pointless and hard to coordinate
self.out.send(encode_response(response)?)?
}
Ok(())
}
}
impl Handler for ServerHandler{
fn on_open(&mut self, hs: Handshake) -> WsResult<()>{
self.on_open_inner(hs).map_err(|e|e.into())
}
fn on_request(&mut self, req: &Request) -> WsResult<WsResponse> {
let mut res = WsResponse::from_request(req)?;
let protocol_name = "selenologist-node-editor";
res.set_protocol(protocol_name);
Ok(res)
}
fn on_message(&mut self, msg: Message) -> WsResult<()> {
self.on_message_inner(msg).map_err(|e|e.into())
}
fn on_close(&mut self, code: CloseCode, reason: &str){
use self::ClientState::*;
trace!("Closing connection {:?} because {:?} {}", self.addr, code, reason);
match self.state{
Backend(BackendClient{ graph }) |
Frontend(FrontendClient{ graph }) => {
self.store.remove_listener(graph, self.out.token().0)
.unwrap();
}
_ => {}
}
}
}
#[derive(Default)] | }
impl Factory for ServerFactory{
type Handler = ServerHandler;
fn connection_made(&mut self, out: Sender) -> Self::Handler{
ServerHandler{
out,
store: self.store.clone(),
state: ClientState::AwaitingType,
addr: "0.0.0.0:0".into()
}
}
}
pub fn launch_thread()
-> JoinHandle<()>
{
thread::Builder::new()
.name("websocket".into())
.spawn(move || {
let mut factory = ServerFactory::default();
let listen_addr = "127.0.0.1:3001";
info!("Attempting to listen on {}", listen_addr);
listen(listen_addr, |out| factory.connection_made(out)).unwrap()
}).unwrap()
} | struct ServerFactory{
store: GraphStore, | random_line_split |
websocket.rs | use serde_json;
use ws::{listen, Handler, Factory, Sender, Handshake, Request, Response as WsResponse, Message, CloseCode};
use ws::{Error as WsError, ErrorKind as WsErrorKind, Result as WsResult};
use graph::{PossibleErr as GraphErr, *};
use std::thread;
use std::thread::{JoinHandle};
use std::fmt;
use std::result;
enum PossibleErr{
Ws(WsError),
String(String),
GraphErr(GraphErr),
JsonErr(serde_json::Error),
Disp(Box<fmt::Display>),
None
}
impl fmt::Display for PossibleErr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::PossibleErr::*;
match *self{
Ws(ref w) => w.fmt(f),
String(ref s) => s.fmt(f),
GraphErr(ref g) => g.fmt(f),
JsonErr(ref j) => j.fmt(f),
Disp(ref d) => d.fmt(f),
None => (None).fmt(f)
}
}
}
type Result<T> = result::Result<T, PossibleErr>;
impl From<::std::option::NoneError> for PossibleErr{
fn from(_: ::std::option::NoneError) -> PossibleErr{
PossibleErr::None
}
}
impl From<WsError> for PossibleErr{
fn from(g: WsError) -> PossibleErr{
PossibleErr::Ws(g)
}
}
impl From<GraphErr> for PossibleErr{
fn from(g: GraphErr) -> PossibleErr{
PossibleErr::GraphErr(g)
}
}
impl From<serde_json::Error> for PossibleErr{
fn from(j: serde_json::Error) -> PossibleErr{
PossibleErr::JsonErr(j)
}
}
fn to_ws_err(e: PossibleErr, kind: WsErrorKind) -> WsError{
use self::GraphErr::Ws as GWs;
use self::PossibleErr::*;
match e{
Ws(w) | GraphErr(GWs(w)) => w,
_ => WsError::new(kind,
format!("{}", e))
}
}
fn to_ws(e: PossibleErr) -> WsError{
to_ws_err(e, WsErrorKind::Internal)
}
impl Into<WsError> for PossibleErr{
fn into(self) -> WsError{
to_ws(self)
}
}
fn decode_command(msg: Message) -> Result<Command>{
match msg{
Message::Text(t) => {
Ok(serde_json::from_str(&t[..])?)
},
Message::Binary(..) =>
Err(WsError::new(
WsErrorKind::Protocol,
format!("binary message received where expecting text JSON")).into())
}
}
fn encode_response(response: Response) -> WsResult<Message>{
match serde_json::to_string(&response){
Ok(s) => Ok(Message::Text(s)),
Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_response failed {:?}", e)))
}
}
fn encode_update<T: Into<Update>>(update: T) -> WsResult<Message>{
match serde_json::to_string(&update.into()){
Ok(s) => Ok(Message::Text(s)),
Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_update failed {:?}", e)))
}
}
struct ClientCommon;
impl ClientCommon{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId,
client_type: ClientType) -> Result<()>{
if let Ok(_) = store.attach(id, client_type, out.clone()){
trace!("Client supplied valid GraphId {}", id);
Ok(())
}
else{
let err = format!("GraphId {} does not exist", id);
out.send(encode_response(Response::Error(DataValue::from(err.clone())))?)?;
Ok(()) //Err(WsError::new(WsErrorKind::Protocol, err))
}
}
fn on_command(_out: &Sender, store: &GraphStore,
command: &Command, graph: GraphId,
_client_type: ClientType) -> Result<Option<Response>> {
use graph::Command::*;
let response = match *command{
AddLink{ source: ref from, target: ref to } => {
let graph = store.get(graph)?;
let result = graph.add_link(from, to);
match result{
Response::Ok => {
graph.repeat_to(ClientType::Both, encode_update(command.clone())?);
Response::Ok
}
_ => result
}
},
_ => {return Ok(None)}
};
Ok(Some(response))
}
}
#[derive(Copy,Clone)]
struct FrontendClient{
graph: GraphId
}
impl FrontendClient{
fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{
ClientCommon::on_open(out, store, id, ClientType::Frontend)?;
trace!("Frontend attached to GraphId {}", id);
out.send(
encode_update(
Command::SetGraph{
graph: store.get(id)?.data.clone()
}
)?
)?;
out.send(
encode_update(
Response::Warning("Test Warning".into())
)?
)?;
Ok(FrontendClient{ graph: id })
}
fn on_command(&self, out: &Sender, store: &GraphStore,
command: &Command) -> Result<Response> {
//use graph::Command::*;
if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, ClientType::Frontend)?{
return Ok(common);
}
match *command{
_ => Err(WsError::new(
WsErrorKind::Protocol,
format!("Expected Frontend command, got {:?}",
command)).into())
}
}
}
#[derive(Copy,Clone)]
struct BackendClient{
graph: GraphId
}
impl BackendClient{
fn | (out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{
ClientCommon::on_open(out, store, id, ClientType::Backend)?;
trace!("Backend attached to GraphId {}", id);
Ok(BackendClient{ graph: id })
}
fn on_command(&self, out: &Sender, store: &GraphStore,
command: &Command) -> Result<Response> {
use graph::Command::*;
let client_type = ClientType::Backend;
if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, client_type.clone())?{
return Ok(common);
}
match *command{
SetGraph{ ref graph } => Ok({
trace!("set graph {:?}", graph);
store.set_graph(self.graph, graph.clone())?;
store.repeat_to(self.graph, client_type.opposite(),
encode_update(command.clone())?)?;
Response::Ok
}),
SetData{ ref id, ref value } => Ok({
trace!("set data {:?} = {:?}", id, value);
//store.set_data(self.graph, id, value)?;
store.repeat_to(self.graph, client_type.opposite(),
encode_update(command.clone())?)?;
Response::Ok
}),
_ => Err(WsError::new(WsErrorKind::Protocol,
format!("Expected Backend command, got {:?}",
command)).into())
}
}
}
#[derive(Copy,Clone)]
enum ClientState{
Frontend(FrontendClient),
Backend(BackendClient),
AwaitingType
}
struct ServerHandler{
out: Sender,
store: GraphStore,
state: ClientState,
addr: String
}
impl ServerHandler{
fn on_open_inner(&mut self, hs: Handshake) -> Result<()>{
if let Some(ip_addr) = hs.peer_addr {
let ip_string = format!("{}", ip_addr);
info!("{:>20} - connection {:?} established", ip_string, self.out.token());
self.addr = ip_string;
}
else{
debug!("Connection without IP address?");
}
self.out.send(
serde_json::to_string(
&self.store.list())?
)?;
Ok(())
}
fn on_message_inner(&mut self, msg: Message) -> Result<()> {
use self::ClientState::*;
use graph::Command::{FrontendAttach, BackendAttach};
let command = decode_command(msg)?;
let response = match self.state.clone() {
Frontend(client) =>
client.on_command(&self.out, &self.store, &command)?,
Backend(client) => // different type for client than the above match
client.on_command(&self.out, &self.store, &command)?,
AwaitingType => {
let out = &self.out;
let store = &self.store;
let state = match command{
FrontendAttach{ id } =>
Frontend(FrontendClient::on_open(out, store, id)?),
BackendAttach { id } => {
let id = match id{
Some(id) if self.store.contains_key(id) => id,
Some(id) => self.store.empty_at(id),
None => self.store.new_empty()
};
Backend(BackendClient::on_open(out, store, id)?)
},
_ =>
return Err(WsError::new(WsErrorKind::Protocol,
"Expected FrontendAttach or BackendAttach, got something else").into())
};
self.state = state;
Response::Ok
}
};
if response != Response::Ok{ // don't generate Ok messages, they're pointless and hard to coordinate
self.out.send(encode_response(response)?)?
}
Ok(())
}
}
impl Handler for ServerHandler{
fn on_open(&mut self, hs: Handshake) -> WsResult<()>{
self.on_open_inner(hs).map_err(|e|e.into())
}
fn on_request(&mut self, req: &Request) -> WsResult<WsResponse> {
let mut res = WsResponse::from_request(req)?;
let protocol_name = "selenologist-node-editor";
res.set_protocol(protocol_name);
Ok(res)
}
fn on_message(&mut self, msg: Message) -> WsResult<()> {
self.on_message_inner(msg).map_err(|e|e.into())
}
fn on_close(&mut self, code: CloseCode, reason: &str){
use self::ClientState::*;
trace!("Closing connection {:?} because {:?} {}", self.addr, code, reason);
match self.state{
Backend(BackendClient{ graph }) |
Frontend(FrontendClient{ graph }) => {
self.store.remove_listener(graph, self.out.token().0)
.unwrap();
}
_ => {}
}
}
}
#[derive(Default)]
struct ServerFactory{
store: GraphStore,
}
impl Factory for ServerFactory{
type Handler = ServerHandler;
fn connection_made(&mut self, out: Sender) -> Self::Handler{
ServerHandler{
out,
store: self.store.clone(),
state: ClientState::AwaitingType,
addr: "0.0.0.0:0".into()
}
}
}
pub fn launch_thread()
-> JoinHandle<()>
{
thread::Builder::new()
.name("websocket".into())
.spawn(move || {
let mut factory = ServerFactory::default();
let listen_addr = "127.0.0.1:3001";
info!("Attempting to listen on {}", listen_addr);
listen(listen_addr, |out| factory.connection_made(out)).unwrap()
}).unwrap()
}
| on_open | identifier_name |
ecxgboost_predicttemp_gpu_multi_live_hourly.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
author: yetao.lu
date: 2018/11/8
description:根据所有模型用一个卡串行,修改为一个模型用一个看,服务器有四个看,至少可以4个并行
"""
import logging,sys,datetime,os,pygrib,numpy,MySQLdb,time,math
import xgboost,multiprocessing
from sklearn.externals import joblib
from apscheduler.schedulers.background import BackgroundScheduler
def getStationList(csvfile):
stationlist=[]
fileread=open(csvfile,'r')
firstline=fileread.readline()
while True:
line=fileread.readline()
perlist=line.split(',')
if len(perlist)>=4:
stationlist.append(perlist)
if not line or line=='':
break
return stationlist
def calculate16gribvalue(latlonArray,indexlat,indexlon,vstring):
vstring.append( | ][indexlon])
vstring.append(latlonArray[indexlat][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon - 1])
vstring.append(latlonArray[indexlat - 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon + 1])
vstring.append(latlonArray[indexlat - 1][indexlon + 2])
vstring.append(latlonArray[indexlat][indexlon + 2])
vstring.append(latlonArray[indexlat + 1][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 1])
vstring.append(latlonArray[indexlat + 2][indexlon])
vstring.append(latlonArray[indexlat + 2][indexlon - 1])
vstring.append(latlonArray[indexlat + 1][indexlon - 1])
vstring.append(latlonArray[indexlat][indexlon - 1])
return vstring
def calculatedemvalue(demcsv):
demdict={}
csvread=open(demcsv,'r')
while True:
line=csvread.readline()
linearray=line.split(',')
if len(linearray)>2:
demdict[linearray[0]]=linearray
if not line:
break
return demdict
'''
遍历文件要放在外层,这里是单个文件进行预测
'''
def predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,foretime,gpu):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
allvaluelist=[]
if file[-3:]=='001' and (file[:3]=='D2S' or file[:3]=='D2D'):
grbs=pygrib.open(filefullpath)
grb_2t = grbs.select(name='2 metre temperature')
tempArray = grb_2t[0].values
grb_2d = grbs.select(name='2 metre dewpoint temperature')
dewpointArray = grb_2d[0].values
grb_10u = grbs.select(name='10 metre U wind component')
u10Array = grb_10u[0].values
grb_10v = grbs.select(name='10 metre V wind component')
v10Array = grb_10v[0].values
grb_tcc = grbs.select(name='Total cloud cover')
tccArray = grb_tcc[0].values
grb_lcc = grbs.select(name='Low cloud cover')
lccArray = grb_lcc[0].values
grb_z = grbs.select(name='Geopotential')
geoArray=grb_z[0].values
grb_500rh = grbs.select(name='Relative humidity', level=500)
rh500Array = grb_500rh[0].values
grb_850rh = grbs.select(name='Relative humidity', level=850)
rh850Array = grb_850rh[0].values
#遍历站点->要素遍历、
for i in range(len(stationlist)):
#print len(stationlist)
perlist=stationlist[i]
stationid=perlist[0]
latitude=float(perlist[1])
longitude=float(perlist[2])
alti=float(perlist[3])
#站点左上角点的索引
indexlat = int((60 - latitude) / 0.1)
indexlon = int((longitude -60) / 0.1)
per_station_value_list=[]
calculate16gribvalue(tempArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(dewpointArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(u10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(v10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(tccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(lccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(geoArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh500Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh850Array,indexlat,indexlon,per_station_value_list)
per_station_value_list.append(latitude)
per_station_value_list.append(longitude)
per_station_value_list.append(alti)
# 站点高程:取计算好的站点周边16个点的高程值
demlist = demdict[stationlist[i][0]]
for u in range(1, len(demlist), 1):
per_station_value_list.append(float(demlist[u]))
allvaluelist.append(per_station_value_list)
#print(per_station_value_list)
trainarray=numpy.array(allvaluelist)
params001 = {
'tree_method': 'gpu_hist',
'booster': 'gbtree',
'objective': 'reg:linear', # 线性回归
'gamma': 0.2, # 用于控制是否后剪枝的参数,越大越保守,一般0.1、0.2这样子。
'max_depth': 12, # 构建树的深度,越大越容易过拟合
'lambda': 2, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。
'subsample': 0.7, # 随机采样训练样本
'colsample_bytree': 0.7, # 生成树时进行的列采样
'min_child_weight': 3,
# 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言
# ,假设 h 在 0.01 附近,min_child_weight 为 1 意味着叶子节点中最少需要包含 100 个样本。
# 这个参数非常影响结果,控制叶子节点中二阶导的和的最小值,该参数值越小,越容易 overfitting。
'silent': 0, # 设置成1则没有运行信息输出,最好是设置为0.
'eta': 0.01, # 如同学习率
'seed': 1000,
# 'nthread':3,# cpu 线程数,不设置取最大值
# 'eval_metric': 'auc'
'scale_pos_weight': 1,
'n_gpus': 1
}
xgbst=xgboost.Booster(params001)
xgbst.load_model(modelfile)
scaler=joblib.load(scalefile)
#print(modelfile,scalefile)
trainarray_t=scaler.transform(trainarray)
#标准化后的矩阵坑我2次了:看好是标准化后的还是标准化前的
xgbtrain=xgboost.DMatrix(trainarray_t)
result=xgbst.predict(xgbtrain)
#print(result)
logger.info(result)
#结果入库
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
#db = MySQLdb.connect('192.168.10.84', 'admin', 'moji_China_123','moge')
cursor = db.cursor()
origin = datetime.datetime.strftime(origintime, '%Y-%m-%d %H:%M:%S')
forecast = datetime.datetime.strftime(foretime, '%Y-%m-%d %H:%M:%S')
forecast_year = foretime.year
forecast_month = foretime.month
forecast_day = foretime.day
forecast_hour = foretime.hour
forecast_minute = foretime.minute
timestr = datetime.datetime.strftime(origintime, '%Y%m%d%H%M%S')
# csv = os.path.join(outpath, origin+'_'+forecast + '.csv')
# csvfile = open(csv, 'w')
sql = 'replace into t_r_ec_city_forecast_ele_mos_dem (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES(%s,%s,%s,%s,%s,%s,%s,%s)'
L = []
for j in range(len(stationlist)):
perstationlist = []
stationid = stationlist[j][0]
temp = result[j]
# 每个站点存储
perstationlist.append(stationid)
perstationlist.append(origin)
perstationlist.append(forecast)
perstationlist.append(forecast_year)
perstationlist.append(forecast_month)
perstationlist.append(forecast_day)
perstationlist.append(forecast_hour)
perstationlist.append(temp)
L.append(perstationlist)
#logger.info(perstationlist)
# # sql='insert into t_r_ec_mos_city_forecast_ele(city_id,initial_time,forecast_time,forecsat_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES ()'
# # sql = 'insert into t_r_ec_city_forecast_ele_mos (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature,temp_max_6h,temp_min_6h,rainstate,precipitation)VALUES ("' + stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(temp) + '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue)+ '")
# # csvfile.write(stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(forecast_minute) + '","' + str(
# # temp)+ '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue))
# # csvfile.write('\n')
# # print sql
# # cursor.execute(sql)
cursor.executemany(sql, L)
db.commit()
db.close()
def calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, ii):
initialtime = datetime.datetime.strptime(initial, '%Y-%m-%d %H:%M:%S')
year002 = initialtime.year
month002 = initialtime.month
day002 = initialtime.day
hour002 = initialtime.hour
cursor001 = db.cursor()
#选择MOS统计之后的24小时平均气温、24小时气温最大值,24小时气温最小值
sql = 'select city_id,initial_time,avg(temperature),max(temperature),min(temperature) from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '" group by city_id'
print sql
cursor001.execute(sql)
rows = cursor001.fetchall()
if rows==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#实况数据
sql_live = 'select v01000,AVG(TEM),MAX(TEM_Max),MIN(TEM_Min) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM<90 and TEM_Max<90 and TEM_Min<90 group by v01000;'
print sql_live
#logger.info(sql_live)
cursor = db.cursor()
cursor.execute(sql_live)
rows_live = cursor.fetchall()
#print len(rows_live)
rowdict = {}
nn = 0
# 3度的平均气温准确率
na_avg = 0
# 3度的气温最大值准确率:用逐小时的气温最大值和预报最高气温计算
na_max = 0
# 3度的气温最小值准确率:用逐小时的气温最小值和预报最低气温计算
na_min = 0
#实况数据存入字典
for row_live in rows_live:
rowdict[row_live[0]] = row_live
#预报站点在实况字典里,计算accu
for row in rows:
if row[0] in rowdict.keys():
nn = nn + 1
#print rowdict[row[0]]
avg_temp_live = float(rowdict[row[0]][1])
avg_temp = float(row[2])
if abs(avg_temp_live - avg_temp) < 3:
na_avg = na_avg + 1
avg_mmax_live = float(rowdict[row[0]][2])
max_temp = float(row[3])
if abs(avg_mmax_live - max_temp) < 3:
na_max = na_max + 1
avg_mmin_live = float(rowdict[row[0]][3])
min_temp = float(row[4])
if abs(avg_mmin_live - min_temp) < 3:
na_min = na_min + 1
# 把一条SQL的值加载list中,然后把这个list合并到总的list中
#print na_avg, na_max, na_min, na_mmax, na_mmin
cursor = db.cursor()
sql='replace into t_r_avg_temp_est_daily_mos_dem(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min)VALUES ("'+initial+'","'+str(ii+1)+'","'+str(year002)+'","'+str(month002)+'","'+str(day002)+'","'+str(hour002)+'","'+str(nn)+'","'+str(na_avg)+'","'+str(na_max)+'","'+str(na_min)+'")'
#sql = 'insert into t_r_avg_temp_est_daily_mos(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min,na_mmax,na_mmin)VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
print sql
cursor.execute(sql)
db.commit()
'''
ii表示预报小时数
'''
def calculate_temp3h_accurity(db,initial,pdate,odate,ii):
#72小时气温的准确率
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '"'
print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#取逐小时的共3h的均值
sql_live = 'select v01000,AVG(TEM) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM <80 group by v01000;'
print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into)
db.commit()
#EC每个预报时刻的准确率
def calculate_temp_h_accurity(db,initial,odate,ii):
#计算一个起始预报时次的所有预报的准确率。
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time="' + odate + '"'
#print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#这里不应该去均值,而是取点值,即预报的时刻的温度值。
sql_live = 'select v01000,TEM from t_r_surf_hour_ele where vdate="' + odate + '" and TEM <80'
#print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
#print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into)
db.commit()
def readymodel(path):
starttime=datetime.datetime.now()
year_a=starttime.year
month_a=starttime.month
day_a=starttime.day
hour_a=starttime.hour
logger.info(starttime,year_a,month_a,day_a,hour_a)
starttime001=datetime.datetime(year_a,month_a,day_a,hour_a,0,0)
#outpath='/home/wlan_dev/mos/mosresult'
csvfile='/home/wlan_dev/mos/moslive_winter/stations.csv'
demcsv='/home/wlan_dev/mos/moslive_winter/dem0.1.csv'
#给定的时间是实时时间凌晨3点,计算的是前一天的12时,相差15个小时
hours001=starttime001.hour
aa=hours001+12
starttime=starttime001+datetime.timedelta(hours=-aa)
yearint=starttime.year
#hours=starttime.hour
midpath = path + '/' + str(yearint)
initial_time=datetime.datetime.strftime(starttime,'%Y-%m-%d %H:%M:%S')
##判断数据库中是否计算过该时次
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',
3307)
cursor=db.cursor()
sql="SELECT count(*) from t_r_ec_city_forecast_ele_mos_dem where initial_time='"+initial_time+"'"
#print sql
cursor.execute(sql)
data=cursor.fetchall()
#print data
for row in data:
print row[0],int(row[0])
if int(row[0])>=172020:
return
#给定时间大于12时,计算今天的数据,如果小于12计算昨天的数据。
# if hours>=12:
# # # datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
# # # else:
# # # nowdate=starttime+datetime.timedelta(days=-1)
# # # datestr=datetime.datetime.strftime(nowdate,'%Y-%m-%d')
datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
ecpath=midpath+'/'+datestr
stationlist=getStationList(csvfile)
demdict=calculatedemvalue(demcsv)
#根据文件名称中的时间差来计算要用哪个模型文件和标准化文件
#遍历文件夹放在list列表中
ecfilelist=[]
for root,dirs,files in os.walk(ecpath):
for file in files:
rootpath=root
if file[-3:] == '001' and (file[:3] == 'D2S' or file[:3]=='D2D') and file[7:9]=='12':
ecfilelist.append(file)
#对ec列表进行排序:排序的目的是什么?忘记了
sortecfilelist=sorted(ecfilelist)
now=datetime.datetime.now()
logger.info(now)
#开2个进程池,分别各占一张卡,每次只训练2个模型,显卡的显存限制2个模型是最好的,也就是进程池里面有一个进程
pool001 = multiprocessing.Pool(processes=2)
pool002 = multiprocessing.Pool(processes=2)
for i in range(len(sortecfilelist)):
file=sortecfilelist[i]
# 取文件中时间字段
start = file[3:9]
end = file[11:17]
start001 = str(starttime.year) + start
end001 = str(starttime.year) + end
if start001 > end001:
end001 = str(starttime.year + 1) + end
starttime = datetime.datetime.strptime(start001, '%Y%m%d%H')
origintime = datetime.datetime.strptime(str(starttime.year) + start,
'%Y%m%d%H')
endtime = datetime.datetime.strptime(end001, '%Y%m%d%H')
#计算两个时间差几个小时
d = (endtime - starttime).days
#f = (endtime - starttime).seconds / 3600
hours = (d * 24 + (endtime - starttime).seconds / 3600)
#冬季夏季模型都是以预报小时命名
if hours%3==0:
if hours<10 and hours > 0:
id = '00'+str(hours)
elif hours>=10 and hours<100:
id = '0'+str(hours)
elif hours>=100:
id=str(hours)
else:
index_hour=(int(hours/3)+1)*3
if index_hour<10 and index_hour > 0:
id = '00'+str(index_hour)
elif index_hour>=10 and index_hour<100:
id = '0'+str(index_hour)
elif index_hour>=100:
id=str(index_hour)
#用小时数来定义预报时次,从而确定模型文件和标准化文件
filefullpath=os.path.join(rootpath,file)
datamonth=starttime.month
if datamonth>=5 and datamonth<=9:
modelfile = '/home/wlan_dev/mos/demtemp/temp_model' + id_s + '.model'
scalefile = '/home/wlan_dev/mos/demtemp/temp_scale' + id_s + '.save'
else:
modelfile = '/home/wlan_dev/mos/wintermodel_gpu/temp_model'+id+'.model'
scalefile = '/home/wlan_dev/mos/wintermodel_gpu/temp_scale'+id+'.save'
if os.path.exists(modelfile) and os.path.exists(scalefile):
if int(id)<=90:
#predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime)
pool001.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'0'))
elif int(id)>90:
pool002.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'1'))
pool001.close()
pool002.close()
pool001.join()
pool002.join()
#增加计算准确率函数,计算10天前的准确率
now=datetime.datetime.now()
logger.info(now)
initialtime001=starttime+datetime.timedelta(days=-11)
initial = datetime.datetime.strftime(initialtime001, '%Y-%m-%d %H:%M:%S')
# logger.info(initial)
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
for n in range(10):
print n
pdatetime = initialtime001 + datetime.timedelta(days=n)
pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(days=n + 1)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, n)
#新EC数据前90个预报时次是逐小时的D2S开头,中间18个预报时次是逐3小时的,后面14个预报时次是6小时的
for u in range(90+18+16):
if u <=90:
#pdatetime = initialtime001 + datetime.timedelta(hours=u)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= (u + 1))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, u + 1)
elif u>90 and u<=108:
#pdatetime = initialtime001 + datetime.timedelta(hours=90+(u-90)*3)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= 90+(u + 1-90)*3)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, 90+(u + 1-90)*3 )
else:
#pdatetime = initialtime001 + datetime.timedelta(hours=6 * (u-(u - 90)/3-90))
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours=6 * (u+1-(u-90)/3-90))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
#print initial, pdate, odate, 6 * (u + 1 - (u-90)/3-90)
calculate_temp_h_accurity(db, initial, odate,6 * (u + 1 - (u-90)/3-90))
db.close()
if __name__ == "__main__":
# 加日志
logger = logging.getLogger('apscheduler.executors.default')
# 指定logger输出格式
formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
# 文件日志learning
logfile = '/home/wlan_dev/log/gpu_winter.log'
file_handler = logging.FileHandler(logfile)
file_handler.setFormatter(formatter) # 可以通过setFormatter指定输出格式
# 控制台日志
console_handler = logging.StreamHandler(sys.stdout)
console_handler.formatter = formatter # 也可以直接给formatter赋值
# 为logger添加的日志处理器
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# 指定日志的最低输出级别,默认为WARN级别
logger.setLevel(logging.INFO)
'''
# 设定定时任务,EC数据每天4次,分布为00、06、12、18时
# 计划设定在15点、21点、第二天的3点、9点执行,
暂时线上只算12时的,其中5-9月采用夏季模型,10月到4月采用冬季模型,每天的凌晨3点计算前一天的12时数据。
定时任务用aps
'''
#path='/home/wlan_dev/mosdata'
path='/opt/meteo/cluster/data/ecmwf/orig'
#readymodel(path)
scheduler=BackgroundScheduler()
scheduler.add_job(readymodel,'cron',hour='3,5,8,11,14',args=(path,))
try:
scheduler.start()
while True:
time.sleep(2)
except Exception as ex:
logger.info(ex.message)
| latlonArray[indexlat | identifier_name |
ecxgboost_predicttemp_gpu_multi_live_hourly.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
author: yetao.lu
date: 2018/11/8
description:根据所有模型用一个卡串行,修改为一个模型用一个看,服务器有四个看,至少可以4个并行
"""
import logging,sys,datetime,os,pygrib,numpy,MySQLdb,time,math
import xgboost,multiprocessing
from sklearn.externals import joblib
from apscheduler.schedulers.background import BackgroundScheduler
def getStationList(csvfile):
stationlist=[]
fileread=open(csvfile,'r')
firstline=fileread.readline()
while True:
line=fileread.readline()
perlist=line.split(',')
if len(perlist)>=4:
stationlist.append(perlist)
if not line or line=='':
break
return stationlist
def calculate16gribvalue(latlonArray,indexlat,indexlon,vstring):
vstring.append(latlonArray[indexlat][indexlon])
vstring.append(latlonArray[indexlat][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon - 1])
vstring.append(latlonArray[indexlat - 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon + 1])
vstring.append(latlonArray[indexlat - 1][indexlon + 2])
vstring.append(latlonArray[indexlat][indexlon + 2])
vstring.append(latlonArray[indexlat + 1][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 1])
vstring.append(latlonArray[indexlat + 2][indexlon])
vstring.append(latlonArray[indexlat + 2][indexlon - 1])
vstring.append(latlonArray[indexlat + 1][indexlon - 1])
vstring.append(latlonArray[indexlat][indexlon - 1])
return vstring
def calculatedemvalue(demcsv):
demdict={}
csvread=open(demcsv,'r')
while True:
line=csvread.readline()
linearray=line.split(',')
if len(linearray)>2:
demdict[linearray[0]]=linearray
if not line:
break
return demdict
'''
遍历文件要放在外层,这里是单个文件进行预测
'''
def predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,foretime,gpu):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
allvaluelist=[]
if file[-3:]=='001' and (file[:3]=='D2S' or file[:3]=='D2D'):
grbs=pygrib.open(filefullpath)
grb_2t = grbs.select(name='2 metre temperature')
tempArray = grb_2t[0].values
grb_2d = grbs.select(name='2 metre dewpoint temperature')
dewpointArray = grb_2d[0].values
grb_10u = grbs.select(name='10 metre U wind component')
u10Array = grb_10u[0].values
grb_10v = grbs.select(name='10 metre V wind component')
v10Array = grb_10v[0].values
grb_tcc = grbs.select(name='Total cloud cover')
tccArray = grb_tcc[0].values
grb_lcc = grbs.select(name='Low cloud cover')
lccArray = grb_lcc[0].values
grb_z = grbs.select(name='Geopotential')
geoArray=grb_z[0].values
grb_500rh = grbs.select(name='Relative humidity', level=500)
rh500Array = grb_500rh[0].values
grb_850rh = grbs.select(name='Relative humidity', level=850)
rh850Array = grb_850rh[0].values
#遍历站点->要素遍历、
for i in range(len(stationlist)):
#print len(stationlist)
perlist=stationlist[i]
stationid=perlist[0]
latitude=float(perlist[1])
longitude=float(perlist[2])
alti=float(perlist[3])
#站点左上角点的索引
indexlat = int((60 - latitude) / 0.1)
indexlon = int((longitude -60) / 0.1)
per_station_value_list=[]
calculate16gribvalue(tempArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(dewpointArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(u10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(v10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(tccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(lccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(geoArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh500Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh850Array,indexlat,indexlon,per_station_value_list)
per_station_value_list.append(latitude)
per_station_value_list.append(longitude)
per_station_value_list.append(alti)
# 站点高程:取计算好的站点周边16个点的高程值
demlist = demdict[stationlist[i][0]]
for u in range(1, len(demlist), 1):
per_station_value_list.append(float(demlist[u]))
allvaluelist.append(per_station_value_list)
#print(per_station_value_list)
trainarray=numpy.array(allvaluelist)
params001 = {
'tree_method': 'gpu_hist',
'booster': 'gbtree',
'objective': 'reg:linear', # 线性回归
'gamma': 0.2, # 用于控制是否后剪枝的参数,越大越保守,一般0.1、0.2这样子。
'max_depth': 12, # 构建树的深度,越大越容易过拟合
'lambda': 2, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。
'subsample': 0.7, # 随机采样训练样本
'colsample_bytree': 0.7, # 生成树时进行的列采样
'min_child_weight': 3,
# 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言
# ,假设 h 在 0.01 附近,min_child_weight 为 1 意味着叶子节点中最少需要包含 100 个样本。
# 这个参数非常影响结果,控制叶子节点中二阶导的和的最小值,该参数值越小,越容易 overfitting。
'silent': 0, # 设置成1则没有运行信息输出,最好是设置为0.
'eta': 0.01, # 如同学习率
'seed': 1000,
# 'nthread':3,# cpu 线程数,不设置取最大值
# 'eval_metric': 'auc'
'scale_pos_weight': 1,
'n_gpus': 1
}
xgbst=xgboost.Booster(params001)
xgbst.load_model(modelfile)
scaler=joblib.load(scalefile)
#print(modelfile,scalefile)
trainarray_t=scaler.transform(trainarray)
#标准化后的矩阵坑我2次了:看好是标准化后的还是标准化前的
xgbtrain=xgboost.DMatrix(trainarray_t)
result=xgbst.predict(xgbtrain)
#print(result)
logger.info(result)
#结果入库
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
#db = MySQLdb.connect('192.168.10.84', 'admin', 'moji_China_123','moge')
cursor = db.cursor()
origin = datetime.datetime.strftime(origintime, '%Y-%m-%d %H:%M:%S')
forecast = datetime.datetime.strftime(foretime, '%Y-%m-%d %H:%M:%S')
forecast_year = foretime.year
forecast_month = foretime.month
forecast_day = foretime.day
forecast_hour = foretime.hour
forecast_minute = foretime.minute
timestr = datetime.datetime.strftime(origintime, '%Y%m%d%H%M%S')
# csv = os.path.join(outpath, origin+'_'+forecast + '.csv')
# csvfile = open(csv, 'w')
sql = 'replace into t_r_ec_city_forecast_ele_mos_dem (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES(%s,%s,%s,%s,%s,%s,%s,%s)'
L = []
for j in range(len(stationlist)):
perstationlist = []
stationid = stationlist[j][0]
temp = result[j]
# 每个站点存储
perstationlist.append(stationid)
perstationlist.append(origin)
perstationlist.append(forecast)
perstationlist.append(forecast_year)
perstationlist.append(forecast_month)
perstationlist.append(forecast_day)
perstationlist.append(forecast_hour)
perstationlist.append(temp)
L.append(perstationlist)
#logger.info(perstationlist)
# # sql='insert into t_r_ec_mos_city_forecast_ele(city_id,initial_time,forecast_time,forecsat_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES ()'
# # sql = 'insert into t_r_ec_city_forecast_ele_mos (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature,temp_max_6h,temp_min_6h,rainstate,precipitation)VALUES ("' + stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(temp) + '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue)+ '")
# # csvfile.write(stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(forecast_minute) + '","' + str(
# # temp)+ '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue))
# # csvfile.write('\n')
# # print sql
# # cursor.execute(sql)
cursor.executemany(sql, L)
db.commit()
db.close()
def calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, ii):
initialtime = datetime.datetime.strptime(initial, '%Y-%m-%d %H:%M:%S')
year002 = initialtime.year
month002 = initialtime.month
day002 = initialtime.day
hour002 = initialtime.hour
cursor001 = db.cursor()
#选择MOS统计之后的24小时平均气温、24小时气温最大值,24小时气温最小值
sql = 'select city_id,initial_time,avg(temperature),max(temperature),min(temperature) from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '" group by city_id'
print sql
cursor001.execute(sql)
rows = cursor001.fetchall()
if rows==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#实况数据
sql_live = 'select v01000,AVG(TEM),MAX(TEM_Max),MIN(TEM_Min) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM<90 and TEM_Max<90 and TEM_Min<90 group by v01000;'
print sql_live
#logger.info(sql_live)
cursor = db.cursor()
cursor.execute(sql_live)
rows_live = cursor.fetchall()
#print len(rows_live)
rowdict = {}
nn = 0
# 3度的平均气温准确率
na_avg = 0
# 3度的气温最大值准确率:用逐小时的气温最大值和预报最高气温计算
na_max = 0
# 3度的气温最小值准确率:用逐小时的气温最小值和预报最低气温计算
na_min = 0
#实况数据存入字典
for row_live in rows_live:
rowdict[row_live[0]] = row_live
#预报站点在实况字典里,计算accu
for row in rows:
if row[0] in rowdict.keys():
nn = nn + 1
#print rowdict[row[0]]
avg_temp_live = float(rowdict[row[0]][1])
avg_temp = float(row[2])
if abs(avg_temp_live - avg_temp) < 3:
na_avg = na_avg + 1
avg_mmax_live = float(rowdict[row[0]][2])
max_temp = float(row[3])
if abs(avg_mmax_live - max_temp) < 3:
na_max = na_max + 1
avg_mmin_live = float(rowdict[row[0]][3])
min_temp = float(row[4])
if abs(avg_mmin_live - min_temp) < 3:
na_min = na_min + 1
# 把一条SQL的值加载list中,然后把这个list合并到总的list中
#print na_avg, na_max, na_min, na_mmax, na_mmin
cursor = db.cursor()
sql='replace into t_r_avg_temp_est_daily_mos_dem(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min)VALUES ("'+initial+'","'+str(ii+1)+'","'+str(year002)+'","'+str(month002)+'","'+str(day002)+'","'+str(hour002)+'","'+str(nn)+'","'+str(na_avg)+'","'+str(na_max)+'","'+str(na_min)+'")'
#sql = 'insert into t_r_avg_temp_est_daily_mos(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min,na_mmax,na_mmin)VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
print sql
cursor.execute(sql)
db.commit()
'''
ii表示预报小时数
'''
def calculate_temp3h_accurity(db,initial,pdate,odate,ii):
#72小时气温的准确率
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '"'
print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#取逐小时的共3h的均值
sql_live = 'select v01000,AVG(TEM) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM <80 group by v01000;'
print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into)
db.commit()
#EC每个预报时刻的准确率
def calculate_temp_h_accurity(db,initial,odate,ii):
#计算一个起始预报时次的所有预报的准确率。
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time="' + odate + '"'
#print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#这里不应该去均值,而是取点值,即预报的时刻的温度值。
sql_live = 'select v01000,TEM from t_r_surf_hour_ele where vdate="' + odate + '" and TEM <80'
#print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
| # # nowdate=starttime+datetime.timedelta(days=-1)
# # # datestr=datetime.datetime.strftime(nowdate,'%Y-%m-%d')
datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
ecpath=midpath+'/'+datestr
stationlist=getStationList(csvfile)
demdict=calculatedemvalue(demcsv)
#根据文件名称中的时间差来计算要用哪个模型文件和标准化文件
#遍历文件夹放在list列表中
ecfilelist=[]
for root,dirs,files in os.walk(ecpath):
for file in files:
rootpath=root
if file[-3:] == '001' and (file[:3] == 'D2S' or file[:3]=='D2D') and file[7:9]=='12':
ecfilelist.append(file)
#对ec列表进行排序:排序的目的是什么?忘记了
sortecfilelist=sorted(ecfilelist)
now=datetime.datetime.now()
logger.info(now)
#开2个进程池,分别各占一张卡,每次只训练2个模型,显卡的显存限制2个模型是最好的,也就是进程池里面有一个进程
pool001 = multiprocessing.Pool(processes=2)
pool002 = multiprocessing.Pool(processes=2)
for i in range(len(sortecfilelist)):
file=sortecfilelist[i]
# 取文件中时间字段
start = file[3:9]
end = file[11:17]
start001 = str(starttime.year) + start
end001 = str(starttime.year) + end
if start001 > end001:
end001 = str(starttime.year + 1) + end
starttime = datetime.datetime.strptime(start001, '%Y%m%d%H')
origintime = datetime.datetime.strptime(str(starttime.year) + start,
'%Y%m%d%H')
endtime = datetime.datetime.strptime(end001, '%Y%m%d%H')
#计算两个时间差几个小时
d = (endtime - starttime).days
#f = (endtime - starttime).seconds / 3600
hours = (d * 24 + (endtime - starttime).seconds / 3600)
#冬季夏季模型都是以预报小时命名
if hours%3==0:
if hours<10 and hours > 0:
id = '00'+str(hours)
elif hours>=10 and hours<100:
id = '0'+str(hours)
elif hours>=100:
id=str(hours)
else:
index_hour=(int(hours/3)+1)*3
if index_hour<10 and index_hour > 0:
id = '00'+str(index_hour)
elif index_hour>=10 and index_hour<100:
id = '0'+str(index_hour)
elif index_hour>=100:
id=str(index_hour)
#用小时数来定义预报时次,从而确定模型文件和标准化文件
filefullpath=os.path.join(rootpath,file)
datamonth=starttime.month
if datamonth>=5 and datamonth<=9:
modelfile = '/home/wlan_dev/mos/demtemp/temp_model' + id_s + '.model'
scalefile = '/home/wlan_dev/mos/demtemp/temp_scale' + id_s + '.save'
else:
modelfile = '/home/wlan_dev/mos/wintermodel_gpu/temp_model'+id+'.model'
scalefile = '/home/wlan_dev/mos/wintermodel_gpu/temp_scale'+id+'.save'
if os.path.exists(modelfile) and os.path.exists(scalefile):
if int(id)<=90:
#predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime)
pool001.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'0'))
elif int(id)>90:
pool002.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'1'))
pool001.close()
pool002.close()
pool001.join()
pool002.join()
#增加计算准确率函数,计算10天前的准确率
now=datetime.datetime.now()
logger.info(now)
initialtime001=starttime+datetime.timedelta(days=-11)
initial = datetime.datetime.strftime(initialtime001, '%Y-%m-%d %H:%M:%S')
# logger.info(initial)
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
for n in range(10):
print n
pdatetime = initialtime001 + datetime.timedelta(days=n)
pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(days=n + 1)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, n)
#新EC数据前90个预报时次是逐小时的D2S开头,中间18个预报时次是逐3小时的,后面14个预报时次是6小时的
for u in range(90+18+16):
if u <=90:
#pdatetime = initialtime001 + datetime.timedelta(hours=u)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= (u + 1))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, u + 1)
elif u>90 and u<=108:
#pdatetime = initialtime001 + datetime.timedelta(hours=90+(u-90)*3)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= 90+(u + 1-90)*3)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, 90+(u + 1-90)*3 )
else:
#pdatetime = initialtime001 + datetime.timedelta(hours=6 * (u-(u - 90)/3-90))
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours=6 * (u+1-(u-90)/3-90))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
#print initial, pdate, odate, 6 * (u + 1 - (u-90)/3-90)
calculate_temp_h_accurity(db, initial, odate,6 * (u + 1 - (u-90)/3-90))
db.close()
if __name__ == "__main__":
# 加日志
logger = logging.getLogger('apscheduler.executors.default')
# 指定logger输出格式
formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
# 文件日志learning
logfile = '/home/wlan_dev/log/gpu_winter.log'
file_handler = logging.FileHandler(logfile)
file_handler.setFormatter(formatter) # 可以通过setFormatter指定输出格式
# 控制台日志
console_handler = logging.StreamHandler(sys.stdout)
console_handler.formatter = formatter # 也可以直接给formatter赋值
# 为logger添加的日志处理器
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# 指定日志的最低输出级别,默认为WARN级别
logger.setLevel(logging.INFO)
'''
# 设定定时任务,EC数据每天4次,分布为00、06、12、18时
# 计划设定在15点、21点、第二天的3点、9点执行,
暂时线上只算12时的,其中5-9月采用夏季模型,10月到4月采用冬季模型,每天的凌晨3点计算前一天的12时数据。
定时任务用aps
'''
#path='/home/wlan_dev/mosdata'
path='/opt/meteo/cluster/data/ecmwf/orig'
#readymodel(path)
scheduler=BackgroundScheduler()
scheduler.add_job(readymodel,'cron',hour='3,5,8,11,14',args=(path,))
try:
scheduler.start()
while True:
time.sleep(2)
except Exception as ex:
logger.info(ex.message)
| sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
#print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into)
db.commit()
def readymodel(path):
starttime=datetime.datetime.now()
year_a=starttime.year
month_a=starttime.month
day_a=starttime.day
hour_a=starttime.hour
logger.info(starttime,year_a,month_a,day_a,hour_a)
starttime001=datetime.datetime(year_a,month_a,day_a,hour_a,0,0)
#outpath='/home/wlan_dev/mos/mosresult'
csvfile='/home/wlan_dev/mos/moslive_winter/stations.csv'
demcsv='/home/wlan_dev/mos/moslive_winter/dem0.1.csv'
#给定的时间是实时时间凌晨3点,计算的是前一天的12时,相差15个小时
hours001=starttime001.hour
aa=hours001+12
starttime=starttime001+datetime.timedelta(hours=-aa)
yearint=starttime.year
#hours=starttime.hour
midpath = path + '/' + str(yearint)
initial_time=datetime.datetime.strftime(starttime,'%Y-%m-%d %H:%M:%S')
##判断数据库中是否计算过该时次
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',
3307)
cursor=db.cursor()
sql="SELECT count(*) from t_r_ec_city_forecast_ele_mos_dem where initial_time='"+initial_time+"'"
#print sql
cursor.execute(sql)
data=cursor.fetchall()
#print data
for row in data:
print row[0],int(row[0])
if int(row[0])>=172020:
return
#给定时间大于12时,计算今天的数据,如果小于12计算昨天的数据。
# if hours>=12:
# # # datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
# # # else:
# | identifier_body |
ecxgboost_predicttemp_gpu_multi_live_hourly.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
author: yetao.lu
date: 2018/11/8
description:根据所有模型用一个卡串行,修改为一个模型用一个看,服务器有四个看,至少可以4个并行
"""
import logging,sys,datetime,os,pygrib,numpy,MySQLdb,time,math
import xgboost,multiprocessing
from sklearn.externals import joblib
from apscheduler.schedulers.background import BackgroundScheduler
def getStationList(csvfile):
stationlist=[]
fileread=open(csvfile,'r')
firstline=fileread.readline()
while True:
line=fileread.readline()
perlist=line.split(',')
if len(perlist)>=4:
stationlist.append(perlist)
if not line or line=='':
break
return stationlist
def calculate16gribvalue(latlonArray,indexlat,indexlon,vstring):
vstring.append(latlonArray[indexlat][indexlon])
vstring.append(latlonArray[indexlat][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon - 1])
vstring.append(latlonArray[indexlat - 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon + 1])
vstring.append(latlonArray[indexlat - 1][indexlon + 2])
vstring.append(latlonArray[indexlat][indexlon + 2])
vstring.append(latlonArray[indexlat + 1][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 1])
vstring.append(latlonArray[indexlat + 2][indexlon])
vstring.append(latlonArray[indexlat + 2][indexlon - 1])
vstring.append(latlonArray[indexlat + 1][indexlon - 1])
vstring.append(latlonArray[indexlat][indexlon - 1])
return vstring
def calculatedemvalue(demcsv):
demdict={}
csvread=open(demcsv,'r')
while True:
line=csvread.readline()
linearray=line.split(',')
if len(linearray)>2:
demdict[linearray[0]]=linearray
if not line:
break
return demdict
'''
遍历文件要放在外层,这里是单个文件进行预测
'''
def predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,foretime,gpu):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
allvaluelist=[]
if file[-3:]=='001' and (file[:3]=='D2S' or file[:3]=='D2D'):
grbs=pygrib.open(filefullpath)
grb_2t = grbs.select(name='2 metre temperature')
tempArray = grb_2t[0].values
grb_2d = grbs.select(name='2 metre dewpoint temperature')
dewpointArray = grb_2d[0].values
grb_10u = grbs.select(name='10 metre U wind component')
u10Array = grb_10u[0].values
grb_10v = grbs.select(name='10 metre V wind component')
v10Array = grb_10v[0].values
grb_tcc = grbs.select(name='Total cloud cover')
tccArray = grb_tcc[0].values
grb_lcc = grbs.select(name='Low cloud cover')
lccArray = grb_lcc[0].values
grb_z = grbs.select(name='Geopotential')
geoArray=grb_z[0].values
grb_500rh = grbs.select(name='Relative humidity', level=500)
rh500Array = grb_500rh[0].values
grb_850rh = grbs.select(name='Relative humidity', level=850)
rh850Array = grb_850rh[0].values
#遍历站点->要素遍历、
for i in range(len(stationlist)):
#print len(stationlist)
perlist=stationlist[i]
stationid=perlist[0]
latitude=float(perlist[1])
longitude=float(perlist[2])
alti=float(perlist[3])
#站点左上角点的索引
indexlat = int((60 - latitude) / 0.1)
indexlon = int((longitude -60) / 0.1)
per_station_value_list=[]
calculate16gribvalue(tempArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(dewpointArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(u10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(v10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(tccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(lccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(geoArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh500Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh850Array,indexlat,indexlon,per_station_value_list)
per_station_value_list.append(latitude)
per_station_value_list.append(longitude)
per_station_value_list.append(alti)
# 站点高程:取计算好的站点周边16个点的高程值
demlist = demdict[stationlist[i][0]]
for u in range(1, len(demlist), 1):
per_station_value_list.append(float(demlist[u]))
allvaluelist.append(per_station_value_list)
#print(per_station_value_list)
trainarray=numpy.array(allvaluelist)
params001 = {
'tree_method': 'gpu_hist',
'booster': 'gbtree',
'objective': 'reg:linear', # 线性回归
'gamma': 0.2, # 用于控制是否后剪枝的参数,越大越保守,一般0.1、0.2这样子。
'max_depth': 12, # 构建树的深度,越大越容易过拟合
'lambda': 2, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。
'subsample': 0.7, # 随机采样训练样本
'colsample_bytree': 0.7, # 生成树时进行的列采样
'min_child_weight': 3,
# 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言
# ,假设 h 在 0.01 附近,min_child_weight 为 1 意味着叶子节点中最少需要包含 100 个样本。
# 这个参数非常影响结果,控制叶子节点中二阶导的和的最小值,该参数值越小,越容易 overfitting。
'silent': 0, # 设置成1则没有运行信息输出,最好是设置为0.
'eta': 0.01, # 如同学习率
'seed': 1000,
# 'nthread':3,# cpu 线程数,不设置取最大值
# 'eval_metric': 'auc'
'scale_pos_weight': 1,
'n_gpus': 1
}
xgbst=xgboost.Booster(params001)
xgbst.load_model(modelfile)
scaler=joblib.load(scalefile)
#print(modelfile,scalefile)
trainarray_t=scaler.transform(trainarray)
#标准化后的矩阵坑我2次了:看好是标准化后的还是标准化前的
xgbtrain=xgboost.DMatrix(trainarray_t)
result=xgbst.predict(xgbtrain)
#print(result)
logger.info(result)
#结果入库
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
#db = MySQLdb.connect('192.168.10.84', 'admin', 'moji_China_123','moge')
cursor = db.cursor()
origin = datetime.datetime.strftime(origintime, '%Y-%m-%d %H:%M:%S')
forecast = datetime.datetime.strftime(foretime, '%Y-%m-%d %H:%M:%S')
forecast_year = foretime.year
forecast_month = foretime.month
forecast_day = foretime.day
forecast_hour = foretime.hour
forecast_minute = foretime.minute
timestr = datetime.datetime.strftime(origintime, '%Y%m%d%H%M%S')
# csv = os.path.join(outpath, origin+'_'+forecast + '.csv')
# csvfile = open(csv, 'w')
sql = 'replace into t_r_ec_city_forecast_ele_mos_dem (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES(%s,%s,%s,%s,%s,%s,%s,%s)'
L = []
for j in range(len(stationlist)):
perstationlist = []
stationid = stationlist[j][0]
temp = result[j]
# 每个站点存储
perstationlist.append(stationid)
perstationlist.append(origin)
perstationlist.append(forecast)
perstationlist.append(forecast_year)
perstationlist.append(forecast_month)
perstationlist.append(forecast_day)
perstationlist.append(forecast_hour)
perstationlist.append(temp)
L.append(perstationlist)
#logger.info(perstationlist)
# # sql='insert into t_r_ec_mos_city_forecast_ele(city_id,initial_time,forecast_time,forecsat_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES ()'
# # sql = 'insert into t_r_ec_city_forecast_ele_mos (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature,temp_max_6h,temp_min_6h,rainstate,precipitation)VALUES ("' + stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(temp) + '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue)+ '")
# # csvfile.write(stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(forecast_minute) + '","' + str(
# # temp)+ '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue))
# # csvfile.write('\n')
# # print sql
# # cursor.execute(sql)
cursor.executemany(sql, L)
db.commit()
db.close()
def calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, ii):
initialtime = datetime.datetime.strptime(initial, '%Y-%m-%d %H:%M:%S')
year002 = initialtime.year
month002 = initialtime.month
day002 = initialtime.day
hour002 = initialtime.hour
cursor001 = db.cursor()
#选择MOS统计之后的24小时平均气温、24小时气温最大值,24小时气温最小值
sql = 'select city_id,initial_time,avg(temperature),max(temperature),min(temperature) from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '" group by city_id'
print sql
cursor001.execute(sql)
rows = cursor001.fetchall()
if rows==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#实况数据
sql_live = 'select v01000,AVG(TEM),MAX(TEM_Max),MIN(TEM_Min) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM<90 and TEM_Max<90 and TEM_Min<90 group by v01000;'
print sql_live
#logger.info(sql_live)
cursor = db.cursor()
cursor.execute(sql_live)
rows_live = cursor.fetchall()
#print len(rows_live)
rowdict = {}
nn = 0
# 3度的平均气温准确率
na_avg = 0
# 3度的气温最大值准确率:用逐小时的气温最大值和预报最高气温计算
na_max = 0
# 3度的气温最小值准确率:用逐小时的气温最小值和预报最低气温计算
na_min = 0
#实况数据存入字典
for row_live in rows_live:
rowdict[row_live[0]] = row_live
#预报站点在实况字典里,计算accu
for row in rows:
if row[0] in rowdict.keys():
nn = nn + 1
#print rowdict[row[0]]
avg_temp_live = float(rowdict[row[0]][1])
avg_temp = float(row[2])
if abs(avg_temp_live - avg_temp) < 3:
na_avg = na_avg + 1
avg_mmax_live = float(rowdict[row[0]][2])
max_temp = float(row[3])
if abs(avg_mmax_live - max_temp) < 3:
na_max = na_max + 1
avg_mmin_live = float(rowdict[row[0]][3])
min_temp = float(row[4])
if abs(avg_mmin_live - min_temp) < 3:
na_min = na_min + 1
# 把一条SQL的值加载list中,然后把这个list合并到总的list中
#print na_avg, na_max, na_min, na_mmax, na_mmin
cursor = db.cursor()
sql='replace into t_r_avg_temp_est_daily_mos_dem(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min)VALUES ("'+initial+'","'+str(ii+1)+'","'+str(year002)+'","'+str(month002)+'","'+str(day002)+'","'+str(hour002)+'","'+str(nn)+'","'+str(na_avg)+'","'+str(na_max)+'","'+str(na_min)+'")'
#sql = 'insert into t_r_avg_temp_est_daily_mos(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min,na_mmax,na_mmin)VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
print sql
cursor.execute(sql)
db.commit()
'''
ii表示预报小时数
'''
def calculate_temp3h_accurity(db,initial,pdate,odate,ii):
#72小时气温的准确率
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '"'
print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#取逐小时的共3h的均值
sql_live = 'select v01000,AVG(TEM) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM <80 group by v01000;'
print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into) | #EC每个预报时刻的准确率
def calculate_temp_h_accurity(db,initial,odate,ii):
#计算一个起始预报时次的所有预报的准确率。
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time="' + odate + '"'
#print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#这里不应该去均值,而是取点值,即预报的时刻的温度值。
sql_live = 'select v01000,TEM from t_r_surf_hour_ele where vdate="' + odate + '" and TEM <80'
#print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
#print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into)
db.commit()
def readymodel(path):
starttime=datetime.datetime.now()
year_a=starttime.year
month_a=starttime.month
day_a=starttime.day
hour_a=starttime.hour
logger.info(starttime,year_a,month_a,day_a,hour_a)
starttime001=datetime.datetime(year_a,month_a,day_a,hour_a,0,0)
#outpath='/home/wlan_dev/mos/mosresult'
csvfile='/home/wlan_dev/mos/moslive_winter/stations.csv'
demcsv='/home/wlan_dev/mos/moslive_winter/dem0.1.csv'
#给定的时间是实时时间凌晨3点,计算的是前一天的12时,相差15个小时
hours001=starttime001.hour
aa=hours001+12
starttime=starttime001+datetime.timedelta(hours=-aa)
yearint=starttime.year
#hours=starttime.hour
midpath = path + '/' + str(yearint)
initial_time=datetime.datetime.strftime(starttime,'%Y-%m-%d %H:%M:%S')
##判断数据库中是否计算过该时次
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',
3307)
cursor=db.cursor()
sql="SELECT count(*) from t_r_ec_city_forecast_ele_mos_dem where initial_time='"+initial_time+"'"
#print sql
cursor.execute(sql)
data=cursor.fetchall()
#print data
for row in data:
print row[0],int(row[0])
if int(row[0])>=172020:
return
#给定时间大于12时,计算今天的数据,如果小于12计算昨天的数据。
# if hours>=12:
# # # datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
# # # else:
# # # nowdate=starttime+datetime.timedelta(days=-1)
# # # datestr=datetime.datetime.strftime(nowdate,'%Y-%m-%d')
datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
ecpath=midpath+'/'+datestr
stationlist=getStationList(csvfile)
demdict=calculatedemvalue(demcsv)
#根据文件名称中的时间差来计算要用哪个模型文件和标准化文件
#遍历文件夹放在list列表中
ecfilelist=[]
for root,dirs,files in os.walk(ecpath):
for file in files:
rootpath=root
if file[-3:] == '001' and (file[:3] == 'D2S' or file[:3]=='D2D') and file[7:9]=='12':
ecfilelist.append(file)
#对ec列表进行排序:排序的目的是什么?忘记了
sortecfilelist=sorted(ecfilelist)
now=datetime.datetime.now()
logger.info(now)
#开2个进程池,分别各占一张卡,每次只训练2个模型,显卡的显存限制2个模型是最好的,也就是进程池里面有一个进程
pool001 = multiprocessing.Pool(processes=2)
pool002 = multiprocessing.Pool(processes=2)
for i in range(len(sortecfilelist)):
file=sortecfilelist[i]
# 取文件中时间字段
start = file[3:9]
end = file[11:17]
start001 = str(starttime.year) + start
end001 = str(starttime.year) + end
if start001 > end001:
end001 = str(starttime.year + 1) + end
starttime = datetime.datetime.strptime(start001, '%Y%m%d%H')
origintime = datetime.datetime.strptime(str(starttime.year) + start,
'%Y%m%d%H')
endtime = datetime.datetime.strptime(end001, '%Y%m%d%H')
#计算两个时间差几个小时
d = (endtime - starttime).days
#f = (endtime - starttime).seconds / 3600
hours = (d * 24 + (endtime - starttime).seconds / 3600)
#冬季夏季模型都是以预报小时命名
if hours%3==0:
if hours<10 and hours > 0:
id = '00'+str(hours)
elif hours>=10 and hours<100:
id = '0'+str(hours)
elif hours>=100:
id=str(hours)
else:
index_hour=(int(hours/3)+1)*3
if index_hour<10 and index_hour > 0:
id = '00'+str(index_hour)
elif index_hour>=10 and index_hour<100:
id = '0'+str(index_hour)
elif index_hour>=100:
id=str(index_hour)
#用小时数来定义预报时次,从而确定模型文件和标准化文件
filefullpath=os.path.join(rootpath,file)
datamonth=starttime.month
if datamonth>=5 and datamonth<=9:
modelfile = '/home/wlan_dev/mos/demtemp/temp_model' + id_s + '.model'
scalefile = '/home/wlan_dev/mos/demtemp/temp_scale' + id_s + '.save'
else:
modelfile = '/home/wlan_dev/mos/wintermodel_gpu/temp_model'+id+'.model'
scalefile = '/home/wlan_dev/mos/wintermodel_gpu/temp_scale'+id+'.save'
if os.path.exists(modelfile) and os.path.exists(scalefile):
if int(id)<=90:
#predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime)
pool001.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'0'))
elif int(id)>90:
pool002.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'1'))
pool001.close()
pool002.close()
pool001.join()
pool002.join()
#增加计算准确率函数,计算10天前的准确率
now=datetime.datetime.now()
logger.info(now)
initialtime001=starttime+datetime.timedelta(days=-11)
initial = datetime.datetime.strftime(initialtime001, '%Y-%m-%d %H:%M:%S')
# logger.info(initial)
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
for n in range(10):
print n
pdatetime = initialtime001 + datetime.timedelta(days=n)
pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(days=n + 1)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, n)
#新EC数据前90个预报时次是逐小时的D2S开头,中间18个预报时次是逐3小时的,后面14个预报时次是6小时的
for u in range(90+18+16):
if u <=90:
#pdatetime = initialtime001 + datetime.timedelta(hours=u)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= (u + 1))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, u + 1)
elif u>90 and u<=108:
#pdatetime = initialtime001 + datetime.timedelta(hours=90+(u-90)*3)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= 90+(u + 1-90)*3)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, 90+(u + 1-90)*3 )
else:
#pdatetime = initialtime001 + datetime.timedelta(hours=6 * (u-(u - 90)/3-90))
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours=6 * (u+1-(u-90)/3-90))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
#print initial, pdate, odate, 6 * (u + 1 - (u-90)/3-90)
calculate_temp_h_accurity(db, initial, odate,6 * (u + 1 - (u-90)/3-90))
db.close()
if __name__ == "__main__":
# 加日志
logger = logging.getLogger('apscheduler.executors.default')
# 指定logger输出格式
formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
# 文件日志learning
logfile = '/home/wlan_dev/log/gpu_winter.log'
file_handler = logging.FileHandler(logfile)
file_handler.setFormatter(formatter) # 可以通过setFormatter指定输出格式
# 控制台日志
console_handler = logging.StreamHandler(sys.stdout)
console_handler.formatter = formatter # 也可以直接给formatter赋值
# 为logger添加的日志处理器
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# 指定日志的最低输出级别,默认为WARN级别
logger.setLevel(logging.INFO)
'''
# 设定定时任务,EC数据每天4次,分布为00、06、12、18时
# 计划设定在15点、21点、第二天的3点、9点执行,
暂时线上只算12时的,其中5-9月采用夏季模型,10月到4月采用冬季模型,每天的凌晨3点计算前一天的12时数据。
定时任务用aps
'''
#path='/home/wlan_dev/mosdata'
path='/opt/meteo/cluster/data/ecmwf/orig'
#readymodel(path)
scheduler=BackgroundScheduler()
scheduler.add_job(readymodel,'cron',hour='3,5,8,11,14',args=(path,))
try:
scheduler.start()
while True:
time.sleep(2)
except Exception as ex:
logger.info(ex.message) | db.commit() | random_line_split |
ecxgboost_predicttemp_gpu_multi_live_hourly.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
author: yetao.lu
date: 2018/11/8
description:根据所有模型用一个卡串行,修改为一个模型用一个看,服务器有四个看,至少可以4个并行
"""
import logging,sys,datetime,os,pygrib,numpy,MySQLdb,time,math
import xgboost,multiprocessing
from sklearn.externals import joblib
from apscheduler.schedulers.background import BackgroundScheduler
def getStationList(csvfile):
stationlist=[]
fileread=open(csvfile,'r')
firstline=fileread.readline()
while True:
line=fileread.readline()
perlist=line.split(',')
if len(perlist)>=4:
stationlist.append(perlist)
if not line or line=='':
break
return stationlist
def calculate16gribvalue(latlonArray,indexlat,indexlon,vstring):
vstring.append(latlonArray[indexlat][indexlon])
vstring.append(latlonArray[indexlat][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon + 1])
vstring.append(latlonArray[indexlat + 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon - 1])
vstring.append(latlonArray[indexlat - 1][indexlon])
vstring.append(latlonArray[indexlat - 1][indexlon + 1])
vstring.append(latlonArray[indexlat - 1][indexlon + 2])
vstring.append(latlonArray[indexlat][indexlon + 2])
vstring.append(latlonArray[indexlat + 1][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 2])
vstring.append(latlonArray[indexlat + 2][indexlon + 1])
vstring.append(latlonArray[indexlat + 2][indexlon])
vstring.append(latlonArray[indexlat + 2][indexlon - 1])
vstring.append(latlonArray[indexlat + 1][indexlon - 1])
vstring.append(latlonArray[indexlat][indexlon - 1])
return vstring
def calculatedemvalue(demcsv):
demdict={}
csvread=open(demcsv,'r')
while True:
line=csvread.readline()
linearray=line.split(',')
if len(linearray)>2:
demdict[linearray[0]]=linearray
if not line:
break
return demdict
'''
遍历文件要放在外层,这里是单个文件进行预测
'''
def predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,foretime,gpu):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
allvaluelist=[]
if file[-3:]=='001' and (file[:3]=='D2S' or file[:3]=='D2D'):
grbs=pygrib.open(filefullpath)
grb_2t = grbs.select(name='2 metre temperature')
tempArray = grb_2t[0].values
grb_2d = grbs.select(name='2 metre dewpoint temperature')
dewpointArray = grb_2d[0].values
grb_10u = grbs.select(name='10 metre U wind component')
u10Array = grb_10u[0].values
grb_10v = grbs.select(name='10 metre V wind component')
v10Array = grb_10v[0].values
grb_tcc = grbs.select(name='Total cloud cover')
tccArray = grb_tcc[0].values
grb_lcc = grbs.select(name='Low cloud cover')
lccArray = grb_lcc[0].values
grb_z = grbs.select(name='Geopotential')
geoArray=grb_z[0].values
grb_500rh = grbs.select(name='Relative humidity', level=500)
rh500Array = grb_500rh[0].values
grb_850rh = grbs.select(name='Relative humidity', level=850)
rh850Array = grb_850rh[0].values
#遍历站点->要素遍历、
for i in range(len(stationlist)):
#print len(stationlist)
perlist=stationlist[i]
stationid=perlist[0]
latitude=float(perlist[1])
longitude=float(perlist[2])
alti=float(perlist[3])
#站点左上角点的索引
indexlat = int((60 - latitude) / 0.1)
indexlon = int((longitude -60) / 0.1)
per_station_value_list=[]
calculate16gribvalue(tempArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(dewpointArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(u10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(v10Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(tccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(lccArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(geoArray,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh500Array,indexlat,indexlon,per_station_value_list)
calculate16gribvalue(rh850Array,indexlat,indexlon,per_station_value_list)
per_station_value_list.append(latitude)
per_station_value_list.append(longitude)
per_station_value_list.append(alti)
# 站点高程:取计算好的站点周边16个点的高程值
demlist = demdict[stationlist[i][0]]
for u in range(1, len(demlist), 1):
per_station_value_list.append(float(demlist[u]))
allvaluelist.append(per_station_value_list)
#print(per_station_value_list)
trainarray=numpy.array(allvaluelist)
params001 = {
'tree_method': 'gpu_hist',
'booster': 'gbtree',
'objective': 'reg:linear', # 线性回归
'gamma': 0.2, # 用于控制是否后剪枝的参数,越大越保守,一般0.1、0.2这样子。
'max_depth': 12, # 构建树的深度,越大越容易过拟合
'lambda': 2, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。
'subsample': 0.7, # 随机采样训练样本
'colsample_bytree': 0.7, # 生成树时进行的列采样
'min_child_weight': 3,
# 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言
# ,假设 h 在 0.01 附近,min_child_weight 为 1 意味着叶子节点中最少需要包含 100 个样本。
# 这个参数非常影响结果,控制叶子节点中二阶导的和的最小值,该参数值越小,越容易 overfitting。
'silent': 0, # 设置成1则没有运行信息输出,最好是设置为0.
'eta': 0.01, # 如同学习率
'seed': 1000,
# 'nthread':3,# cpu 线程数,不设置取最大值
# 'eval_metric': 'auc'
'scale_pos_weight': 1,
'n_gpus': 1
}
xgbst=xgboost.Booster(params001)
xgbst.load_model(modelfile)
scaler=joblib.load(scalefile)
#print(modelfile,scalefile)
trainarray_t=scaler.transform(trainarray)
#标准化后的矩阵坑我2次了:看好是标准化后的还是标准化前的
xgbtrain=xgboost.DMatrix(trainarray_t)
result=xgbst.predict(xgbtrain)
#print(result)
logger.info(result)
#结果入库
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
#db = MySQLdb.connect('192.168.10.84', 'admin', 'moji_China_123','moge')
cursor = db.cursor()
origin = datetime.datetime.strftime(origintime, '%Y-%m-%d %H:%M:%S')
forecast = datetime.datetime.strftime(foretime, '%Y-%m-%d %H:%M:%S')
forecast_year = foretime.year
forecast_month = foretime.month
forecast_day = foretime.day
forecast_hour = foretime.hour
forecast_minute = foretime.minute
timestr = datetime.datetime.strftime(origintime, '%Y%m%d%H%M%S')
# csv = os.path.join(outpath, origin+'_'+forecast + '.csv')
# csvfile = open(csv, 'w')
sql = 'replace into t_r_ec_city_forecast_ele_mos_dem (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES(%s,%s,%s,%s,%s,%s,%s,%s)'
L = []
for j in range(len(stationlist)):
perstationlist = []
stationid = stationlist[j][0]
temp = result[j]
# 每个站点存储
perstationlist.append(stationid)
perstationlist.append(origin)
perstationlist.append(forecast)
perstationlist.append(forecast_year)
perstationlist.append(forecast_month)
perstationlist.append(forecast_day)
perstationlist.append(forecast_hour)
perstationlist.append(temp)
L.append(perstationlist)
#logger.info(perstationlist)
# # sql='insert into t_r_ec_mos_city_forecast_ele(city_id,initial_time,forecast_time,forecsat_year,forecast_month,forecast_day,forecast_hour,temperature)VALUES ()'
# # sql = 'insert into t_r_ec_city_forecast_ele_mos (city_id,initial_time,forecast_time,forecast_year,forecast_month,forecast_day,forecast_hour,temperature,temp_max_6h,temp_min_6h,rainstate,precipitation)VALUES ("' + stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(temp) + '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue)+ '")
# # csvfile.write(stationid + '","' + origin + '","' + str(
# # forecast) + '","' + str(forecast_year) + '","' + str(
# # forecast_month) + '","' + str(forecast_day) + '","' + str(
# # forecast_hour) + '","' + str(forecast_minute) + '","' + str(
# # temp)+ '","' + str(maxtemp)+ '","' + str(mintemp)+'","' + str(rainstate)+'","' + str(prevalue))
# # csvfile.write('\n')
# # print sql
# # cursor.execute(sql)
cursor.executemany(sql, L)
db.commit()
db.close()
def calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, ii):
initialtime = datetime.datetime.strptime(initial, '%Y-%m-%d %H:%M:%S')
year002 = initialtime.year
month002 = initialtime.month
day002 = initialtime.day
hour002 = initialtime.hour
cursor001 = db.cursor()
#选择MOS统计之后的24小时平均气温、24小时气温最大值,24小时气温最小值
sql = 'select city_id,initial_time,avg(temperature),max(temperature),min(temperature) from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '" group by city_id'
print sql
cursor001.execute(sql)
rows = cursor001.fetchall()
if rows==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#实况数据
sql_live = 'select v01000,AVG(TEM),MAX(TEM_Max),MIN(TEM_Min) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM<90 and TEM_Max<90 and TEM_Min<90 group by v01000;'
print sql_live
#logger.info(sql_live)
cursor = db.cursor()
cursor.execute(sql_live)
rows_live = cursor.fetchall()
#print len(rows_live)
rowdict = {}
nn = 0
# 3度的平均气温准确率
na_avg = 0
# 3度的气温最大值准确率:用逐小时的气温最大值和预报最高气温计算
na_max = 0
# 3度的气温最小值准确率:用逐小时的气温最小值和预报最低气温计算
na_min = 0
#实况数据存入字典
for row_live in rows_live:
rowdict[row_live[0]] = row_live
#预报站点在实况字典里,计算accu
for row in rows:
if row[0] in rowdict.keys():
nn = nn + 1
#print rowdict[row[0]]
avg_temp_live = float(rowdict[row[0]][1])
avg_temp = float(row[2])
if abs(avg_temp_live - avg_temp) < 3:
na_avg = na_avg + 1
avg_mmax_live = float(rowdict[row[0]][2])
max_temp = float(row[3])
if abs(avg_mmax_live - max_temp) < 3:
na_max = na_max + 1
avg_mmin_live = float(rowdict[row[0]][3])
min_temp = float(row[4])
if abs(avg_mmin_live - min_temp) < 3:
na_min = na_min + 1
# 把一条SQL的值加载list中,然后把这个list合并到总的list中
#print na_avg, na_max, na_min, na_mmax, na_mmin
cursor = db.cursor()
sql='replace into t_r_avg_temp_est_daily_mos_dem(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min)VALUES ("'+initial+'","'+str(ii+1)+'","'+str(year002)+'","'+str(month002)+'","'+str(day002)+'","'+str(hour002)+'","'+str(nn)+'","'+str(na_avg)+'","'+str(na_max)+'","'+str(na_min)+'")'
#sql = 'insert into t_r_avg_temp_est_daily_mos(initial_time,forecast_days,initial_year,initial_month,initial_day,initial_hour,NN,na_avg,na_max,na_min,na_mmax,na_mmin)VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
print sql
cursor.execute(sql)
db.commit()
'''
ii表示预报小时数
'''
def calculate_temp3h_accurity(db,initial,pdate,odate,ii):
#72小时气温的准确率
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time>"' + pdate + '" and forecast_time<="' + odate + '"'
print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#取逐小时的共3h的均值
sql_live = 'select v01000,AVG(TEM) from t_r_surf_hour_ele where vdate>"' + pdate + '" and vdate<="' + odate + '" and TEM <80 group by v01000;'
print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into)
db.commit()
#EC每个预报时刻的准确率
def calculate_temp_h_accurity(db,initial,odate,ii):
#计算一个起始预报时次的所有预报的准确率。
cursor001 = db.cursor()
sql = 'select city_id,initial_time,temperature from t_r_ec_city_forecast_ele_mos_dem where initial_time="' + initial + '" and forecast_time="' + odate + '"'
#print sql
cursor001.execute(sql)
rows_f=cursor001.fetchall()
#print rows_f,type(rows_f)
if rows_f==():
#logger.info('该时间段没有数据' +sql)
print '该时间段没有数据'
return
#这里不应该去均值,而是取点值,即预报的时刻的温度值。
sql_live = 'select v01000,TEM from t_r_surf_hour_ele where vdate="' + odate + '" and TEM <80'
#print sql_live
cursor=db.cursor()
cursor.execute(sql_live)
rows_l=cursor.fetchall()
dict_live={}
nn=0
na=0
sum_mae=0
sum_rmse=0
#3h的气温存入字典
for row in rows_l:
dict_live[row[0]]=float(row[1])
#判断预报站号在实况字典中,计算准确率
for row_f in rows_f:
if row_f[0] in dict_live.keys():
nn=nn+1
livetemp=float(dict_live[row_f[0]])
foretemp=float(row_f[2])
if abs(livetemp-foretemp)<3:
na=na+1
sum_mae=sum_mae+abs(livetemp-foretemp)
sum_rmse=sum_rmse+pow(livetemp-foretemp,2)
if nn<>0:
mae=sum_mae/nn
rmse=math.sqrt(sum_rmse/nn)
accu=float(na*100)/float(nn)
sql_into='replace into t_r_3h_temp_accu_mos_dem(initial_time,forecast_hours,temp_accu,temp_mae,temp_rmse)VALUES ("'+initial+'","'+str(ii)+'","'+str(accu)+'","'+str(mae)+'","'+str(rmse)+'")'
#print sql_into
#logger.info(sql_into)
cursor=db.cursor()
cursor.execute(sql_into)
db.commit()
def readymodel(path):
starttime=datetime.datetime.now()
year_a=starttime.year
month_a=starttime.month
day_a=starttime.day
hour_a=starttime.hour
logger.info(starttime,year_a,month_a,day_a,hour_a)
starttime001=datetime.datetime(year_a,month_a,day_a,hour_a,0,0)
#outpath='/home/wlan_dev/mos/mosresult'
csvfile='/home/wlan_dev/mos/moslive_winter/stations.csv'
demcsv='/home/wlan_dev/mos/moslive_winter/dem0.1.csv'
#给定的时间是实时时间凌晨3点,计算的是前一天的12时,相差15个小时
hours001=starttime001.hour
aa=hours001+12
starttime=starttime001+datetime.timedelta(hours=-aa)
yearint=starttime.year
#hours=starttime.hour
midpath = path + '/' + str(yearint)
initial_time=datetime.datetime.strftime(starttime,'%Y-%m-%d %H:%M:%S')
##判断数据库中是否计算过该时次
db = MySQLdb.connect('172.16.8.28', 'admin', 'moji_China_123', 'moge',
3307)
cursor=db.cursor()
sql="SELECT count(*) from t_r_ec_city_forecast_ele_mos_dem where initial_time='"+initial_time+"'"
#print sql
cursor.execute(sql)
data=cursor.fetchall()
#print data
for row in data:
print row[0],int(row[0])
if int(row[0])>=172020:
return
#给定时间大于12时,计算今天的数据,如果小于12计算昨天的数据。
# if hours>=12:
# # # datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
# # # else:
# # # nowdate=starttime+datetime.timedelta(days=-1)
# # # datestr=datetime.datetime.strftime(nowdate,'%Y-%m-%d')
datestr=datetime.datetime.strftime(starttime,'%Y-%m-%d')
ecpath=midpath+'/'+datestr
stationlist=getStationList(csvfile)
demdict=calculatedemvalue(demcsv)
#根据文件名称中的时间差来计算要用哪个模型文件和标准化文件
#遍历文件夹放在list列表中
ecfilelist=[]
for root,dirs,files in os.walk(ecpath):
for file in files:
rootpath=root
if file[-3:] == '001' and (file[:3] == 'D2S' or file[:3]=='D2D') and file[7:9]=='12':
ecfilelist.append(file)
#对ec列表进行排序:排序的目的是什么?忘记了
sortecfilelist=sorted(ecfilelist)
now=datetime.datetime.now()
logger.info(now)
#开2个进程池,分别各占一张卡,每次只训练2个模型,显卡的显存限制2个模型是最好的,也就是进程池里面有一个进程
pool001 = multiprocessing.Pool(processes=2)
pool002 = multiprocessing.Pool(processes=2)
for i in range(len(sortecfilelist)):
file=sortecfilelist[i]
# 取文件中时间字段
start = file[3:9]
end = file[11:17]
start001 = str(starttime.year) + start
end001 = str(starttime.year) + end
if start001 > end001:
end001 = str(starttime.year + 1) + end
starttime = datetime.datetime.strptime(start001, '%Y%m%d%H')
origintime = datetime.datetime.strptime(str(starttime.year) + start,
'%Y%m%d%H')
endtime = datetime.datetime.strptime(end001, '%Y%m%d%H')
#计算两个时间差几个小时
d = (endtime - starttime).days
#f = (endtime - starttime).seconds / 3600
hours = (d * 24 + (endtime - starttime).seconds / 3600)
#冬季夏季模型都是以预报小时命名
if hours%3==0:
if hours<10 and hours > 0:
id = '00'+str(hours)
elif hours>=10 and hours<100:
id = '0'+str(hours)
elif hours>=100:
id=str(hours)
else:
index_hour=(int(hours/3)+1)*3
if index_hour<10 and index_hour > 0:
id = '00'+str(index_hour)
elif index_hour>=10 and index_hour<100:
id = '0'+str(index_hour)
elif index_hour>=100:
id=str(index_hour)
#用小时数来定义预报时次,从而确定模型文件和标准化文件
filefullpath=os.path.join(rootpath,file)
datamonth=starttime.month
if datamonth>=5 and datamonth<=9:
modelfile = '/home/wlan_dev/mos/demtemp/temp_model' + id_s + '.model'
scalefile = '/home/wlan_dev/mos/demtemp/temp_scale' + id_s + '.save'
else:
modelfile = '/home/wlan_dev/mos/wintermodel_gpu/temp_model'+id+'.model'
scalefile = '/home/wlan_dev/mos/wintermodel_gpu/temp_scale'+id+'.save'
if os.path.exists(modelfile) and os.path.exists(scalefile):
if int(id)<=90:
#predictmodel(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime)
pool001.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'0'))
elif int(id)>90:
pool002.apply_async(predictmodel,args=(file,filefullpath,modelfile,scalefile,stationlist,demdict,origintime,endtime,'1'))
pool001.close()
pool002.close()
pool001.join()
pool002.join()
#增加计算准确率函数,计算10天前的准确率
now=datetime.datetime.now()
logger.info(now)
initialtime001=starttime+datetime.timedelta(days=-11)
initial = datetime.datetime.strftime(initialtime001, '%Y-%m-%d %H:%M:%S')
# logger.info(initial) | nnect('172.16.8.28', 'admin', 'moji_China_123', 'moge',3307)
for n in range(10):
print n
pdatetime = initialtime001 + datetime.timedelta(days=n)
pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(days=n + 1)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_avg_temp_24h_from3h_accurity(db, initial, pdate, odate, n)
#新EC数据前90个预报时次是逐小时的D2S开头,中间18个预报时次是逐3小时的,后面14个预报时次是6小时的
for u in range(90+18+16):
if u <=90:
#pdatetime = initialtime001 + datetime.timedelta(hours=u)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= (u + 1))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, u + 1)
elif u>90 and u<=108:
#pdatetime = initialtime001 + datetime.timedelta(hours=90+(u-90)*3)
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours= 90+(u + 1-90)*3)
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
calculate_temp_h_accurity(db, initial, odate, 90+(u + 1-90)*3 )
else:
#pdatetime = initialtime001 + datetime.timedelta(hours=6 * (u-(u - 90)/3-90))
#pdate = datetime.datetime.strftime(pdatetime, '%Y-%m-%d %H:%M:%S')
odatetime = initialtime001 + datetime.timedelta(hours=6 * (u+1-(u-90)/3-90))
odate = datetime.datetime.strftime(odatetime, '%Y-%m-%d %H:%M:%S')
#print initial, pdate, odate, 6 * (u + 1 - (u-90)/3-90)
calculate_temp_h_accurity(db, initial, odate,6 * (u + 1 - (u-90)/3-90))
db.close()
if __name__ == "__main__":
# 加日志
logger = logging.getLogger('apscheduler.executors.default')
# 指定logger输出格式
formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
# 文件日志learning
logfile = '/home/wlan_dev/log/gpu_winter.log'
file_handler = logging.FileHandler(logfile)
file_handler.setFormatter(formatter) # 可以通过setFormatter指定输出格式
# 控制台日志
console_handler = logging.StreamHandler(sys.stdout)
console_handler.formatter = formatter # 也可以直接给formatter赋值
# 为logger添加的日志处理器
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# 指定日志的最低输出级别,默认为WARN级别
logger.setLevel(logging.INFO)
'''
# 设定定时任务,EC数据每天4次,分布为00、06、12、18时
# 计划设定在15点、21点、第二天的3点、9点执行,
暂时线上只算12时的,其中5-9月采用夏季模型,10月到4月采用冬季模型,每天的凌晨3点计算前一天的12时数据。
定时任务用aps
'''
#path='/home/wlan_dev/mosdata'
path='/opt/meteo/cluster/data/ecmwf/orig'
#readymodel(path)
scheduler=BackgroundScheduler()
scheduler.add_job(readymodel,'cron',hour='3,5,8,11,14',args=(path,))
try:
scheduler.start()
while True:
time.sleep(2)
except Exception as ex:
logger.info(ex.message)
|
db = MySQLdb.co | conditional_block |
levels.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bytes"
"fmt"
"math"
"math/rand"
"os"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/trace"
"github.com/dgraph-io/badger/pb"
"github.com/dgraph-io/badger/table"
"github.com/dgraph-io/badger/y"
"github.com/pkg/errors"
)
type levelsController struct {
nextFileID uint64 // Atomic
elog trace.EventLog
// The following are initialized once and const.
levels []*levelHandler
kv *DB
cstatus compactStatus
}
var (
// This is for getting timings between stalls.
lastUnstalled time.Time
)
// revertToManifest checks that all necessary table files exist and removes all table files not
// referenced by the manifest. idMap is a set of table file id's that were read from the directory
// listing.
func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
// 1. Check all files in manifest exist.
for id := range mf.Tables {
if _, ok := idMap[id]; !ok {
return fmt.Errorf("file does not exist for table %d", id)
}
}
// 2. Delete files that shouldn't exist.
for id := range idMap {
if _, ok := mf.Tables[id]; !ok {
kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id)
filename := table.NewFilename(id, kv.opt.Dir)
if err := os.Remove(filename); err != nil {
return y.Wrapf(err, "While removing table %d", id)
}
}
}
return nil
}
func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
s := &levelsController{
kv: db,
elog: db.elog,
levels: make([]*levelHandler, db.opt.MaxLevels),
}
s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
for i := 0; i < db.opt.MaxLevels; i++ {
s.levels[i] = newLevelHandler(db, i)
if i == 0 {
// Do nothing.
} else if i == 1 {
// Level 1 probably shouldn't be too much bigger than level 0.
s.levels[i].maxTotalSize = db.opt.LevelOneSize
} else {
s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
}
s.cstatus.levels[i] = new(levelCompactStatus)
}
// Compare manifest against directory, check for existent/non-existent files, and remove.
if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
return nil, err
}
// Some files may be deleted. Let's reload.
var flags uint32 = y.Sync
if db.opt.ReadOnly {
flags |= y.ReadOnly
}
var mu sync.Mutex
tables := make([][]*table.Table, db.opt.MaxLevels)
var maxFileID uint64
// We found that using 3 goroutines allows disk throughput to be utilized to its max.
// Disk utilization is the main thing we should focus on, while trying to read the data. That's
// the one factor that remains constant between HDD and SSD.
throttle := y.NewThrottle(3)
start := time.Now()
var numOpened int32
tick := time.NewTicker(3 * time.Second)
defer tick.Stop()
for fileID, tf := range mf.Tables {
fname := table.NewFilename(fileID, db.opt.Dir)
select {
case <-tick.C:
db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
len(mf.Tables), time.Since(start).Round(time.Millisecond))
default:
}
if err := throttle.Do(); err != nil {
closeAllTables(tables)
return nil, err
}
if fileID > maxFileID {
maxFileID = fileID
}
go func(fname string, tf TableManifest) {
var rerr error
defer func() {
throttle.Done(rerr)
atomic.AddInt32(&numOpened, 1)
}()
fd, err := y.OpenExistingFile(fname, flags)
if err != nil {
rerr = errors.Wrapf(err, "Opening file: %q", fname)
return
}
t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum)
if err != nil {
if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
db.opt.Errorf(err.Error())
db.opt.Errorf("Ignoring table %s", fd.Name())
// Do not set rerr. We will continue without this table.
} else {
rerr = errors.Wrapf(err, "Opening table: %q", fname)
}
return
}
mu.Lock()
tables[tf.Level] = append(tables[tf.Level], t)
mu.Unlock()
}(fname, tf)
}
if err := throttle.Finish(); err != nil {
closeAllTables(tables)
return nil, err
}
db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
time.Since(start).Round(time.Millisecond))
s.nextFileID = maxFileID + 1
for i, tbls := range tables {
s.levels[i].initTables(tbls)
}
// Make sure key ranges do not overlap etc.
if err := s.validate(); err != nil {
_ = s.cleanupLevels()
return nil, errors.Wrap(err, "Level validation")
}
// Sync directory (because we have at least removed some files, or previously created the
// manifest file).
if err := syncDir(db.opt.Dir); err != nil {
_ = s.close()
return nil, err
}
return s, nil
}
// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef()
// because that would delete the underlying files.) We ignore errors, which is OK because tables
// are read-only.
func closeAllTables(tables [][]*table.Table) {
for _, tableSlice := range tables {
for _, table := range tableSlice {
_ = table.Close()
}
}
}
func (s *levelsController) cleanupLevels() error {
var firstErr error
for _, l := range s.levels {
if err := l.close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// dropTree picks all tables from all levels, creates a manifest changeset,
// applies it, and then decrements the refs of these tables, which would result
// in their deletion.
func (s *levelsController) dropTree() (int, error) {
// First pick all tables, so we can create a manifest changelog.
var all []*table.Table
for _, l := range s.levels {
l.RLock()
all = append(all, l.tables...)
l.RUnlock()
}
if len(all) == 0 {
return 0, nil
}
// Generate the manifest changes.
changes := []*pb.ManifestChange{}
for _, table := range all {
changes = append(changes, newDeleteChange(table.ID()))
}
changeSet := pb.ManifestChangeSet{Changes: changes}
if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
return 0, err
}
// Now that manifest has been successfully written, we can delete the tables.
for _, l := range s.levels {
l.Lock()
l.totalSize = 0
l.tables = l.tables[:0]
l.Unlock()
}
for _, table := range all {
if err := table.DecrRef(); err != nil {
return 0, err
}
}
return len(all), nil
}
// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the
// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the
// provided prefix. For Li->Li compactions, it picks up the tables which would have the prefix. The
// tables who only have keys with this prefix are quickly dropped. The ones which have other keys
// are run through MergeIterator and compacted to create new tables. All the mechanisms of
// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow.
func (s *levelsController) dropPrefix(prefix []byte) error {
opt := s.kv.opt
for _, l := range s.levels {
l.RLock()
if l.level == 0 {
size := len(l.tables)
l.RUnlock()
if size > 0 {
cp := compactionPriority{
level: 0,
score: 1.74,
// A unique number greater than 1.0 does two things. Helps identify this
// function in logs, and forces a compaction.
dropPrefix: prefix,
}
if err := s.doCompact(cp); err != nil {
opt.Warningf("While compacting level 0: %v", err)
return nil
}
}
continue
}
var tables []*table.Table
for _, table := range l.tables {
var absent bool
switch {
case bytes.HasPrefix(table.Smallest(), prefix):
case bytes.HasPrefix(table.Biggest(), prefix):
case bytes.Compare(prefix, table.Smallest()) > 0 &&
bytes.Compare(prefix, table.Biggest()) < 0:
default:
absent = true
}
if !absent {
tables = append(tables, table)
}
}
l.RUnlock()
if len(tables) == 0 {
continue
}
cd := compactDef{
elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
thisLevel: l,
nextLevel: l,
top: []*table.Table{},
bot: tables,
dropPrefix: prefix,
}
if err := s.runCompactDef(l.level, cd); err != nil {
opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
return err
}
}
return nil
}
func (s *levelsController) startCompact(lc *y.Closer) {
n := s.kv.opt.NumCompactors
lc.AddRunning(n - 1)
for i := 0; i < n; i++ {
go s.runWorker(lc)
}
}
func (s *levelsController) runWorker(lc *y.Closer) {
defer lc.Done()
randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
select {
case <-randomDelay.C:
case <-lc.HasBeenClosed():
randomDelay.Stop()
return
}
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
// Can add a done channel or other stuff.
case <-ticker.C:
prios := s.pickCompactLevels()
for _, p := range prios {
if err := s.doCompact(p); err == nil {
break
} else if err == errFillTables {
// pass
} else {
s.kv.opt.Warningf("While running doCompact: %v\n", err)
}
}
case <-lc.HasBeenClosed():
return
}
}
}
// Returns true if level zero may be compacted, without accounting for compactions that already
// might be happening.
func (s *levelsController) isLevel0Compactable() bool {
return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
}
// Returns true if the non-zero level may be compacted. delSize provides the size of the tables
// which are currently being compacted so that we treat them as already having started being
// compacted (because they have been, yet their size is already counted in getTotalSize).
func (l *levelHandler) isCompactable(delSize int64) bool {
return l.getTotalSize()-delSize >= l.maxTotalSize
}
type compactionPriority struct {
level int
score float64
dropPrefix []byte
}
// pickCompactLevel determines which level to compact.
// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
// This function must use identical criteria for guaranteeing compaction's progress that
// addLevel0Table uses.
// cstatus is checked to see if level 0's tables are already being compacted
if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
pri := compactionPriority{
level: 0,
score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
}
prios = append(prios, pri)
}
for i, l := range s.levels[1:] {
// Don't consider those tables that are already being compacted right now.
delSize := s.cstatus.delSize(i + 1)
if l.isCompactable(delSize) {
pri := compactionPriority{
level: i + 1,
score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
}
prios = append(prios, pri)
}
}
sort.Slice(prios, func(i, j int) bool {
return prios[i].score > prios[j].score
})
return prios
}
// compactBuildTables merge topTables and botTables to form a list of new tables.
func (s *levelsController) compactBuildTables(
lev int, cd compactDef) ([]*table.Table, func() error, error) {
topTables := cd.top
botTables := cd.bot
var hasOverlap bool
{
kr := getKeyRange(cd.top)
for i, lh := range s.levels {
if i <= lev { // Skip upper levels.
continue
}
lh.RLock()
left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
lh.RUnlock()
if right-left > 0 {
hasOverlap = true
break
}
}
}
// Try to collect stats so that we can inform value log about GC. That would help us find which
// value log file should be GCed.
discardStats := make(map[uint32]int64)
updateStats := func(vs y.ValueStruct) {
if vs.Meta&bitValuePointer > 0 {
var vp valuePointer
vp.Decode(vs.Value)
discardStats[vp.Fid] += int64(vp.Len)
}
}
// Create iterators across all the tables involved first.
var iters []y.Iterator
if lev == 0 {
iters = appendIteratorsReversed(iters, topTables, false)
} else if len(topTables) > 0 {
y.AssertTrue(len(topTables) == 1)
iters = []y.Iterator{topTables[0].NewIterator(false)}
}
// Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
var valid []*table.Table
for _, table := range botTables {
if len(cd.dropPrefix) > 0 &&
bytes.HasPrefix(table.Smallest(), cd.dropPrefix) &&
bytes.HasPrefix(table.Biggest(), cd.dropPrefix) {
// All the keys in this table have the dropPrefix. So, this table does not need to be
// in the iterator and can be dropped immediately.
continue
}
valid = append(valid, table)
}
iters = append(iters, table.NewConcatIterator(valid, false))
it := y.NewMergeIterator(iters, false)
defer it.Close() // Important to close the iterator to do ref counting.
it.Rewind()
// Pick a discard ts, so we can discard versions below this ts. We should
// never discard any versions starting from above this timestamp, because
// that would affect the snapshot view guarantee provided by transactions.
discardTs := s.kv.orc.discardAtOrBelow()
// Start generating new tables.
type newTableResult struct {
table *table.Table
err error
}
resultCh := make(chan newTableResult)
var numBuilds, numVersions int
var lastKey, skipKey []byte
for it.Valid() {
timeStart := time.Now()
builder := table.NewTableBuilder()
var numKeys, numSkips uint64
for ; it.Valid(); it.Next() {
// See if we need to skip the prefix.
if len(cd.dropPrefix) > 0 && bytes.HasPrefix(it.Key(), cd.dropPrefix) {
numSkips++
updateStats(it.Value())
continue
}
// See if we need to skip this key.
if len(skipKey) > 0 {
if y.SameKey(it.Key(), skipKey) {
numSkips++
updateStats(it.Value())
continue
} else {
skipKey = skipKey[:0]
}
}
if !y.SameKey(it.Key(), lastKey) {
if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
// Only break if we are on a different key, and have reached capacity. We want
// to ensure that all versions of the key are stored in the same sstable, and
// not divided across multiple tables at the same level.
break
}
lastKey = y.SafeCopy(lastKey, it.Key())
numVersions = 0
}
vs := it.Value()
version := y.ParseTs(it.Key())
// Do not discard entries inserted by merge operator. These entries will be
// discarded once they're merged
if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
// Keep track of the number of versions encountered for this key. Only consider the
// versions which are below the minReadTs, otherwise, we might end up discarding the
// only valid version for a running transaction.
numVersions++
lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0
if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) ||
numVersions > s.kv.opt.NumVersionsToKeep ||
lastValidVersion {
// If this version of the key is deleted or expired, skip all the rest of the
// versions. Ensure that we're only removing versions below readTs.
skipKey = y.SafeCopy(skipKey, it.Key())
if lastValidVersion {
// Add this key. We have set skipKey, so the following key versions
// would be skipped.
} else if hasOverlap {
// If this key range has overlap with lower levels, then keep the deletion
// marker with the latest version, discarding the rest. We have set skipKey,
// so the following key versions would be skipped.
} else {
// If no overlap, we can skip all the versions, by continuing here.
numSkips++
updateStats(vs)
continue // Skip adding this key.
}
}
}
numKeys++
y.Check(builder.Add(it.Key(), it.Value()))
}
// It was true that it.Valid() at least once in the loop above, which means we
// called Add() at least once, and builder is not Empty().
s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
numKeys, numSkips, time.Since(timeStart))
if !builder.Empty() {
numBuilds++
fileID := s.reserveFileID()
go func(builder *table.Builder) {
defer builder.Close()
fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
if err != nil {
resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
return
}
if _, err := fd.Write(builder.Finish()); err != nil {
resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
return
}
tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
// decrRef is added below.
resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
}(builder)
}
}
newTables := make([]*table.Table, 0, 20)
// Wait for all table builders to finish.
var firstErr error
for x := 0; x < numBuilds; x++ {
res := <-resultCh
newTables = append(newTables, res.table)
if firstErr == nil {
firstErr = res.err
}
}
if firstErr == nil {
// Ensure created files' directory entries are visible. We don't mind the extra latency
// from not doing this ASAP after all file creation has finished because this is a
// background operation.
firstErr = syncDir(s.kv.opt.Dir)
}
if firstErr != nil {
// An error happened. Delete all the newly created table files (by calling DecrRef
// -- we're the only holders of a ref).
for j := 0; j < numBuilds; j++ {
if newTables[j] != nil {
_ = newTables[j].DecrRef()
}
}
errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
return nil, nil, errorReturn
}
sort.Slice(newTables, func(i, j int) bool {
return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
})
if err := s.kv.vlog.updateDiscardStats(discardStats); err != nil {
return nil, nil, errors.Wrap(err, "failed to update discard stats")
}
s.kv.opt.Debugf("Discard stats: %v", discardStats)
return newTables, func() error { return decrRefs(newTables) }, nil
}
func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
changes := []*pb.ManifestChange{}
for _, table := range newTables {
changes = append(changes,
newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum))
}
for _, table := range cd.top {
changes = append(changes, newDeleteChange(table.ID()))
}
for _, table := range cd.bot {
changes = append(changes, newDeleteChange(table.ID()))
}
return pb.ManifestChangeSet{Changes: changes}
}
type compactDef struct {
elog trace.Trace
thisLevel *levelHandler
nextLevel *levelHandler
top []*table.Table
bot []*table.Table
thisRange keyRange
nextRange keyRange
thisSize int64
dropPrefix []byte
}
func (cd *compactDef) lockLevels() {
cd.thisLevel.RLock()
cd.nextLevel.RLock()
}
func (cd *compactDef) unlockLevels() {
cd.nextLevel.RUnlock()
cd.thisLevel.RUnlock()
}
func (s *levelsController) fillTablesL0(cd *compactDef) bool {
cd.lockLevels()
defer cd.unlockLevels()
cd.top = make([]*table.Table, len(cd.thisLevel.tables))
copy(cd.top, cd.thisLevel.tables)
if len(cd.top) == 0 {
return false
}
cd.thisRange = infRange
kr := getKeyRange(cd.top)
left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
cd.bot = make([]*table.Table, right-left)
copy(cd.bot, cd.nextLevel.tables[left:right])
if len(cd.bot) == 0 {
cd.nextRange = kr
} else {
cd.nextRange = getKeyRange(cd.bot)
}
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
return false
}
return true
}
func (s *levelsController) fillTables(cd *compactDef) bool {
cd.lockLevels()
defer cd.unlockLevels()
tbls := make([]*table.Table, len(cd.thisLevel.tables))
copy(tbls, cd.thisLevel.tables)
if len(tbls) == 0 {
return false
}
// Find the biggest table, and compact that first.
// TODO: Try other table picking strategies.
sort.Slice(tbls, func(i, j int) bool {
return tbls[i].Size() > tbls[j].Size()
})
for _, t := range tbls {
cd.thisSize = t.Size()
cd.thisRange = keyRange{
// We pick all the versions of the smallest and the biggest key.
left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64),
// Note that version zero would be the rightmost key.
right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0),
}
if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
continue
}
cd.top = []*table.Table{t}
left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
cd.bot = make([]*table.Table, right-left)
copy(cd.bot, cd.nextLevel.tables[left:right])
if len(cd.bot) == 0 {
cd.bot = []*table.Table{}
cd.nextRange = cd.thisRange
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
continue
}
return true
}
cd.nextRange = getKeyRange(cd.bot)
if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
continue
} | continue
}
return true
}
return false
}
func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
timeStart := time.Now()
thisLevel := cd.thisLevel
nextLevel := cd.nextLevel
// Table should never be moved directly between levels, always be rewritten to allow discarding
// invalid versions.
newTables, decr, err := s.compactBuildTables(l, cd)
if err != nil {
return err
}
defer func() {
// Only assign to err, if it's not already nil.
if decErr := decr(); err == nil {
err = decErr
}
}()
changeSet := buildChangeSet(&cd, newTables)
// We write to the manifest _before_ we delete files (and after we created files)
if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
return err
}
// See comment earlier in this function about the ordering of these ops, and the order in which
// we access levels when reading.
if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
return err
}
if err := thisLevel.deleteTables(cd.top); err != nil {
return err
}
// Note: For level 0, while doCompact is running, it is possible that new tables are added.
// However, the tables are added only to the end, so it is ok to just delete the first table.
s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
len(newTables), time.Since(timeStart))
return nil
}
var errFillTables = errors.New("Unable to fill tables")
// doCompact picks some table on level l and compacts it away to the next level.
func (s *levelsController) doCompact(p compactionPriority) error {
l := p.level
y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
cd := compactDef{
elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
thisLevel: s.levels[l],
nextLevel: s.levels[l+1],
dropPrefix: p.dropPrefix,
}
cd.elog.SetMaxEvents(100)
defer cd.elog.Finish()
s.kv.opt.Infof("Got compaction priority: %+v", p)
// While picking tables to be compacted, both levels' tables are expected to
// remain unchanged.
if l == 0 {
if !s.fillTablesL0(&cd) {
return errFillTables
}
} else {
if !s.fillTables(&cd) {
return errFillTables
}
}
defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
s.cstatus.toLog(cd.elog)
if err := s.runCompactDef(l, cd); err != nil {
// This compaction couldn't be done successfully.
s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
return err
}
s.cstatus.toLog(cd.elog)
s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
return nil
}
func (s *levelsController) addLevel0Table(t *table.Table) error {
// We update the manifest _before_ the table becomes part of a levelHandler, because at that
// point it could get used in some compaction. This ensures the manifest file gets updated in
// the proper order. (That means this update happens before that of some compaction which
// deletes the table.)
err := s.kv.manifest.addChanges([]*pb.ManifestChange{
newCreateChange(t.ID(), 0, t.Checksum),
})
if err != nil {
return err
}
for !s.levels[0].tryAddLevel0Table(t) {
// Stall. Make sure all levels are healthy before we unstall.
var timeStart time.Time
{
s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
s.cstatus.RLock()
for i := 0; i < s.kv.opt.MaxLevels; i++ {
s.elog.Printf("level=%d. Status=%s Size=%d\n",
i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
}
s.cstatus.RUnlock()
timeStart = time.Now()
}
// Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
// will very quickly fill up level 0 again and if the compaction strategy favors level 0,
// then level 1 is going to super full.
for i := 0; ; i++ {
// Passing 0 for delSize to compactable means we're treating incomplete compactions as
// not having finished -- we wait for them to finish. Also, it's crucial this behavior
// replicates pickCompactLevels' behavior in computing compactability in order to
// guarantee progress.
if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
break
}
time.Sleep(10 * time.Millisecond)
if i%100 == 0 {
prios := s.pickCompactLevels()
s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
i = 0
}
}
{
s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
lastUnstalled = time.Now()
}
}
return nil
}
func (s *levelsController) close() error {
err := s.cleanupLevels()
return errors.Wrap(err, "levelsController.Close")
}
// get returns the found value if any. If not found, we return nil.
func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) {
// It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
// in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
// read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
// parallelize this, we will need to call the h.RLock() function by increasing order of level
// number.)
version := y.ParseTs(key)
for _, h := range s.levels {
vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
if err != nil {
return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
}
if vs.Value == nil && vs.Meta == 0 {
continue
}
if maxVs == nil || vs.Version == version {
return vs, nil
}
if maxVs.Version < vs.Version {
*maxVs = vs
}
}
if maxVs != nil {
return *maxVs, nil
}
return y.ValueStruct{}, nil
}
func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
for i := len(th) - 1; i >= 0; i-- {
// This will increment the reference of the table handler.
out = append(out, th[i].NewIterator(reversed))
}
return out
}
// appendIterators appends iterators to an array of iterators, for merging.
// Note: This obtains references for the table handlers. Remember to close these iterators.
func (s *levelsController) appendIterators(
iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
// Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
// data when there's a compaction.
for _, level := range s.levels {
iters = level.appendIterators(iters, opt)
}
return iters
}
// TableInfo represents the information about a table.
type TableInfo struct {
ID uint64
Level int
Left []byte
Right []byte
KeyCount uint64 // Number of keys in the table
}
func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
for _, l := range s.levels {
l.RLock()
for _, t := range l.tables {
var count uint64
if withKeysCount {
it := t.NewIterator(false)
for it.Rewind(); it.Valid(); it.Next() {
count++
}
}
info := TableInfo{
ID: t.ID(),
Level: l.level,
Left: t.Smallest(),
Right: t.Biggest(),
KeyCount: count,
}
result = append(result, info)
}
l.RUnlock()
}
sort.Slice(result, func(i, j int) bool {
if result[i].Level != result[j].Level {
return result[i].Level < result[j].Level
}
return result[i].ID < result[j].ID
})
return
} | if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) { | random_line_split |
levels.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bytes"
"fmt"
"math"
"math/rand"
"os"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/trace"
"github.com/dgraph-io/badger/pb"
"github.com/dgraph-io/badger/table"
"github.com/dgraph-io/badger/y"
"github.com/pkg/errors"
)
type levelsController struct {
nextFileID uint64 // Atomic
elog trace.EventLog
// The following are initialized once and const.
levels []*levelHandler
kv *DB
cstatus compactStatus
}
var (
// This is for getting timings between stalls.
lastUnstalled time.Time
)
// revertToManifest checks that all necessary table files exist and removes all table files not
// referenced by the manifest. idMap is a set of table file id's that were read from the directory
// listing.
func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
// 1. Check all files in manifest exist.
for id := range mf.Tables {
if _, ok := idMap[id]; !ok {
return fmt.Errorf("file does not exist for table %d", id)
}
}
// 2. Delete files that shouldn't exist.
for id := range idMap {
if _, ok := mf.Tables[id]; !ok {
kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id)
filename := table.NewFilename(id, kv.opt.Dir)
if err := os.Remove(filename); err != nil {
return y.Wrapf(err, "While removing table %d", id)
}
}
}
return nil
}
func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
s := &levelsController{
kv: db,
elog: db.elog,
levels: make([]*levelHandler, db.opt.MaxLevels),
}
s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
for i := 0; i < db.opt.MaxLevels; i++ {
s.levels[i] = newLevelHandler(db, i)
if i == 0 {
// Do nothing.
} else if i == 1 {
// Level 1 probably shouldn't be too much bigger than level 0.
s.levels[i].maxTotalSize = db.opt.LevelOneSize
} else {
s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
}
s.cstatus.levels[i] = new(levelCompactStatus)
}
// Compare manifest against directory, check for existent/non-existent files, and remove.
if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
return nil, err
}
// Some files may be deleted. Let's reload.
var flags uint32 = y.Sync
if db.opt.ReadOnly {
flags |= y.ReadOnly
}
var mu sync.Mutex
tables := make([][]*table.Table, db.opt.MaxLevels)
var maxFileID uint64
// We found that using 3 goroutines allows disk throughput to be utilized to its max.
// Disk utilization is the main thing we should focus on, while trying to read the data. That's
// the one factor that remains constant between HDD and SSD.
throttle := y.NewThrottle(3)
start := time.Now()
var numOpened int32
tick := time.NewTicker(3 * time.Second)
defer tick.Stop()
for fileID, tf := range mf.Tables {
fname := table.NewFilename(fileID, db.opt.Dir)
select {
case <-tick.C:
db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
len(mf.Tables), time.Since(start).Round(time.Millisecond))
default:
}
if err := throttle.Do(); err != nil {
closeAllTables(tables)
return nil, err
}
if fileID > maxFileID {
maxFileID = fileID
}
go func(fname string, tf TableManifest) {
var rerr error
defer func() {
throttle.Done(rerr)
atomic.AddInt32(&numOpened, 1)
}()
fd, err := y.OpenExistingFile(fname, flags)
if err != nil {
rerr = errors.Wrapf(err, "Opening file: %q", fname)
return
}
t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum)
if err != nil {
if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
db.opt.Errorf(err.Error())
db.opt.Errorf("Ignoring table %s", fd.Name())
// Do not set rerr. We will continue without this table.
} else {
rerr = errors.Wrapf(err, "Opening table: %q", fname)
}
return
}
mu.Lock()
tables[tf.Level] = append(tables[tf.Level], t)
mu.Unlock()
}(fname, tf)
}
if err := throttle.Finish(); err != nil {
closeAllTables(tables)
return nil, err
}
db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
time.Since(start).Round(time.Millisecond))
s.nextFileID = maxFileID + 1
for i, tbls := range tables {
s.levels[i].initTables(tbls)
}
// Make sure key ranges do not overlap etc.
if err := s.validate(); err != nil {
_ = s.cleanupLevels()
return nil, errors.Wrap(err, "Level validation")
}
// Sync directory (because we have at least removed some files, or previously created the
// manifest file).
if err := syncDir(db.opt.Dir); err != nil {
_ = s.close()
return nil, err
}
return s, nil
}
// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef()
// because that would delete the underlying files.) We ignore errors, which is OK because tables
// are read-only.
func closeAllTables(tables [][]*table.Table) {
for _, tableSlice := range tables {
for _, table := range tableSlice {
_ = table.Close()
}
}
}
func (s *levelsController) cleanupLevels() error {
var firstErr error
for _, l := range s.levels {
if err := l.close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// dropTree picks all tables from all levels, creates a manifest changeset,
// applies it, and then decrements the refs of these tables, which would result
// in their deletion.
func (s *levelsController) dropTree() (int, error) {
// First pick all tables, so we can create a manifest changelog.
var all []*table.Table
for _, l := range s.levels {
l.RLock()
all = append(all, l.tables...)
l.RUnlock()
}
if len(all) == 0 {
return 0, nil
}
// Generate the manifest changes.
changes := []*pb.ManifestChange{}
for _, table := range all {
changes = append(changes, newDeleteChange(table.ID()))
}
changeSet := pb.ManifestChangeSet{Changes: changes}
if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
return 0, err
}
// Now that manifest has been successfully written, we can delete the tables.
for _, l := range s.levels {
l.Lock()
l.totalSize = 0
l.tables = l.tables[:0]
l.Unlock()
}
for _, table := range all {
if err := table.DecrRef(); err != nil {
return 0, err
}
}
return len(all), nil
}
// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the
// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the
// provided prefix. For Li->Li compactions, it picks up the tables which would have the prefix. The
// tables who only have keys with this prefix are quickly dropped. The ones which have other keys
// are run through MergeIterator and compacted to create new tables. All the mechanisms of
// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow.
func (s *levelsController) dropPrefix(prefix []byte) error {
opt := s.kv.opt
for _, l := range s.levels {
l.RLock()
if l.level == 0 {
size := len(l.tables)
l.RUnlock()
if size > 0 {
cp := compactionPriority{
level: 0,
score: 1.74,
// A unique number greater than 1.0 does two things. Helps identify this
// function in logs, and forces a compaction.
dropPrefix: prefix,
}
if err := s.doCompact(cp); err != nil {
opt.Warningf("While compacting level 0: %v", err)
return nil
}
}
continue
}
var tables []*table.Table
for _, table := range l.tables {
var absent bool
switch {
case bytes.HasPrefix(table.Smallest(), prefix):
case bytes.HasPrefix(table.Biggest(), prefix):
case bytes.Compare(prefix, table.Smallest()) > 0 &&
bytes.Compare(prefix, table.Biggest()) < 0:
default:
absent = true
}
if !absent {
tables = append(tables, table)
}
}
l.RUnlock()
if len(tables) == 0 {
continue
}
cd := compactDef{
elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
thisLevel: l,
nextLevel: l,
top: []*table.Table{},
bot: tables,
dropPrefix: prefix,
}
if err := s.runCompactDef(l.level, cd); err != nil {
opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
return err
}
}
return nil
}
func (s *levelsController) startCompact(lc *y.Closer) {
n := s.kv.opt.NumCompactors
lc.AddRunning(n - 1)
for i := 0; i < n; i++ {
go s.runWorker(lc)
}
}
func (s *levelsController) runWorker(lc *y.Closer) {
defer lc.Done()
randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
select {
case <-randomDelay.C:
case <-lc.HasBeenClosed():
randomDelay.Stop()
return
}
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
// Can add a done channel or other stuff.
case <-ticker.C:
prios := s.pickCompactLevels()
for _, p := range prios {
if err := s.doCompact(p); err == nil {
break
} else if err == errFillTables {
// pass
} else {
s.kv.opt.Warningf("While running doCompact: %v\n", err)
}
}
case <-lc.HasBeenClosed():
return
}
}
}
// Returns true if level zero may be compacted, without accounting for compactions that already
// might be happening.
func (s *levelsController) isLevel0Compactable() bool {
return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
}
// Returns true if the non-zero level may be compacted. delSize provides the size of the tables
// which are currently being compacted so that we treat them as already having started being
// compacted (because they have been, yet their size is already counted in getTotalSize).
func (l *levelHandler) isCompactable(delSize int64) bool {
return l.getTotalSize()-delSize >= l.maxTotalSize
}
type compactionPriority struct {
level int
score float64
dropPrefix []byte
}
// pickCompactLevel determines which level to compact.
// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
// This function must use identical criteria for guaranteeing compaction's progress that
// addLevel0Table uses.
// cstatus is checked to see if level 0's tables are already being compacted
if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
pri := compactionPriority{
level: 0,
score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
}
prios = append(prios, pri)
}
for i, l := range s.levels[1:] {
// Don't consider those tables that are already being compacted right now.
delSize := s.cstatus.delSize(i + 1)
if l.isCompactable(delSize) {
pri := compactionPriority{
level: i + 1,
score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
}
prios = append(prios, pri)
}
}
sort.Slice(prios, func(i, j int) bool {
return prios[i].score > prios[j].score
})
return prios
}
// compactBuildTables merge topTables and botTables to form a list of new tables.
func (s *levelsController) compactBuildTables(
lev int, cd compactDef) ([]*table.Table, func() error, error) {
topTables := cd.top
botTables := cd.bot
var hasOverlap bool
{
kr := getKeyRange(cd.top)
for i, lh := range s.levels {
if i <= lev { // Skip upper levels.
continue
}
lh.RLock()
left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
lh.RUnlock()
if right-left > 0 {
hasOverlap = true
break
}
}
}
// Try to collect stats so that we can inform value log about GC. That would help us find which
// value log file should be GCed.
discardStats := make(map[uint32]int64)
updateStats := func(vs y.ValueStruct) {
if vs.Meta&bitValuePointer > 0 {
var vp valuePointer
vp.Decode(vs.Value)
discardStats[vp.Fid] += int64(vp.Len)
}
}
// Create iterators across all the tables involved first.
var iters []y.Iterator
if lev == 0 {
iters = appendIteratorsReversed(iters, topTables, false)
} else if len(topTables) > 0 {
y.AssertTrue(len(topTables) == 1)
iters = []y.Iterator{topTables[0].NewIterator(false)}
}
// Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
var valid []*table.Table
for _, table := range botTables {
if len(cd.dropPrefix) > 0 &&
bytes.HasPrefix(table.Smallest(), cd.dropPrefix) &&
bytes.HasPrefix(table.Biggest(), cd.dropPrefix) {
// All the keys in this table have the dropPrefix. So, this table does not need to be
// in the iterator and can be dropped immediately.
continue
}
valid = append(valid, table)
}
iters = append(iters, table.NewConcatIterator(valid, false))
it := y.NewMergeIterator(iters, false)
defer it.Close() // Important to close the iterator to do ref counting.
it.Rewind()
// Pick a discard ts, so we can discard versions below this ts. We should
// never discard any versions starting from above this timestamp, because
// that would affect the snapshot view guarantee provided by transactions.
discardTs := s.kv.orc.discardAtOrBelow()
// Start generating new tables.
type newTableResult struct {
table *table.Table
err error
}
resultCh := make(chan newTableResult)
var numBuilds, numVersions int
var lastKey, skipKey []byte
for it.Valid() {
timeStart := time.Now()
builder := table.NewTableBuilder()
var numKeys, numSkips uint64
for ; it.Valid(); it.Next() {
// See if we need to skip the prefix.
if len(cd.dropPrefix) > 0 && bytes.HasPrefix(it.Key(), cd.dropPrefix) {
numSkips++
updateStats(it.Value())
continue
}
// See if we need to skip this key.
if len(skipKey) > 0 {
if y.SameKey(it.Key(), skipKey) {
numSkips++
updateStats(it.Value())
continue
} else {
skipKey = skipKey[:0]
}
}
if !y.SameKey(it.Key(), lastKey) {
if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
// Only break if we are on a different key, and have reached capacity. We want
// to ensure that all versions of the key are stored in the same sstable, and
// not divided across multiple tables at the same level.
break
}
lastKey = y.SafeCopy(lastKey, it.Key())
numVersions = 0
}
vs := it.Value()
version := y.ParseTs(it.Key())
// Do not discard entries inserted by merge operator. These entries will be
// discarded once they're merged
if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
// Keep track of the number of versions encountered for this key. Only consider the
// versions which are below the minReadTs, otherwise, we might end up discarding the
// only valid version for a running transaction.
numVersions++
lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0
if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) ||
numVersions > s.kv.opt.NumVersionsToKeep ||
lastValidVersion {
// If this version of the key is deleted or expired, skip all the rest of the
// versions. Ensure that we're only removing versions below readTs.
skipKey = y.SafeCopy(skipKey, it.Key())
if lastValidVersion {
// Add this key. We have set skipKey, so the following key versions
// would be skipped.
} else if hasOverlap {
// If this key range has overlap with lower levels, then keep the deletion
// marker with the latest version, discarding the rest. We have set skipKey,
// so the following key versions would be skipped.
} else {
// If no overlap, we can skip all the versions, by continuing here.
numSkips++
updateStats(vs)
continue // Skip adding this key.
}
}
}
numKeys++
y.Check(builder.Add(it.Key(), it.Value()))
}
// It was true that it.Valid() at least once in the loop above, which means we
// called Add() at least once, and builder is not Empty().
s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
numKeys, numSkips, time.Since(timeStart))
if !builder.Empty() {
numBuilds++
fileID := s.reserveFileID()
go func(builder *table.Builder) {
defer builder.Close()
fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
if err != nil {
resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
return
}
if _, err := fd.Write(builder.Finish()); err != nil {
resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
return
}
tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
// decrRef is added below.
resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
}(builder)
}
}
newTables := make([]*table.Table, 0, 20)
// Wait for all table builders to finish.
var firstErr error
for x := 0; x < numBuilds; x++ {
res := <-resultCh
newTables = append(newTables, res.table)
if firstErr == nil {
firstErr = res.err
}
}
if firstErr == nil {
// Ensure created files' directory entries are visible. We don't mind the extra latency
// from not doing this ASAP after all file creation has finished because this is a
// background operation.
firstErr = syncDir(s.kv.opt.Dir)
}
if firstErr != nil {
// An error happened. Delete all the newly created table files (by calling DecrRef
// -- we're the only holders of a ref).
for j := 0; j < numBuilds; j++ {
if newTables[j] != nil {
_ = newTables[j].DecrRef()
}
}
errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
return nil, nil, errorReturn
}
sort.Slice(newTables, func(i, j int) bool {
return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
})
if err := s.kv.vlog.updateDiscardStats(discardStats); err != nil {
return nil, nil, errors.Wrap(err, "failed to update discard stats")
}
s.kv.opt.Debugf("Discard stats: %v", discardStats)
return newTables, func() error { return decrRefs(newTables) }, nil
}
func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
changes := []*pb.ManifestChange{}
for _, table := range newTables {
changes = append(changes,
newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum))
}
for _, table := range cd.top {
changes = append(changes, newDeleteChange(table.ID()))
}
for _, table := range cd.bot {
changes = append(changes, newDeleteChange(table.ID()))
}
return pb.ManifestChangeSet{Changes: changes}
}
type compactDef struct {
elog trace.Trace
thisLevel *levelHandler
nextLevel *levelHandler
top []*table.Table
bot []*table.Table
thisRange keyRange
nextRange keyRange
thisSize int64
dropPrefix []byte
}
func (cd *compactDef) lockLevels() {
cd.thisLevel.RLock()
cd.nextLevel.RLock()
}
func (cd *compactDef) unlockLevels() {
cd.nextLevel.RUnlock()
cd.thisLevel.RUnlock()
}
func (s *levelsController) fillTablesL0(cd *compactDef) bool {
cd.lockLevels()
defer cd.unlockLevels()
cd.top = make([]*table.Table, len(cd.thisLevel.tables))
copy(cd.top, cd.thisLevel.tables)
if len(cd.top) == 0 {
return false
}
cd.thisRange = infRange
kr := getKeyRange(cd.top)
left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
cd.bot = make([]*table.Table, right-left)
copy(cd.bot, cd.nextLevel.tables[left:right])
if len(cd.bot) == 0 {
cd.nextRange = kr
} else {
cd.nextRange = getKeyRange(cd.bot)
}
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
return false
}
return true
}
func (s *levelsController) fillTables(cd *compactDef) bool {
cd.lockLevels()
defer cd.unlockLevels()
tbls := make([]*table.Table, len(cd.thisLevel.tables))
copy(tbls, cd.thisLevel.tables)
if len(tbls) == 0 {
return false
}
// Find the biggest table, and compact that first.
// TODO: Try other table picking strategies.
sort.Slice(tbls, func(i, j int) bool {
return tbls[i].Size() > tbls[j].Size()
})
for _, t := range tbls {
cd.thisSize = t.Size()
cd.thisRange = keyRange{
// We pick all the versions of the smallest and the biggest key.
left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64),
// Note that version zero would be the rightmost key.
right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0),
}
if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
continue
}
cd.top = []*table.Table{t}
left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
cd.bot = make([]*table.Table, right-left)
copy(cd.bot, cd.nextLevel.tables[left:right])
if len(cd.bot) == 0 {
cd.bot = []*table.Table{}
cd.nextRange = cd.thisRange
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
continue
}
return true
}
cd.nextRange = getKeyRange(cd.bot)
if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
continue
}
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
continue
}
return true
}
return false
}
func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
timeStart := time.Now()
thisLevel := cd.thisLevel
nextLevel := cd.nextLevel
// Table should never be moved directly between levels, always be rewritten to allow discarding
// invalid versions.
newTables, decr, err := s.compactBuildTables(l, cd)
if err != nil {
return err
}
defer func() {
// Only assign to err, if it's not already nil.
if decErr := decr(); err == nil {
err = decErr
}
}()
changeSet := buildChangeSet(&cd, newTables)
// We write to the manifest _before_ we delete files (and after we created files)
if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
return err
}
// See comment earlier in this function about the ordering of these ops, and the order in which
// we access levels when reading.
if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
return err
}
if err := thisLevel.deleteTables(cd.top); err != nil {
return err
}
// Note: For level 0, while doCompact is running, it is possible that new tables are added.
// However, the tables are added only to the end, so it is ok to just delete the first table.
s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
len(newTables), time.Since(timeStart))
return nil
}
var errFillTables = errors.New("Unable to fill tables")
// doCompact picks some table on level l and compacts it away to the next level.
func (s *levelsController) doCompact(p compactionPriority) error {
l := p.level
y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
cd := compactDef{
elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
thisLevel: s.levels[l],
nextLevel: s.levels[l+1],
dropPrefix: p.dropPrefix,
}
cd.elog.SetMaxEvents(100)
defer cd.elog.Finish()
s.kv.opt.Infof("Got compaction priority: %+v", p)
// While picking tables to be compacted, both levels' tables are expected to
// remain unchanged.
if l == 0 {
if !s.fillTablesL0(&cd) {
return errFillTables
}
} else {
if !s.fillTables(&cd) {
return errFillTables
}
}
defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
s.cstatus.toLog(cd.elog)
if err := s.runCompactDef(l, cd); err != nil {
// This compaction couldn't be done successfully.
s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
return err
}
s.cstatus.toLog(cd.elog)
s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
return nil
}
func (s *levelsController) addLevel0Table(t *table.Table) error {
// We update the manifest _before_ the table becomes part of a levelHandler, because at that
// point it could get used in some compaction. This ensures the manifest file gets updated in
// the proper order. (That means this update happens before that of some compaction which
// deletes the table.)
err := s.kv.manifest.addChanges([]*pb.ManifestChange{
newCreateChange(t.ID(), 0, t.Checksum),
})
if err != nil {
return err
}
for !s.levels[0].tryAddLevel0Table(t) {
// Stall. Make sure all levels are healthy before we unstall.
var timeStart time.Time
{
s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
s.cstatus.RLock()
for i := 0; i < s.kv.opt.MaxLevels; i++ {
s.elog.Printf("level=%d. Status=%s Size=%d\n",
i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
}
s.cstatus.RUnlock()
timeStart = time.Now()
}
// Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
// will very quickly fill up level 0 again and if the compaction strategy favors level 0,
// then level 1 is going to super full.
for i := 0; ; i++ {
// Passing 0 for delSize to compactable means we're treating incomplete compactions as
// not having finished -- we wait for them to finish. Also, it's crucial this behavior
// replicates pickCompactLevels' behavior in computing compactability in order to
// guarantee progress.
if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
break
}
time.Sleep(10 * time.Millisecond)
if i%100 == 0 {
prios := s.pickCompactLevels()
s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
i = 0
}
}
{
s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
lastUnstalled = time.Now()
}
}
return nil
}
func (s *levelsController) close() error {
err := s.cleanupLevels()
return errors.Wrap(err, "levelsController.Close")
}
// get returns the found value if any. If not found, we return nil.
func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) {
// It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
// in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
// read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
// parallelize this, we will need to call the h.RLock() function by increasing order of level
// number.)
version := y.ParseTs(key)
for _, h := range s.levels {
vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
if err != nil {
return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
}
if vs.Value == nil && vs.Meta == 0 {
continue
}
if maxVs == nil || vs.Version == version {
return vs, nil
}
if maxVs.Version < vs.Version {
*maxVs = vs
}
}
if maxVs != nil {
return *maxVs, nil
}
return y.ValueStruct{}, nil
}
func | (out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
for i := len(th) - 1; i >= 0; i-- {
// This will increment the reference of the table handler.
out = append(out, th[i].NewIterator(reversed))
}
return out
}
// appendIterators appends iterators to an array of iterators, for merging.
// Note: This obtains references for the table handlers. Remember to close these iterators.
func (s *levelsController) appendIterators(
iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
// Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
// data when there's a compaction.
for _, level := range s.levels {
iters = level.appendIterators(iters, opt)
}
return iters
}
// TableInfo represents the information about a table.
type TableInfo struct {
ID uint64
Level int
Left []byte
Right []byte
KeyCount uint64 // Number of keys in the table
}
func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
for _, l := range s.levels {
l.RLock()
for _, t := range l.tables {
var count uint64
if withKeysCount {
it := t.NewIterator(false)
for it.Rewind(); it.Valid(); it.Next() {
count++
}
}
info := TableInfo{
ID: t.ID(),
Level: l.level,
Left: t.Smallest(),
Right: t.Biggest(),
KeyCount: count,
}
result = append(result, info)
}
l.RUnlock()
}
sort.Slice(result, func(i, j int) bool {
if result[i].Level != result[j].Level {
return result[i].Level < result[j].Level
}
return result[i].ID < result[j].ID
})
return
}
| appendIteratorsReversed | identifier_name |
levels.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bytes"
"fmt"
"math"
"math/rand"
"os"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/trace"
"github.com/dgraph-io/badger/pb"
"github.com/dgraph-io/badger/table"
"github.com/dgraph-io/badger/y"
"github.com/pkg/errors"
)
type levelsController struct {
nextFileID uint64 // Atomic
elog trace.EventLog
// The following are initialized once and const.
levels []*levelHandler
kv *DB
cstatus compactStatus
}
var (
// This is for getting timings between stalls.
lastUnstalled time.Time
)
// revertToManifest checks that all necessary table files exist and removes all table files not
// referenced by the manifest. idMap is a set of table file id's that were read from the directory
// listing.
func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
// 1. Check all files in manifest exist.
for id := range mf.Tables {
if _, ok := idMap[id]; !ok {
return fmt.Errorf("file does not exist for table %d", id)
}
}
// 2. Delete files that shouldn't exist.
for id := range idMap {
if _, ok := mf.Tables[id]; !ok {
kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id)
filename := table.NewFilename(id, kv.opt.Dir)
if err := os.Remove(filename); err != nil {
return y.Wrapf(err, "While removing table %d", id)
}
}
}
return nil
}
func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
s := &levelsController{
kv: db,
elog: db.elog,
levels: make([]*levelHandler, db.opt.MaxLevels),
}
s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
for i := 0; i < db.opt.MaxLevels; i++ {
s.levels[i] = newLevelHandler(db, i)
if i == 0 {
// Do nothing.
} else if i == 1 {
// Level 1 probably shouldn't be too much bigger than level 0.
s.levels[i].maxTotalSize = db.opt.LevelOneSize
} else {
s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
}
s.cstatus.levels[i] = new(levelCompactStatus)
}
// Compare manifest against directory, check for existent/non-existent files, and remove.
if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
return nil, err
}
// Some files may be deleted. Let's reload.
var flags uint32 = y.Sync
if db.opt.ReadOnly {
flags |= y.ReadOnly
}
var mu sync.Mutex
tables := make([][]*table.Table, db.opt.MaxLevels)
var maxFileID uint64
// We found that using 3 goroutines allows disk throughput to be utilized to its max.
// Disk utilization is the main thing we should focus on, while trying to read the data. That's
// the one factor that remains constant between HDD and SSD.
throttle := y.NewThrottle(3)
start := time.Now()
var numOpened int32
tick := time.NewTicker(3 * time.Second)
defer tick.Stop()
for fileID, tf := range mf.Tables {
fname := table.NewFilename(fileID, db.opt.Dir)
select {
case <-tick.C:
db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
len(mf.Tables), time.Since(start).Round(time.Millisecond))
default:
}
if err := throttle.Do(); err != nil {
closeAllTables(tables)
return nil, err
}
if fileID > maxFileID {
maxFileID = fileID
}
go func(fname string, tf TableManifest) {
var rerr error
defer func() {
throttle.Done(rerr)
atomic.AddInt32(&numOpened, 1)
}()
fd, err := y.OpenExistingFile(fname, flags)
if err != nil {
rerr = errors.Wrapf(err, "Opening file: %q", fname)
return
}
t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum)
if err != nil {
if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
db.opt.Errorf(err.Error())
db.opt.Errorf("Ignoring table %s", fd.Name())
// Do not set rerr. We will continue without this table.
} else {
rerr = errors.Wrapf(err, "Opening table: %q", fname)
}
return
}
mu.Lock()
tables[tf.Level] = append(tables[tf.Level], t)
mu.Unlock()
}(fname, tf)
}
if err := throttle.Finish(); err != nil {
closeAllTables(tables)
return nil, err
}
db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
time.Since(start).Round(time.Millisecond))
s.nextFileID = maxFileID + 1
for i, tbls := range tables {
s.levels[i].initTables(tbls)
}
// Make sure key ranges do not overlap etc.
if err := s.validate(); err != nil {
_ = s.cleanupLevels()
return nil, errors.Wrap(err, "Level validation")
}
// Sync directory (because we have at least removed some files, or previously created the
// manifest file).
if err := syncDir(db.opt.Dir); err != nil {
_ = s.close()
return nil, err
}
return s, nil
}
// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef()
// because that would delete the underlying files.) We ignore errors, which is OK because tables
// are read-only.
func closeAllTables(tables [][]*table.Table) {
for _, tableSlice := range tables {
for _, table := range tableSlice {
_ = table.Close()
}
}
}
func (s *levelsController) cleanupLevels() error {
var firstErr error
for _, l := range s.levels {
if err := l.close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// dropTree picks all tables from all levels, creates a manifest changeset,
// applies it, and then decrements the refs of these tables, which would result
// in their deletion.
func (s *levelsController) dropTree() (int, error) {
// First pick all tables, so we can create a manifest changelog.
var all []*table.Table
for _, l := range s.levels {
l.RLock()
all = append(all, l.tables...)
l.RUnlock()
}
if len(all) == 0 {
return 0, nil
}
// Generate the manifest changes.
changes := []*pb.ManifestChange{}
for _, table := range all {
changes = append(changes, newDeleteChange(table.ID()))
}
changeSet := pb.ManifestChangeSet{Changes: changes}
if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
return 0, err
}
// Now that manifest has been successfully written, we can delete the tables.
for _, l := range s.levels {
l.Lock()
l.totalSize = 0
l.tables = l.tables[:0]
l.Unlock()
}
for _, table := range all {
if err := table.DecrRef(); err != nil {
return 0, err
}
}
return len(all), nil
}
// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the
// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the
// provided prefix. For Li->Li compactions, it picks up the tables which would have the prefix. The
// tables who only have keys with this prefix are quickly dropped. The ones which have other keys
// are run through MergeIterator and compacted to create new tables. All the mechanisms of
// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow.
func (s *levelsController) dropPrefix(prefix []byte) error {
opt := s.kv.opt
for _, l := range s.levels {
l.RLock()
if l.level == 0 {
size := len(l.tables)
l.RUnlock()
if size > 0 {
cp := compactionPriority{
level: 0,
score: 1.74,
// A unique number greater than 1.0 does two things. Helps identify this
// function in logs, and forces a compaction.
dropPrefix: prefix,
}
if err := s.doCompact(cp); err != nil {
opt.Warningf("While compacting level 0: %v", err)
return nil
}
}
continue
}
var tables []*table.Table
for _, table := range l.tables {
var absent bool
switch {
case bytes.HasPrefix(table.Smallest(), prefix):
case bytes.HasPrefix(table.Biggest(), prefix):
case bytes.Compare(prefix, table.Smallest()) > 0 &&
bytes.Compare(prefix, table.Biggest()) < 0:
default:
absent = true
}
if !absent {
tables = append(tables, table)
}
}
l.RUnlock()
if len(tables) == 0 {
continue
}
cd := compactDef{
elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
thisLevel: l,
nextLevel: l,
top: []*table.Table{},
bot: tables,
dropPrefix: prefix,
}
if err := s.runCompactDef(l.level, cd); err != nil {
opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
return err
}
}
return nil
}
func (s *levelsController) startCompact(lc *y.Closer) {
n := s.kv.opt.NumCompactors
lc.AddRunning(n - 1)
for i := 0; i < n; i++ {
go s.runWorker(lc)
}
}
func (s *levelsController) runWorker(lc *y.Closer) {
defer lc.Done()
randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
select {
case <-randomDelay.C:
case <-lc.HasBeenClosed():
randomDelay.Stop()
return
}
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
// Can add a done channel or other stuff.
case <-ticker.C:
prios := s.pickCompactLevels()
for _, p := range prios {
if err := s.doCompact(p); err == nil {
break
} else if err == errFillTables {
// pass
} else {
s.kv.opt.Warningf("While running doCompact: %v\n", err)
}
}
case <-lc.HasBeenClosed():
return
}
}
}
// Returns true if level zero may be compacted, without accounting for compactions that already
// might be happening.
func (s *levelsController) isLevel0Compactable() bool {
return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
}
// Returns true if the non-zero level may be compacted. delSize provides the size of the tables
// which are currently being compacted so that we treat them as already having started being
// compacted (because they have been, yet their size is already counted in getTotalSize).
func (l *levelHandler) isCompactable(delSize int64) bool {
return l.getTotalSize()-delSize >= l.maxTotalSize
}
type compactionPriority struct {
level int
score float64
dropPrefix []byte
}
// pickCompactLevel determines which level to compact.
// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
// This function must use identical criteria for guaranteeing compaction's progress that
// addLevel0Table uses.
// cstatus is checked to see if level 0's tables are already being compacted
if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
pri := compactionPriority{
level: 0,
score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
}
prios = append(prios, pri)
}
for i, l := range s.levels[1:] {
// Don't consider those tables that are already being compacted right now.
delSize := s.cstatus.delSize(i + 1)
if l.isCompactable(delSize) {
pri := compactionPriority{
level: i + 1,
score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
}
prios = append(prios, pri)
}
}
sort.Slice(prios, func(i, j int) bool {
return prios[i].score > prios[j].score
})
return prios
}
// compactBuildTables merge topTables and botTables to form a list of new tables.
func (s *levelsController) compactBuildTables(
lev int, cd compactDef) ([]*table.Table, func() error, error) {
topTables := cd.top
botTables := cd.bot
var hasOverlap bool
{
kr := getKeyRange(cd.top)
for i, lh := range s.levels {
if i <= lev { // Skip upper levels.
continue
}
lh.RLock()
left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
lh.RUnlock()
if right-left > 0 {
hasOverlap = true
break
}
}
}
// Try to collect stats so that we can inform value log about GC. That would help us find which
// value log file should be GCed.
discardStats := make(map[uint32]int64)
updateStats := func(vs y.ValueStruct) {
if vs.Meta&bitValuePointer > 0 {
var vp valuePointer
vp.Decode(vs.Value)
discardStats[vp.Fid] += int64(vp.Len)
}
}
// Create iterators across all the tables involved first.
var iters []y.Iterator
if lev == 0 {
iters = appendIteratorsReversed(iters, topTables, false)
} else if len(topTables) > 0 {
y.AssertTrue(len(topTables) == 1)
iters = []y.Iterator{topTables[0].NewIterator(false)}
}
// Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
var valid []*table.Table
for _, table := range botTables {
if len(cd.dropPrefix) > 0 &&
bytes.HasPrefix(table.Smallest(), cd.dropPrefix) &&
bytes.HasPrefix(table.Biggest(), cd.dropPrefix) {
// All the keys in this table have the dropPrefix. So, this table does not need to be
// in the iterator and can be dropped immediately.
continue
}
valid = append(valid, table)
}
iters = append(iters, table.NewConcatIterator(valid, false))
it := y.NewMergeIterator(iters, false)
defer it.Close() // Important to close the iterator to do ref counting.
it.Rewind()
// Pick a discard ts, so we can discard versions below this ts. We should
// never discard any versions starting from above this timestamp, because
// that would affect the snapshot view guarantee provided by transactions.
discardTs := s.kv.orc.discardAtOrBelow()
// Start generating new tables.
type newTableResult struct {
table *table.Table
err error
}
resultCh := make(chan newTableResult)
var numBuilds, numVersions int
var lastKey, skipKey []byte
for it.Valid() {
timeStart := time.Now()
builder := table.NewTableBuilder()
var numKeys, numSkips uint64
for ; it.Valid(); it.Next() {
// See if we need to skip the prefix.
if len(cd.dropPrefix) > 0 && bytes.HasPrefix(it.Key(), cd.dropPrefix) {
numSkips++
updateStats(it.Value())
continue
}
// See if we need to skip this key.
if len(skipKey) > 0 {
if y.SameKey(it.Key(), skipKey) {
numSkips++
updateStats(it.Value())
continue
} else {
skipKey = skipKey[:0]
}
}
if !y.SameKey(it.Key(), lastKey) {
if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
// Only break if we are on a different key, and have reached capacity. We want
// to ensure that all versions of the key are stored in the same sstable, and
// not divided across multiple tables at the same level.
break
}
lastKey = y.SafeCopy(lastKey, it.Key())
numVersions = 0
}
vs := it.Value()
version := y.ParseTs(it.Key())
// Do not discard entries inserted by merge operator. These entries will be
// discarded once they're merged
if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
// Keep track of the number of versions encountered for this key. Only consider the
// versions which are below the minReadTs, otherwise, we might end up discarding the
// only valid version for a running transaction.
numVersions++
lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0
if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) ||
numVersions > s.kv.opt.NumVersionsToKeep ||
lastValidVersion {
// If this version of the key is deleted or expired, skip all the rest of the
// versions. Ensure that we're only removing versions below readTs.
skipKey = y.SafeCopy(skipKey, it.Key())
if lastValidVersion {
// Add this key. We have set skipKey, so the following key versions
// would be skipped.
} else if hasOverlap {
// If this key range has overlap with lower levels, then keep the deletion
// marker with the latest version, discarding the rest. We have set skipKey,
// so the following key versions would be skipped.
} else {
// If no overlap, we can skip all the versions, by continuing here.
numSkips++
updateStats(vs)
continue // Skip adding this key.
}
}
}
numKeys++
y.Check(builder.Add(it.Key(), it.Value()))
}
// It was true that it.Valid() at least once in the loop above, which means we
// called Add() at least once, and builder is not Empty().
s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
numKeys, numSkips, time.Since(timeStart))
if !builder.Empty() {
numBuilds++
fileID := s.reserveFileID()
go func(builder *table.Builder) {
defer builder.Close()
fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
if err != nil {
resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
return
}
if _, err := fd.Write(builder.Finish()); err != nil {
resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
return
}
tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
// decrRef is added below.
resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
}(builder)
}
}
newTables := make([]*table.Table, 0, 20)
// Wait for all table builders to finish.
var firstErr error
for x := 0; x < numBuilds; x++ {
res := <-resultCh
newTables = append(newTables, res.table)
if firstErr == nil {
firstErr = res.err
}
}
if firstErr == nil {
// Ensure created files' directory entries are visible. We don't mind the extra latency
// from not doing this ASAP after all file creation has finished because this is a
// background operation.
firstErr = syncDir(s.kv.opt.Dir)
}
if firstErr != nil {
// An error happened. Delete all the newly created table files (by calling DecrRef
// -- we're the only holders of a ref).
for j := 0; j < numBuilds; j++ {
if newTables[j] != nil {
_ = newTables[j].DecrRef()
}
}
errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
return nil, nil, errorReturn
}
sort.Slice(newTables, func(i, j int) bool {
return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
})
if err := s.kv.vlog.updateDiscardStats(discardStats); err != nil {
return nil, nil, errors.Wrap(err, "failed to update discard stats")
}
s.kv.opt.Debugf("Discard stats: %v", discardStats)
return newTables, func() error { return decrRefs(newTables) }, nil
}
func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
changes := []*pb.ManifestChange{}
for _, table := range newTables {
changes = append(changes,
newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum))
}
for _, table := range cd.top {
changes = append(changes, newDeleteChange(table.ID()))
}
for _, table := range cd.bot {
changes = append(changes, newDeleteChange(table.ID()))
}
return pb.ManifestChangeSet{Changes: changes}
}
type compactDef struct {
elog trace.Trace
thisLevel *levelHandler
nextLevel *levelHandler
top []*table.Table
bot []*table.Table
thisRange keyRange
nextRange keyRange
thisSize int64
dropPrefix []byte
}
func (cd *compactDef) lockLevels() {
cd.thisLevel.RLock()
cd.nextLevel.RLock()
}
func (cd *compactDef) unlockLevels() {
cd.nextLevel.RUnlock()
cd.thisLevel.RUnlock()
}
func (s *levelsController) fillTablesL0(cd *compactDef) bool {
cd.lockLevels()
defer cd.unlockLevels()
cd.top = make([]*table.Table, len(cd.thisLevel.tables))
copy(cd.top, cd.thisLevel.tables)
if len(cd.top) == 0 {
return false
}
cd.thisRange = infRange
kr := getKeyRange(cd.top)
left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
cd.bot = make([]*table.Table, right-left)
copy(cd.bot, cd.nextLevel.tables[left:right])
if len(cd.bot) == 0 {
cd.nextRange = kr
} else {
cd.nextRange = getKeyRange(cd.bot)
}
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
return false
}
return true
}
func (s *levelsController) fillTables(cd *compactDef) bool {
cd.lockLevels()
defer cd.unlockLevels()
tbls := make([]*table.Table, len(cd.thisLevel.tables))
copy(tbls, cd.thisLevel.tables)
if len(tbls) == 0 {
return false
}
// Find the biggest table, and compact that first.
// TODO: Try other table picking strategies.
sort.Slice(tbls, func(i, j int) bool {
return tbls[i].Size() > tbls[j].Size()
})
for _, t := range tbls {
cd.thisSize = t.Size()
cd.thisRange = keyRange{
// We pick all the versions of the smallest and the biggest key.
left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64),
// Note that version zero would be the rightmost key.
right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0),
}
if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
continue
}
cd.top = []*table.Table{t}
left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
cd.bot = make([]*table.Table, right-left)
copy(cd.bot, cd.nextLevel.tables[left:right])
if len(cd.bot) == 0 {
cd.bot = []*table.Table{}
cd.nextRange = cd.thisRange
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
continue
}
return true
}
cd.nextRange = getKeyRange(cd.bot)
if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
continue
}
if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
continue
}
return true
}
return false
}
func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
timeStart := time.Now()
thisLevel := cd.thisLevel
nextLevel := cd.nextLevel
// Table should never be moved directly between levels, always be rewritten to allow discarding
// invalid versions.
newTables, decr, err := s.compactBuildTables(l, cd)
if err != nil {
return err
}
defer func() {
// Only assign to err, if it's not already nil.
if decErr := decr(); err == nil {
err = decErr
}
}()
changeSet := buildChangeSet(&cd, newTables)
// We write to the manifest _before_ we delete files (and after we created files)
if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
return err
}
// See comment earlier in this function about the ordering of these ops, and the order in which
// we access levels when reading.
if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
return err
}
if err := thisLevel.deleteTables(cd.top); err != nil {
return err
}
// Note: For level 0, while doCompact is running, it is possible that new tables are added.
// However, the tables are added only to the end, so it is ok to just delete the first table.
s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
len(newTables), time.Since(timeStart))
return nil
}
var errFillTables = errors.New("Unable to fill tables")
// doCompact picks some table on level l and compacts it away to the next level.
func (s *levelsController) doCompact(p compactionPriority) error {
l := p.level
y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
cd := compactDef{
elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
thisLevel: s.levels[l],
nextLevel: s.levels[l+1],
dropPrefix: p.dropPrefix,
}
cd.elog.SetMaxEvents(100)
defer cd.elog.Finish()
s.kv.opt.Infof("Got compaction priority: %+v", p)
// While picking tables to be compacted, both levels' tables are expected to
// remain unchanged.
if l == 0 {
if !s.fillTablesL0(&cd) {
return errFillTables
}
} else {
if !s.fillTables(&cd) {
return errFillTables
}
}
defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
s.cstatus.toLog(cd.elog)
if err := s.runCompactDef(l, cd); err != nil {
// This compaction couldn't be done successfully.
s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
return err
}
s.cstatus.toLog(cd.elog)
s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
return nil
}
func (s *levelsController) addLevel0Table(t *table.Table) error {
// We update the manifest _before_ the table becomes part of a levelHandler, because at that
// point it could get used in some compaction. This ensures the manifest file gets updated in
// the proper order. (That means this update happens before that of some compaction which
// deletes the table.)
err := s.kv.manifest.addChanges([]*pb.ManifestChange{
newCreateChange(t.ID(), 0, t.Checksum),
})
if err != nil {
return err
}
for !s.levels[0].tryAddLevel0Table(t) {
// Stall. Make sure all levels are healthy before we unstall.
var timeStart time.Time
{
s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
s.cstatus.RLock()
for i := 0; i < s.kv.opt.MaxLevels; i++ {
s.elog.Printf("level=%d. Status=%s Size=%d\n",
i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
}
s.cstatus.RUnlock()
timeStart = time.Now()
}
// Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
// will very quickly fill up level 0 again and if the compaction strategy favors level 0,
// then level 1 is going to super full.
for i := 0; ; i++ {
// Passing 0 for delSize to compactable means we're treating incomplete compactions as
// not having finished -- we wait for them to finish. Also, it's crucial this behavior
// replicates pickCompactLevels' behavior in computing compactability in order to
// guarantee progress.
if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
break
}
time.Sleep(10 * time.Millisecond)
if i%100 == 0 {
prios := s.pickCompactLevels()
s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
i = 0
}
}
{
s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
lastUnstalled = time.Now()
}
}
return nil
}
func (s *levelsController) close() error {
err := s.cleanupLevels()
return errors.Wrap(err, "levelsController.Close")
}
// get returns the found value if any. If not found, we return nil.
func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) {
// It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
// in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
// read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
// parallelize this, we will need to call the h.RLock() function by increasing order of level
// number.)
version := y.ParseTs(key)
for _, h := range s.levels {
vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
if err != nil {
return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
}
if vs.Value == nil && vs.Meta == 0 {
continue
}
if maxVs == nil || vs.Version == version {
return vs, nil
}
if maxVs.Version < vs.Version {
*maxVs = vs
}
}
if maxVs != nil |
return y.ValueStruct{}, nil
}
func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
for i := len(th) - 1; i >= 0; i-- {
// This will increment the reference of the table handler.
out = append(out, th[i].NewIterator(reversed))
}
return out
}
// appendIterators appends iterators to an array of iterators, for merging.
// Note: This obtains references for the table handlers. Remember to close these iterators.
func (s *levelsController) appendIterators(
iters []y.Iterator, opt *IteratorOptions) []y.Iterator {
// Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
// data when there's a compaction.
for _, level := range s.levels {
iters = level.appendIterators(iters, opt)
}
return iters
}
// TableInfo represents the information about a table.
type TableInfo struct {
ID uint64
Level int
Left []byte
Right []byte
KeyCount uint64 // Number of keys in the table
}
// getTableInfo returns a TableInfo for every table on every level, sorted
// by level and then by table ID. If withKeysCount is true, each table is
// fully iterated to count its keys, which can be expensive.
func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
	for _, l := range s.levels {
		l.RLock()
		for _, t := range l.tables {
			var count uint64
			if withKeysCount {
				// Counting requires a full scan of the table.
				it := t.NewIterator(false)
				for it.Rewind(); it.Valid(); it.Next() {
					count++
				}
			}
			info := TableInfo{
				ID:       t.ID(),
				Level:    l.level,
				Left:     t.Smallest(),
				Right:    t.Biggest(),
				KeyCount: count,
			}
			result = append(result, info)
		}
		l.RUnlock()
	}
	// Deterministic output order: by level first, then table ID.
	sort.Slice(result, func(i, j int) bool {
		if result[i].Level != result[j].Level {
			return result[i].Level < result[j].Level
		}
		return result[i].ID < result[j].ID
	})
	return
}
| {
return *maxVs, nil
} | conditional_block |
levels.go | /*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bytes"
"fmt"
"math"
"math/rand"
"os"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/trace"
"github.com/dgraph-io/badger/pb"
"github.com/dgraph-io/badger/table"
"github.com/dgraph-io/badger/y"
"github.com/pkg/errors"
)
// levelsController owns the LSM-tree levels and coordinates compactions
// between them.
type levelsController struct {
	nextFileID uint64 // Atomic. Next SSTable file ID to hand out.
	elog       trace.EventLog
	// The following are initialized once and const.
	levels  []*levelHandler // One handler per level; index == level number.
	kv      *DB             // Owning database.
	cstatus compactStatus   // Key ranges currently undergoing compaction.
}
var (
	// lastUnstalled records when addLevel0Table last came out of a stall.
	// This is for getting timings between stalls (logging only).
	lastUnstalled time.Time
)
// revertToManifest verifies that every table file referenced by the
// manifest exists on disk, and deletes any table file that the manifest
// does not reference. idMap is the set of table file IDs obtained from a
// directory listing.
func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error {
	// Step 1: every manifest entry must have a matching file on disk.
	for tableID := range mf.Tables {
		if _, present := idMap[tableID]; !present {
			return fmt.Errorf("file does not exist for table %d", tableID)
		}
	}
	// Step 2: remove files on disk the manifest knows nothing about.
	for tableID := range idMap {
		if _, referenced := mf.Tables[tableID]; referenced {
			continue
		}
		kv.elog.Printf("Table file %d not referenced in MANIFEST\n", tableID)
		fname := table.NewFilename(tableID, kv.opt.Dir)
		if err := os.Remove(fname); err != nil {
			return y.Wrapf(err, "While removing table %d", tableID)
		}
	}
	return nil
}
// newLevelsController builds the levels controller: it sizes the level
// handlers, reconciles the manifest with the on-disk table files, opens
// all tables concurrently, validates key-range invariants, and syncs the
// directory. On any failure it cleans up whatever was opened so far.
func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
	y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables)
	s := &levelsController{
		kv:     db,
		elog:   db.elog,
		levels: make([]*levelHandler, db.opt.MaxLevels),
	}
	s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels)
	for i := 0; i < db.opt.MaxLevels; i++ {
		s.levels[i] = newLevelHandler(db, i)
		if i == 0 {
			// Do nothing. Level 0 is bounded by table count, not size.
		} else if i == 1 {
			// Level 1 probably shouldn't be too much bigger than level 0.
			s.levels[i].maxTotalSize = db.opt.LevelOneSize
		} else {
			// Each deeper level is LevelSizeMultiplier times larger.
			s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier)
		}
		s.cstatus.levels[i] = new(levelCompactStatus)
	}
	// Compare manifest against directory, check for existent/non-existent files, and remove.
	if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil {
		return nil, err
	}
	// Some files may be deleted. Let's reload.
	var flags uint32 = y.Sync
	if db.opt.ReadOnly {
		flags |= y.ReadOnly
	}
	var mu sync.Mutex // Guards the tables slices filled by the goroutines below.
	tables := make([][]*table.Table, db.opt.MaxLevels)
	var maxFileID uint64
	// We found that using 3 goroutines allows disk throughput to be utilized to its max.
	// Disk utilization is the main thing we should focus on, while trying to read the data. That's
	// the one factor that remains constant between HDD and SSD.
	throttle := y.NewThrottle(3)
	start := time.Now()
	var numOpened int32
	tick := time.NewTicker(3 * time.Second)
	defer tick.Stop()
	for fileID, tf := range mf.Tables {
		fname := table.NewFilename(fileID, db.opt.Dir)
		select {
		case <-tick.C:
			// Periodic progress report while opening many tables.
			db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened),
				len(mf.Tables), time.Since(start).Round(time.Millisecond))
		default:
		}
		if err := throttle.Do(); err != nil {
			closeAllTables(tables)
			return nil, err
		}
		if fileID > maxFileID {
			maxFileID = fileID
		}
		go func(fname string, tf TableManifest) {
			var rerr error
			defer func() {
				throttle.Done(rerr)
				atomic.AddInt32(&numOpened, 1)
			}()
			fd, err := y.OpenExistingFile(fname, flags)
			if err != nil {
				rerr = errors.Wrapf(err, "Opening file: %q", fname)
				return
			}
			t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum)
			if err != nil {
				if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
					db.opt.Errorf(err.Error())
					db.opt.Errorf("Ignoring table %s", fd.Name())
					// Do not set rerr. We will continue without this table.
				} else {
					rerr = errors.Wrapf(err, "Opening table: %q", fname)
				}
				return
			}
			mu.Lock()
			tables[tf.Level] = append(tables[tf.Level], t)
			mu.Unlock()
		}(fname, tf)
	}
	if err := throttle.Finish(); err != nil {
		closeAllTables(tables)
		return nil, err
	}
	db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened),
		time.Since(start).Round(time.Millisecond))
	s.nextFileID = maxFileID + 1
	for i, tbls := range tables {
		s.levels[i].initTables(tbls)
	}
	// Make sure key ranges do not overlap etc.
	if err := s.validate(); err != nil {
		_ = s.cleanupLevels()
		return nil, errors.Wrap(err, "Level validation")
	}
	// Sync directory (because we have at least removed some files, or previously created the
	// manifest file).
	if err := syncDir(db.opt.Dir); err != nil {
		_ = s.close()
		return nil, err
	}
	return s, nil
}
// closeAllTables closes every opened table, for cleanup in
// newLevelsController. (We Close() instead of using DecrRef() because
// DecrRef would delete the underlying files.) Errors are ignored, which
// is OK because tables are read-only.
func closeAllTables(tables [][]*table.Table) {
	for _, levelTables := range tables {
		for _, tbl := range levelTables {
			_ = tbl.Close()
		}
	}
}
// cleanupLevels closes every level handler, continuing past failures, and
// returns the first error encountered (nil if all closed cleanly).
func (s *levelsController) cleanupLevels() error {
	var firstErr error
	for _, lh := range s.levels {
		err := lh.close()
		if firstErr == nil && err != nil {
			firstErr = err
		}
	}
	return firstErr
}
// dropTree picks all tables from all levels, creates a manifest changeset,
// applies it, and then decrements the refs of these tables, which would result
// in their deletion. Returns the number of tables dropped.
func (s *levelsController) dropTree() (int, error) {
	// First pick all tables, so we can create a manifest changelog.
	var all []*table.Table
	for _, l := range s.levels {
		l.RLock()
		all = append(all, l.tables...)
		l.RUnlock()
	}
	if len(all) == 0 {
		return 0, nil
	}
	// Generate the manifest changes: one delete record per table.
	changes := []*pb.ManifestChange{}
	for _, table := range all {
		changes = append(changes, newDeleteChange(table.ID()))
	}
	changeSet := pb.ManifestChangeSet{Changes: changes}
	if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
		return 0, err
	}
	// Now that manifest has been successfully written, we can delete the tables.
	// Empty each level handler first so readers stop seeing the tables.
	for _, l := range s.levels {
		l.Lock()
		l.totalSize = 0
		l.tables = l.tables[:0]
		l.Unlock()
	}
	// Dropping the references triggers deletion of the underlying files.
	for _, table := range all {
		if err := table.DecrRef(); err != nil {
			return 0, err
		}
	}
	return len(all), nil
}
// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the
// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the
// provided prefix. For Li->Li compactions, it picks up the tables which would have the prefix. The
// tables who only have keys with this prefix are quickly dropped. The ones which have other keys
// are run through MergeIterator and compacted to create new tables. All the mechanisms of
// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow.
func (s *levelsController) dropPrefix(prefix []byte) error {
	opt := s.kv.opt
	for _, l := range s.levels {
		l.RLock()
		if l.level == 0 {
			size := len(l.tables)
			l.RUnlock()
			if size > 0 {
				cp := compactionPriority{
					level: 0,
					score: 1.74,
					// A unique number greater than 1.0 does two things. Helps identify this
					// function in logs, and forces a compaction.
					dropPrefix: prefix,
				}
				if err := s.doCompact(cp); err != nil {
					// NOTE(review): the L0 error is logged but swallowed
					// (returns nil) — confirm this best-effort behavior is
					// intended before changing it.
					opt.Warningf("While compacting level 0: %v", err)
					return nil
				}
			}
			continue
		}
		// For levels >= 1, collect the tables whose key range could
		// contain keys with the prefix.
		var tables []*table.Table
		for _, table := range l.tables {
			var absent bool
			switch {
			case bytes.HasPrefix(table.Smallest(), prefix):
			case bytes.HasPrefix(table.Biggest(), prefix):
			case bytes.Compare(prefix, table.Smallest()) > 0 &&
				bytes.Compare(prefix, table.Biggest()) < 0:
			default:
				absent = true
			}
			if !absent {
				tables = append(tables, table)
			}
		}
		l.RUnlock()
		if len(tables) == 0 {
			continue
		}
		// Same-level compaction: thisLevel == nextLevel, with the prefix
		// keys dropped during the rewrite.
		cd := compactDef{
			elog:       trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"),
			thisLevel:  l,
			nextLevel:  l,
			top:        []*table.Table{},
			bot:        tables,
			dropPrefix: prefix,
		}
		if err := s.runCompactDef(l.level, cd); err != nil {
			opt.Warningf("While running compact def: %+v. Error: %v", cd, err)
			return err
		}
	}
	return nil
}
// startCompact launches NumCompactors background compaction workers.
// NOTE(review): AddRunning(n-1) while starting n goroutines suggests the
// closer is created already counting one worker — confirm at the call site.
func (s *levelsController) startCompact(lc *y.Closer) {
	n := s.kv.opt.NumCompactors
	lc.AddRunning(n - 1)
	for i := 0; i < n; i++ {
		go s.runWorker(lc)
	}
}
// runWorker is the body of one compaction goroutine. After a random
// startup delay (to spread workers out), it wakes once per second, picks
// the current compaction priorities, and attempts them in order until one
// succeeds. It exits when the closer is closed.
func (s *levelsController) runWorker(lc *y.Closer) {
	defer lc.Done()
	// Random initial delay so all workers don't fire at the same instant.
	randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond)
	select {
	case <-randomDelay.C:
	case <-lc.HasBeenClosed():
		randomDelay.Stop()
		return
	}
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		// Can add a done channel or other stuff.
		case <-ticker.C:
			prios := s.pickCompactLevels()
			for _, p := range prios {
				if err := s.doCompact(p); err == nil {
					// One successful compaction per tick is enough.
					break
				} else if err == errFillTables {
					// pass: could not pick tables, try next priority.
				} else {
					s.kv.opt.Warningf("While running doCompact: %v\n", err)
				}
			}
		case <-lc.HasBeenClosed():
			return
		}
	}
}
// isLevel0Compactable returns true if level zero may be compacted, without
// accounting for compactions that already might be happening. Level 0 is
// judged by table count, not byte size.
func (s *levelsController) isLevel0Compactable() bool {
	return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables
}
// isCompactable returns true if the non-zero level may be compacted. delSize provides the size of
// the tables which are currently being compacted so that we treat them as already having started
// being compacted (because they have been, yet their size is already counted in getTotalSize).
func (l *levelHandler) isCompactable(delSize int64) bool {
	return l.getTotalSize()-delSize >= l.maxTotalSize
}
// compactionPriority describes one candidate compaction: the level to
// compact and a score used to order candidates (higher runs first).
type compactionPriority struct {
	level int
	score float64
	// dropPrefix, when non-empty, makes the compaction drop all keys with
	// this prefix (used by dropPrefix).
	dropPrefix []byte
}
// pickCompactLevels determines which levels to compact, returning candidates
// sorted by descending score.
// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction
func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
	// This function must use identical criteria for guaranteeing compaction's progress that
	// addLevel0Table uses.
	// cstatus is checked to see if level 0's tables are already being compacted.
	if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() {
		pri := compactionPriority{
			level: 0,
			// Score for L0 is the fill ratio by table count.
			score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables),
		}
		prios = append(prios, pri)
	}
	for i, l := range s.levels[1:] {
		// Don't consider those tables that are already being compacted right now.
		delSize := s.cstatus.delSize(i + 1)
		if l.isCompactable(delSize) {
			pri := compactionPriority{
				level: i + 1,
				// Score for deeper levels is the fill ratio by bytes.
				score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize),
			}
			prios = append(prios, pri)
		}
	}
	// Highest score first: fullest levels get compacted soonest.
	sort.Slice(prios, func(i, j int) bool {
		return prios[i].score > prios[j].score
	})
	return prios
}
// compactBuildTables merges topTables and botTables to form a list of new tables.
// It returns the new tables plus a function that releases the references this
// call took on them. Version pruning, prefix dropping and value-log discard
// statistics all happen inside this merge.
func (s *levelsController) compactBuildTables(
	lev int, cd compactDef) ([]*table.Table, func() error, error) {
	topTables := cd.top
	botTables := cd.bot
	// hasOverlap records whether the compacted key range also exists on any
	// deeper level; if so, deletion markers must be preserved.
	var hasOverlap bool
	{
		kr := getKeyRange(cd.top)
		for i, lh := range s.levels {
			if i <= lev { // Skip upper levels.
				continue
			}
			lh.RLock()
			left, right := lh.overlappingTables(levelHandlerRLocked{}, kr)
			lh.RUnlock()
			if right-left > 0 {
				hasOverlap = true
				break
			}
		}
	}
	// Try to collect stats so that we can inform value log about GC. That would help us find which
	// value log file should be GCed.
	discardStats := make(map[uint32]int64)
	updateStats := func(vs y.ValueStruct) {
		if vs.Meta&bitValuePointer > 0 {
			var vp valuePointer
			vp.Decode(vs.Value)
			discardStats[vp.Fid] += int64(vp.Len)
		}
	}
	// Create iterators across all the tables involved first.
	var iters []y.Iterator
	if lev == 0 {
		// Level 0 tables may overlap; iterate newest-first.
		iters = appendIteratorsReversed(iters, topTables, false)
	} else if len(topTables) > 0 {
		y.AssertTrue(len(topTables) == 1)
		iters = []y.Iterator{topTables[0].NewIterator(false)}
	}
	// Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
	var valid []*table.Table
	for _, table := range botTables {
		if len(cd.dropPrefix) > 0 &&
			bytes.HasPrefix(table.Smallest(), cd.dropPrefix) &&
			bytes.HasPrefix(table.Biggest(), cd.dropPrefix) {
			// All the keys in this table have the dropPrefix. So, this table does not need to be
			// in the iterator and can be dropped immediately.
			continue
		}
		valid = append(valid, table)
	}
	iters = append(iters, table.NewConcatIterator(valid, false))
	it := y.NewMergeIterator(iters, false)
	defer it.Close() // Important to close the iterator to do ref counting.
	it.Rewind()
	// Pick a discard ts, so we can discard versions below this ts. We should
	// never discard any versions starting from above this timestamp, because
	// that would affect the snapshot view guarantee provided by transactions.
	discardTs := s.kv.orc.discardAtOrBelow()
	// Start generating new tables.
	type newTableResult struct {
		table *table.Table
		err   error
	}
	resultCh := make(chan newTableResult)
	var numBuilds, numVersions int
	var lastKey, skipKey []byte
	for it.Valid() {
		timeStart := time.Now()
		builder := table.NewTableBuilder()
		var numKeys, numSkips uint64
		for ; it.Valid(); it.Next() {
			// See if we need to skip the prefix.
			if len(cd.dropPrefix) > 0 && bytes.HasPrefix(it.Key(), cd.dropPrefix) {
				numSkips++
				updateStats(it.Value())
				continue
			}
			// See if we need to skip this key.
			if len(skipKey) > 0 {
				if y.SameKey(it.Key(), skipKey) {
					numSkips++
					updateStats(it.Value())
					continue
				} else {
					skipKey = skipKey[:0]
				}
			}
			if !y.SameKey(it.Key(), lastKey) {
				if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
					// Only break if we are on a different key, and have reached capacity. We want
					// to ensure that all versions of the key are stored in the same sstable, and
					// not divided across multiple tables at the same level.
					break
				}
				lastKey = y.SafeCopy(lastKey, it.Key())
				numVersions = 0
			}
			vs := it.Value()
			version := y.ParseTs(it.Key())
			// Do not discard entries inserted by merge operator. These entries will be
			// discarded once they're merged
			if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
				// Keep track of the number of versions encountered for this key. Only consider the
				// versions which are below the minReadTs, otherwise, we might end up discarding the
				// only valid version for a running transaction.
				numVersions++
				lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0
				if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) ||
					numVersions > s.kv.opt.NumVersionsToKeep ||
					lastValidVersion {
					// If this version of the key is deleted or expired, skip all the rest of the
					// versions. Ensure that we're only removing versions below readTs.
					skipKey = y.SafeCopy(skipKey, it.Key())
					if lastValidVersion {
						// Add this key. We have set skipKey, so the following key versions
						// would be skipped.
					} else if hasOverlap {
						// If this key range has overlap with lower levels, then keep the deletion
						// marker with the latest version, discarding the rest. We have set skipKey,
						// so the following key versions would be skipped.
					} else {
						// If no overlap, we can skip all the versions, by continuing here.
						numSkips++
						updateStats(vs)
						continue // Skip adding this key.
					}
				}
			}
			numKeys++
			y.Check(builder.Add(it.Key(), it.Value()))
		}
		// It was true that it.Valid() at least once in the loop above, which means we
		// called Add() at least once, and builder is not Empty().
		s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
			numKeys, numSkips, time.Since(timeStart))
		if !builder.Empty() {
			numBuilds++
			fileID := s.reserveFileID()
			// Write and open each new table concurrently; results collected below.
			go func(builder *table.Builder) {
				defer builder.Close()
				fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
				if err != nil {
					resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
					return
				}
				if _, err := fd.Write(builder.Finish()); err != nil {
					resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
					return
				}
				tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
				// decrRef is added below.
				resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
			}(builder)
		}
	}
	newTables := make([]*table.Table, 0, 20)
	// Wait for all table builders to finish.
	var firstErr error
	for x := 0; x < numBuilds; x++ {
		res := <-resultCh
		newTables = append(newTables, res.table)
		if firstErr == nil {
			firstErr = res.err
		}
	}
	if firstErr == nil {
		// Ensure created files' directory entries are visible. We don't mind the extra latency
		// from not doing this ASAP after all file creation has finished because this is a
		// background operation.
		firstErr = syncDir(s.kv.opt.Dir)
	}
	if firstErr != nil {
		// An error happened. Delete all the newly created table files (by calling DecrRef
		// -- we're the only holders of a ref).
		for j := 0; j < numBuilds; j++ {
			if newTables[j] != nil {
				_ = newTables[j].DecrRef()
			}
		}
		errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
		return nil, nil, errorReturn
	}
	sort.Slice(newTables, func(i, j int) bool {
		return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
	})
	if err := s.kv.vlog.updateDiscardStats(discardStats); err != nil {
		return nil, nil, errors.Wrap(err, "failed to update discard stats")
	}
	s.kv.opt.Debugf("Discard stats: %v", discardStats)
	return newTables, func() error { return decrRefs(newTables) }, nil
}
// buildChangeSet assembles the manifest changes for a finished compaction:
// a create record for every new table at the next level, and a delete
// record for every input table from both levels.
func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
	changes := make([]*pb.ManifestChange, 0, len(newTables)+len(cd.top)+len(cd.bot))
	for _, tbl := range newTables {
		changes = append(changes,
			newCreateChange(tbl.ID(), cd.nextLevel.level, tbl.Checksum))
	}
	for _, tbl := range cd.top {
		changes = append(changes, newDeleteChange(tbl.ID()))
	}
	for _, tbl := range cd.bot {
		changes = append(changes, newDeleteChange(tbl.ID()))
	}
	return pb.ManifestChangeSet{Changes: changes}
}
// compactDef describes a single compaction: the tables being merged from
// thisLevel into nextLevel, their key ranges, and an optional prefix whose
// keys are dropped during the rewrite.
type compactDef struct {
	elog       trace.Trace
	thisLevel  *levelHandler
	nextLevel  *levelHandler
	top        []*table.Table // Tables from thisLevel being compacted.
	bot        []*table.Table // Overlapping tables from nextLevel.
	thisRange  keyRange
	nextRange  keyRange
	thisSize   int64
	dropPrefix []byte
}
// lockLevels read-locks both levels, always thisLevel before nextLevel.
// unlockLevels releases in the opposite order.
func (cd *compactDef) lockLevels() {
	cd.thisLevel.RLock()
	cd.nextLevel.RLock()
}
// unlockLevels releases the read locks taken by lockLevels, in reverse
// acquisition order.
func (cd *compactDef) unlockLevels() {
	cd.nextLevel.RUnlock()
	cd.thisLevel.RUnlock()
}
// fillTablesL0 fills cd with all of level 0's tables plus the overlapping
// tables from the next level, and registers the compaction with cstatus.
// Returns false if level 0 is empty or if the range is already being
// compacted.
func (s *levelsController) fillTablesL0(cd *compactDef) bool {
	cd.lockLevels()
	defer cd.unlockLevels()
	// Take every L0 table (they may overlap each other arbitrarily).
	cd.top = make([]*table.Table, len(cd.thisLevel.tables))
	copy(cd.top, cd.thisLevel.tables)
	if len(cd.top) == 0 {
		return false
	}
	// L0 compactions claim the whole key space.
	cd.thisRange = infRange
	kr := getKeyRange(cd.top)
	left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
	cd.bot = make([]*table.Table, right-left)
	copy(cd.bot, cd.nextLevel.tables[left:right])
	if len(cd.bot) == 0 {
		cd.nextRange = kr
	} else {
		cd.nextRange = getKeyRange(cd.bot)
	}
	// Atomically reserve the ranges; fails if another compaction overlaps.
	if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
		return false
	}
	return true
}
// fillTables picks one table from cd.thisLevel (largest first) plus the
// overlapping tables from the next level, and registers the compaction with
// cstatus. Returns false if no table can currently be compacted without
// clashing with in-flight compactions.
func (s *levelsController) fillTables(cd *compactDef) bool {
	cd.lockLevels()
	defer cd.unlockLevels()
	tbls := make([]*table.Table, len(cd.thisLevel.tables))
	copy(tbls, cd.thisLevel.tables)
	if len(tbls) == 0 {
		return false
	}
	// Find the biggest table, and compact that first.
	// TODO: Try other table picking strategies.
	sort.Slice(tbls, func(i, j int) bool {
		return tbls[i].Size() > tbls[j].Size()
	})
	for _, t := range tbls {
		cd.thisSize = t.Size()
		cd.thisRange = keyRange{
			// We pick all the versions of the smallest and the biggest key.
			left: y.KeyWithTs(y.ParseKey(t.Smallest()), math.MaxUint64),
			// Note that version zero would be the rightmost key.
			right: y.KeyWithTs(y.ParseKey(t.Biggest()), 0),
		}
		// Skip tables whose range is already being compacted.
		if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
			continue
		}
		cd.top = []*table.Table{t}
		left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
		cd.bot = make([]*table.Table, right-left)
		copy(cd.bot, cd.nextLevel.tables[left:right])
		if len(cd.bot) == 0 {
			// Nothing overlaps below: the next range is just this range.
			cd.bot = []*table.Table{}
			cd.nextRange = cd.thisRange
			if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
				continue
			}
			return true
		}
		cd.nextRange = getKeyRange(cd.bot)
		if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
			continue
		}
		if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
			continue
		}
		return true
	}
	return false
}
// runCompactDef executes a prepared compaction: it builds the merged
// tables, records the change in the manifest, then swaps the new tables in
// at the next level and deletes the inputs from this level — strictly in
// that order, so readers never miss data.
func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
	timeStart := time.Now()
	thisLevel := cd.thisLevel
	nextLevel := cd.nextLevel
	// Table should never be moved directly between levels, always be rewritten to allow discarding
	// invalid versions.
	newTables, decr, err := s.compactBuildTables(l, cd)
	if err != nil {
		return err
	}
	defer func() {
		// Only assign to err, if it's not already nil.
		if decErr := decr(); err == nil {
			err = decErr
		}
	}()
	changeSet := buildChangeSet(&cd, newTables)
	// We write to the manifest _before_ we delete files (and after we created files)
	if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
		return err
	}
	// See comment earlier in this function about the ordering of these ops, and the order in which
	// we access levels when reading.
	if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
		return err
	}
	if err := thisLevel.deleteTables(cd.top); err != nil {
		return err
	}
	// Note: For level 0, while doCompact is running, it is possible that new tables are added.
	// However, the tables are added only to the end, so it is ok to just delete the first table.
	s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
		thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
		len(newTables), time.Since(timeStart))
	return nil
}
// errFillTables signals that no tables could currently be picked for
// compaction (not a failure; the caller just tries the next priority).
var errFillTables = errors.New("Unable to fill tables")

// doCompact picks some table on level l and compacts it away to the next level.
func (s *levelsController) doCompact(p compactionPriority) error {
	l := p.level
	y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
	cd := compactDef{
		elog:       trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
		thisLevel:  s.levels[l],
		nextLevel:  s.levels[l+1],
		dropPrefix: p.dropPrefix,
	}
	cd.elog.SetMaxEvents(100)
	defer cd.elog.Finish()
	s.kv.opt.Infof("Got compaction priority: %+v", p)
	// While picking tables to be compacted, both levels' tables are expected to
	// remain unchanged.
	if l == 0 {
		if !s.fillTablesL0(&cd) {
			return errFillTables
		}
	} else {
		if !s.fillTables(&cd) {
			return errFillTables
		}
	}
	defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
	s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
	s.cstatus.toLog(cd.elog)
	if err := s.runCompactDef(l, cd); err != nil {
		// This compaction couldn't be done successfully.
		s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
		return err
	}
	s.cstatus.toLog(cd.elog)
	s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
	return nil
}
// addLevel0Table registers a new table in the manifest and inserts it into
// level 0, stalling (busy-waiting) while level 0 is full until compactions
// make levels 0 and 1 healthy again.
func (s *levelsController) addLevel0Table(t *table.Table) error {
	// We update the manifest _before_ the table becomes part of a levelHandler, because at that
	// point it could get used in some compaction. This ensures the manifest file gets updated in
	// the proper order. (That means this update happens before that of some compaction which
	// deletes the table.)
	err := s.kv.manifest.addChanges([]*pb.ManifestChange{
		newCreateChange(t.ID(), 0, t.Checksum),
	})
	if err != nil {
		return err
	}
	for !s.levels[0].tryAddLevel0Table(t) {
		// Stall. Make sure all levels are healthy before we unstall.
		var timeStart time.Time
		{
			// Log the state of every level at the start of the stall.
			s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
			s.cstatus.RLock()
			for i := 0; i < s.kv.opt.MaxLevels; i++ {
				s.elog.Printf("level=%d. Status=%s Size=%d\n",
					i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
			}
			s.cstatus.RUnlock()
			timeStart = time.Now()
		}
		// Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
		// will very quickly fill up level 0 again and if the compaction strategy favors level 0,
		// then level 1 is going to super full.
		for i := 0; ; i++ {
			// Passing 0 for delSize to compactable means we're treating incomplete compactions as
			// not having finished -- we wait for them to finish. Also, it's crucial this behavior
			// replicates pickCompactLevels' behavior in computing compactability in order to
			// guarantee progress.
			if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) {
				break
			}
			time.Sleep(10 * time.Millisecond)
			if i%100 == 0 {
				// Roughly once per second, log what we're waiting on.
				prios := s.pickCompactLevels()
				s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios)
				i = 0
			}
		}
		{
			s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart))
			lastUnstalled = time.Now()
		}
	}
	return nil
}
// close shuts the controller down by closing all of its level handlers.
func (s *levelsController) close() error {
	cleanupErr := s.cleanupLevels()
	return errors.Wrap(cleanupErr, "levelsController.Close")
}
// get returns the found value if any. If not found, we return nil.
// When maxVs is non-nil, instead of stopping at the first hit, the search
// continues across levels and the highest-version value seen is written
// into *maxVs and returned (unless an exact version match is found first).
func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) {
	// It's important that we iterate the levels from 0 on upward. The reason is, if we iterated
	// in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could
	// read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do
	// parallelize this, we will need to call the h.RLock() function by increasing order of level
	// number.)
	version := y.ParseTs(key)
	for _, h := range s.levels {
		vs, err := h.get(key) // Calls h.RLock() and h.RUnlock().
		if err != nil {
			return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key)
		}
		// Zero value and zero meta together mean "not found at this level".
		if vs.Value == nil && vs.Meta == 0 {
			continue
		}
		// Exact version match (or caller didn't ask for max) -> done.
		if maxVs == nil || vs.Version == version {
			return vs, nil
		}
		if maxVs.Version < vs.Version {
			*maxVs = vs
		}
	}
	if maxVs != nil {
		return *maxVs, nil
	}
	return y.ValueStruct{}, nil
}
// appendIteratorsReversed walks th back-to-front and appends a fresh
// iterator for each table to out. Creating an iterator increments the
// reference count of the table handler.
func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator {
	n := len(th)
	for i := n; i > 0; i-- {
		out = append(out, th[i-1].NewIterator(reversed))
	}
	return out
}
// appendIterators appends iterators to an array of iterators, for merging.
// Note: This obtains references for the table handlers. Remember to close these iterators.
func (s *levelsController) appendIterators(
iters []y.Iterator, opt *IteratorOptions) []y.Iterator |
// TableInfo represents the information about a table.
type TableInfo struct {
	ID       uint64 // Table file ID.
	Level    int    // LSM level the table currently lives on.
	Left     []byte // Smallest key stored in the table.
	Right    []byte // Biggest key stored in the table.
	KeyCount uint64 // Number of keys in the table
}
// getTableInfo returns a TableInfo for every table on every level, sorted
// by level then table ID. If withKeysCount is true, each table is scanned
// end to end to count its keys, which can be expensive.
func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) {
	for _, l := range s.levels {
		l.RLock()
		for _, t := range l.tables {
			var count uint64
			if withKeysCount {
				// Full iteration just to count keys.
				it := t.NewIterator(false)
				for it.Rewind(); it.Valid(); it.Next() {
					count++
				}
			}
			info := TableInfo{
				ID:       t.ID(),
				Level:    l.level,
				Left:     t.Smallest(),
				Right:    t.Biggest(),
				KeyCount: count,
			}
			result = append(result, info)
		}
		l.RUnlock()
	}
	// Deterministic ordering for callers and tests.
	sort.Slice(result, func(i, j int) bool {
		if result[i].Level != result[j].Level {
			return result[i].Level < result[j].Level
		}
		return result[i].ID < result[j].ID
	})
	return
}
| {
// Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing
// data when there's a compaction.
for _, level := range s.levels {
iters = level.appendIterators(iters, opt)
}
return iters
} | identifier_body |
DataHost.py | from WMCore.REST.Auth import authz_match
from WMCore.REST.Server import RESTEntity, restcall
from WMCore.REST.Error import MissingObject, InvalidParameter
from WMCore.REST.Validation import validate_strlist, \
_validate_one, _validate_all
from WMCore.REST.Tools import tools
from Overview.IPInfo import IPInfo, IPResolver, HostInfo
from Overview.Debug import debug
from threading import Thread, Condition
from collections import namedtuple
from netaddr import IPAddress
import cjson, re, cherrypy, time, random
RXIP = re.compile(r"^[.0-9]+$")
RXHOST = re.compile(r"^(?:[-a-z0-9]+\.)+[a-z]{2,5}$")
Value = namedtuple("Value", ["expires", "data"])
Task = namedtuple("Task", ["kind", "hosts", "reply"])
def _check_ip(argname, val):
    """Validate that `val` is a numeric-and-dots string parseable as an IP
    address, and return the corresponding netaddr.IPAddress object.

    :arg str argname: parameter name, used only in the error message.
    :arg val: candidate value; must be a str matching RXIP (digits/dots).
    :raises InvalidParameter: if `val` is not a string, fails the RXIP
        pattern, or cannot be parsed by IPAddress()."""
    if not isinstance(val, str) or not RXIP.match(val):
        raise InvalidParameter("Incorrect '%s' parameter" % argname)
    try:
        val = IPAddress(val)
    except:
        # Any parse failure is reported uniformly as an invalid parameter.
        raise InvalidParameter("Incorrect '%s' parameter" % argname)
    return val
def validate_iplist(argname, param, safe):
    """Validates that an argument is an array of strings, each of which
    is a valid IP address.

    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which is convertible to an `IPAddress`
    object. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Per-element validation/conversion is delegated to _check_ip.
    _validate_all(argname, param, safe, _check_ip)
def validate_ip(argname, param, safe, optional = False):
    """Validates that an argument is a valid IP address.

    Checks that an argument named `argname` exists in `param.kwargs`
    and it is a string convertible to an `IPAddress` object. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    # Single-value counterpart of validate_iplist; same conversion hook.
    _validate_one(argname, param, safe, _check_ip, optional)
class Reply:
    """Accumulator for one asynchronous host/IP lookup.

    An instance is handed to the resolver as a callback; each resolved
    IPInfo/HostInfo is folded into `result` and waiters blocked on
    `signal` are notified once `pending` becomes empty."""
    result = None      # List of serialized results, built up by __call__.
    kind = None        # "name" or "ip"; selects the callback behavior.
    error = None       # Exception to re-raise to the waiting caller.
    signal = None      # Condition the waiting caller sleeps on.
    submitted = False  # True once handed to the resolver.
    finished = False   # True once the caller has consumed the result.
    pending = None     # Set of host names / IPs still awaiting resolution.
    until = 0          # Deadline (epoch seconds) for the lookup.

    def _ipinfo(self, i):
        """Serialize one IPInfo into a plain dict of IP/ASN/GeoIP fields."""
        return { "ip": str(i.ip), "cidr": str(i.cidr),
                 "domain": i.domain, "hostname": i.hostname,
                 "cidrhost": i.cidrhost, "wildhost": i.wildhost,
                 "asn": {
                   "cc": i.asn.cc, "asn": i.asn.asn, "country": i.asn.country,
                   "rir": i.asn.rir, "org": i.asn.org, "desc": i.asn.desc,
                   "date": i.asn.date
                 },
                 "geoip": {
                   "cc": i.geoip.cc, "country": i.geoip.country,
                   "continent": i.geoip.continent, "region": i.geoip.region,
                   "city": i.geoip.city, "lat": i.geoip.lat, "lon": i.geoip.long
                 }
               }

    def _hostinfo(self, info):
        """Serialize one HostInfo: all its addresses plus DNS name/address maps."""
        return { "hostname": info.hostname,
                 "ipaddrs": [self._ipinfo(i) for i in info.ipaddrs.values()],
                 "dnsinfo": { "CNAME": dict((k, v) for k, v in info.cnames.iteritems() if v),
                              "A": dict((k, map(str, v)) for k, v in info.addrs.iteritems() if v) },
                 "names": [x for x in info.all_names],
                 "addrs": [x for x in info.all_addrs] }

    def __call__(self, info, origin, remain):
        """Resolver callback: fold `info` into the result set.

        :arg info: HostInfo (kind == "name") or IPInfo (kind == "ip").
        :arg origin: originating HostInfo for address updates, may be None.
        :arg remain: number of addresses still unresolved for this host;
            a host is only removed from `pending` when remain == 0."""
        debug("HOSTDATA", 3, "replied to %s", self)
        if self.kind == "name":
            if isinstance(info, HostInfo):
                if remain:
                    # Partial update: more addresses of this host still pending.
                    debug("HOSTDATA", 2,
                          "host %s: %d out of %d host addresses, waiting for remaining %d",
                          info.hostname, len(info.ipaddrs), len(info.all_addrs), remain)
                else:
                    assert info.hostname in self.pending
                    self.pending.remove(info.hostname)
                    debug("HOSTDATA", 1,
                          "host %s: all %d addresses resolved, %d requests remain",
                          info.hostname, len(info.ipaddrs), len(self.pending))
                    with self.signal:
                        if not self.result:
                            self.result = []
                        self.result.append(self._hostinfo(info))
                        if not self.pending:
                            # Last host done: wake every waiter in lookup().
                            self.signal.notifyAll()
            else:
                # Bare IPInfo updates are irrelevant for name lookups.
                debug("HOSTDATA", 1, "%s: ignoring address update for %s",
                      (origin and origin.hostname), info.ip)
        elif self.kind == "ip":
            assert isinstance(info, IPInfo)
            assert info.ip in self.pending
            assert not remain
            self.pending.remove(info.ip)
            debug("HOSTDATA", 1, "ip %s: address resolved, %d requests remain",
                  info.ip, len(self.pending))
            with self.signal:
                if not self.result:
                    self.result = []
                self.result.append(self._ipinfo(info))
                if not self.pending:
                    self.signal.notifyAll()
        else:
            assert False, "internal error, lookup neither host nor ip"
class HostCache(Thread):
"""Utility to resolve host information."""
_PURGE_INTERVAL = 4*3600
_NUM_SIGS = 8
def __init__(self, statedir):
Thread.__init__(self, name = "HostCache")
self._ip2i = IPResolver(cachedir = statedir, maxtime=15)
self._cv = Condition()
self._stopme = False
self._requests = []
self._last_purge = time.time()
self._signals = map(lambda x: Condition(), xrange(0, self._NUM_SIGS))
cherrypy.engine.subscribe('start', self.start)
cherrypy.engine.subscribe('stop', self.stop, priority=100)
def _purge(self):
now = time.time()
debug("HOSTDATA", 1, "purging address resolver")
self._last_purge = time.time()
self._ip2i.purge()
def statistics(self):
with self._cv:
return self._ip2i.statistics()
def reset_statistics(self):
with self._cv:
self._ip2i.reset_statistics()
def purge(self):
with self._cv:
self._purge()
def stop(self):
debug("HOSTDATA", 1, "requesting to stop resolved thread")
with self._cv:
self._stopme = True
self._cv.notifyAll()
def lookup(self, kind, hosts, maxwait=30):
"""
Lookup information either by IP address or host name.
:arg str kind: "ip" or "name"
:arg list hosts: list of host name string, ip address or a real name
:arg float maxwait: maximum time in seconds to wait for a result.
"""
reply = Reply()
reply.kind = kind
reply.until = time.time() + maxwait
reply.signal = random.choice(self._signals)
reply.pending = set(hosts)
with self._cv:
self._requests.append(Task(kind, hosts, reply))
self._cv.notifyAll()
with reply.signal:
while True:
if self._stopme:
raise RuntimeError("server stopped")
elif reply.error:
raise reply.error
elif not reply.pending:
reply.finished = True
return reply.result
else:
reply.signal.wait()
def | (self):
with self._cv:
while not self._stopme:
npending = 0
ncurreq = len(self._requests)
# Insert any new requests. If they fail, remember the error.
for r in self._requests:
if not r.reply.submitted:
debug("HOSTDATA", 1, "submitting request: %s %s", r.kind, r.hosts)
r.reply.submitted = True
try:
self._ip2i.submit(r.hosts, kind=r.kind, callback=r.reply)
except Exception, e:
r.reply.error = e
# Pump any pending lookups for up to .25 seconds. Note that this
# will wait only as long as needed, and will quit immediately
# if there is no work at all. It's not unusual we need to wait
# longer than this for final results; see the check further on.
try:
self._cv.release()
npending = self._ip2i.process(.25)
finally:
self._cv.acquire()
# Post-process requests. Remove fully completed, expired and
# failed lookups from the request queue.
nmodified = 0
now = time.time()
for r in self._requests[:]:
rr = r.reply
if rr.finished:
debug("HOSTDATA", 2, "request completed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
nmodified += 1
elif rr.submitted and rr.until < now:
debug("HOSTDATA", 1, "request has expired: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.error = RuntimeError("maximum wait time exhausted")
rr.signal.notifyAll()
nmodified += 1
elif rr.submitted and rr.error:
debug("HOSTDATA", 1, "request failed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.signal.notifyAll()
nmodified += 1
# Wait to be notified, but only if we don't already have work to do.
skipwait = (self._stopme or npending or nmodified
or len(self._requests) != ncurreq)
debug("HOSTDATA", 2, ("wait for signal, %d pending, %d requests"
" now vs. %d before, %d modified: %s"),
npending, len(self._requests), ncurreq, nmodified,
(skipwait and "skipping unnecessary wait") or "waiting")
if not skipwait:
if now - self._last_purge > self._PURGE_INTERVAL:
self._purge()
self._cv.wait((self._requests and 0.25) or None)
debug("HOSTDATA", 2, "wait done")
debug("HOSTDATA", 1, "server thread stopped")
class HostData(RESTEntity):
"""REST entity object for Host information."""
def __init__(self, app, api, config, mount):
RESTEntity.__init__(self, app, api, config, mount)
self._cache = HostCache(app.statedir)
def validate(self, apiobj, method, api, param, safe):
if method == "GET":
if not len(param.args) or param.args[0] not in ("ip", "name", "stats"):
raise InvalidParameter("Missing or wrong ip/host category")
safe.kwargs["kind"] = kind = param.args.pop(0)
if kind == "ip":
validate_iplist("host", param, safe)
elif kind == "name":
validate_strlist("host", param, safe, RXHOST)
elif kind == "stats":
authz_match(role=["Global Admin"], group=["global"])
safe.kwargs["host"] = []
elif method == "POST":
authz_match(role=["Global Admin"], group=["global"])
if not len(param.args) or param.args[0] not in ("stats", "purge"):
raise InvalidParameter("Invalid operation")
safe.kwargs["operation"] = param.args.pop(0)
def _statistics(self):
stats = self._cache.statistics()
cherrypy.request.rest_generate_preamble = \
{ "columns": ["count", "response", "code"] }
return [[stats[key], key[1], key[0]]
for key in sorted(stats.keys())]
@restcall
@tools.expires(secs=12*3600)
def get(self, kind, host):
if kind == "stats":
return self._statistics()
else:
return self._cache.lookup(kind, host)
@restcall
def post(self, operation):
if operation == "purge":
self._cache.purge()
return ["ok"]
else:
stats = self._statistics()
self._cache.reset_statistics()
return stats
| run | identifier_name |
DataHost.py | from WMCore.REST.Auth import authz_match
from WMCore.REST.Server import RESTEntity, restcall
from WMCore.REST.Error import MissingObject, InvalidParameter
from WMCore.REST.Validation import validate_strlist, \
_validate_one, _validate_all
from WMCore.REST.Tools import tools
from Overview.IPInfo import IPInfo, IPResolver, HostInfo
from Overview.Debug import debug
from threading import Thread, Condition
from collections import namedtuple
from netaddr import IPAddress
import cjson, re, cherrypy, time, random
RXIP = re.compile(r"^[.0-9]+$")
RXHOST = re.compile(r"^(?:[-a-z0-9]+\.)+[a-z]{2,5}$")
Value = namedtuple("Value", ["expires", "data"])
Task = namedtuple("Task", ["kind", "hosts", "reply"])
def _check_ip(argname, val):
if not isinstance(val, str) or not RXIP.match(val):
raise InvalidParameter("Incorrect '%s' parameter" % argname)
try:
val = IPAddress(val)
except:
raise InvalidParameter("Incorrect '%s' parameter" % argname)
return val
def validate_iplist(argname, param, safe):
"""Validates that an argument is an array of strings, each of which
is a valid IP address.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which is convertible to an `IPAddress`
object. If successful the array is copied into `safe.kwargs` and the
value is removed from `param.kwargs`. The value always becomes an
array in `safe.kwargs`, even if no or only one argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_ip)
def validate_ip(argname, param, safe, optional = False):
"""Validates that an argument is a valid IP address.
Checks that an argument named `argname` exists in `param.kwargs`
and it is a string convertible to an `IPAddress` object. If
successful the string is copied into `safe.kwargs` and the value
is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_ip, optional)
class Reply:
result = None
kind = None
error = None
signal = None
submitted = False
finished = False
pending = None
until = 0
def _ipinfo(self, i):
return { "ip": str(i.ip), "cidr": str(i.cidr),
"domain": i.domain, "hostname": i.hostname,
"cidrhost": i.cidrhost, "wildhost": i.wildhost,
"asn": {
"cc": i.asn.cc, "asn": i.asn.asn, "country": i.asn.country,
"rir": i.asn.rir, "org": i.asn.org, "desc": i.asn.desc,
"date": i.asn.date
},
"geoip": {
"cc": i.geoip.cc, "country": i.geoip.country,
"continent": i.geoip.continent, "region": i.geoip.region,
"city": i.geoip.city, "lat": i.geoip.lat, "lon": i.geoip.long
}
}
def _hostinfo(self, info):
return { "hostname": info.hostname,
"ipaddrs": [self._ipinfo(i) for i in info.ipaddrs.values()],
"dnsinfo": { "CNAME": dict((k, v) for k, v in info.cnames.iteritems() if v),
"A": dict((k, map(str, v)) for k, v in info.addrs.iteritems() if v) },
"names": [x for x in info.all_names],
"addrs": [x for x in info.all_addrs] }
def __call__(self, info, origin, remain):
debug("HOSTDATA", 3, "replied to %s", self)
if self.kind == "name":
if isinstance(info, HostInfo):
if remain:
debug("HOSTDATA", 2,
"host %s: %d out of %d host addresses, waiting for remaining %d",
info.hostname, len(info.ipaddrs), len(info.all_addrs), remain)
else:
assert info.hostname in self.pending
self.pending.remove(info.hostname)
debug("HOSTDATA", 1,
"host %s: all %d addresses resolved, %d requests remain",
info.hostname, len(info.ipaddrs), len(self.pending))
with self.signal:
if not self.result:
self.result = []
self.result.append(self._hostinfo(info))
if not self.pending:
self.signal.notifyAll()
else:
debug("HOSTDATA", 1, "%s: ignoring address update for %s",
(origin and origin.hostname), info.ip)
elif self.kind == "ip":
assert isinstance(info, IPInfo)
assert info.ip in self.pending
assert not remain
self.pending.remove(info.ip)
debug("HOSTDATA", 1, "ip %s: address resolved, %d requests remain",
info.ip, len(self.pending))
with self.signal:
if not self.result:
self.result = []
self.result.append(self._ipinfo(info))
if not self.pending:
self.signal.notifyAll()
else:
assert False, "internal error, lookup neither host nor ip"
class HostCache(Thread):
"""Utility to resolve host information."""
_PURGE_INTERVAL = 4*3600
_NUM_SIGS = 8
def __init__(self, statedir):
Thread.__init__(self, name = "HostCache")
self._ip2i = IPResolver(cachedir = statedir, maxtime=15)
self._cv = Condition()
self._stopme = False
self._requests = []
self._last_purge = time.time()
self._signals = map(lambda x: Condition(), xrange(0, self._NUM_SIGS))
cherrypy.engine.subscribe('start', self.start)
cherrypy.engine.subscribe('stop', self.stop, priority=100)
def _purge(self):
now = time.time()
debug("HOSTDATA", 1, "purging address resolver")
self._last_purge = time.time()
self._ip2i.purge()
def statistics(self):
with self._cv:
return self._ip2i.statistics()
def reset_statistics(self):
with self._cv:
self._ip2i.reset_statistics()
def purge(self):
with self._cv:
self._purge()
def stop(self):
debug("HOSTDATA", 1, "requesting to stop resolved thread")
with self._cv:
self._stopme = True
self._cv.notifyAll()
def lookup(self, kind, hosts, maxwait=30):
"""
Lookup information either by IP address or host name. | :arg float maxwait: maximum time in seconds to wait for a result.
"""
reply = Reply()
reply.kind = kind
reply.until = time.time() + maxwait
reply.signal = random.choice(self._signals)
reply.pending = set(hosts)
with self._cv:
self._requests.append(Task(kind, hosts, reply))
self._cv.notifyAll()
with reply.signal:
while True:
if self._stopme:
raise RuntimeError("server stopped")
elif reply.error:
raise reply.error
elif not reply.pending:
reply.finished = True
return reply.result
else:
reply.signal.wait()
def run(self):
with self._cv:
while not self._stopme:
npending = 0
ncurreq = len(self._requests)
# Insert any new requests. If they fail, remember the error.
for r in self._requests:
if not r.reply.submitted:
debug("HOSTDATA", 1, "submitting request: %s %s", r.kind, r.hosts)
r.reply.submitted = True
try:
self._ip2i.submit(r.hosts, kind=r.kind, callback=r.reply)
except Exception, e:
r.reply.error = e
# Pump any pending lookups for up to .25 seconds. Note that this
# will wait only as long as needed, and will quit immediately
# if there is no work at all. It's not unusual we need to wait
# longer than this for final results; see the check further on.
try:
self._cv.release()
npending = self._ip2i.process(.25)
finally:
self._cv.acquire()
# Post-process requests. Remove fully completed, expired and
# failed lookups from the request queue.
nmodified = 0
now = time.time()
for r in self._requests[:]:
rr = r.reply
if rr.finished:
debug("HOSTDATA", 2, "request completed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
nmodified += 1
elif rr.submitted and rr.until < now:
debug("HOSTDATA", 1, "request has expired: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.error = RuntimeError("maximum wait time exhausted")
rr.signal.notifyAll()
nmodified += 1
elif rr.submitted and rr.error:
debug("HOSTDATA", 1, "request failed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.signal.notifyAll()
nmodified += 1
# Wait to be notified, but only if we don't already have work to do.
skipwait = (self._stopme or npending or nmodified
or len(self._requests) != ncurreq)
debug("HOSTDATA", 2, ("wait for signal, %d pending, %d requests"
" now vs. %d before, %d modified: %s"),
npending, len(self._requests), ncurreq, nmodified,
(skipwait and "skipping unnecessary wait") or "waiting")
if not skipwait:
if now - self._last_purge > self._PURGE_INTERVAL:
self._purge()
self._cv.wait((self._requests and 0.25) or None)
debug("HOSTDATA", 2, "wait done")
debug("HOSTDATA", 1, "server thread stopped")
class HostData(RESTEntity):
"""REST entity object for Host information."""
def __init__(self, app, api, config, mount):
RESTEntity.__init__(self, app, api, config, mount)
self._cache = HostCache(app.statedir)
def validate(self, apiobj, method, api, param, safe):
if method == "GET":
if not len(param.args) or param.args[0] not in ("ip", "name", "stats"):
raise InvalidParameter("Missing or wrong ip/host category")
safe.kwargs["kind"] = kind = param.args.pop(0)
if kind == "ip":
validate_iplist("host", param, safe)
elif kind == "name":
validate_strlist("host", param, safe, RXHOST)
elif kind == "stats":
authz_match(role=["Global Admin"], group=["global"])
safe.kwargs["host"] = []
elif method == "POST":
authz_match(role=["Global Admin"], group=["global"])
if not len(param.args) or param.args[0] not in ("stats", "purge"):
raise InvalidParameter("Invalid operation")
safe.kwargs["operation"] = param.args.pop(0)
def _statistics(self):
stats = self._cache.statistics()
cherrypy.request.rest_generate_preamble = \
{ "columns": ["count", "response", "code"] }
return [[stats[key], key[1], key[0]]
for key in sorted(stats.keys())]
@restcall
@tools.expires(secs=12*3600)
def get(self, kind, host):
if kind == "stats":
return self._statistics()
else:
return self._cache.lookup(kind, host)
@restcall
def post(self, operation):
if operation == "purge":
self._cache.purge()
return ["ok"]
else:
stats = self._statistics()
self._cache.reset_statistics()
return stats |
:arg str kind: "ip" or "name"
:arg list hosts: list of host name string, ip address or a real name | random_line_split |
DataHost.py | from WMCore.REST.Auth import authz_match
from WMCore.REST.Server import RESTEntity, restcall
from WMCore.REST.Error import MissingObject, InvalidParameter
from WMCore.REST.Validation import validate_strlist, \
_validate_one, _validate_all
from WMCore.REST.Tools import tools
from Overview.IPInfo import IPInfo, IPResolver, HostInfo
from Overview.Debug import debug
from threading import Thread, Condition
from collections import namedtuple
from netaddr import IPAddress
import cjson, re, cherrypy, time, random
RXIP = re.compile(r"^[.0-9]+$")
RXHOST = re.compile(r"^(?:[-a-z0-9]+\.)+[a-z]{2,5}$")
Value = namedtuple("Value", ["expires", "data"])
Task = namedtuple("Task", ["kind", "hosts", "reply"])
def _check_ip(argname, val):
if not isinstance(val, str) or not RXIP.match(val):
raise InvalidParameter("Incorrect '%s' parameter" % argname)
try:
val = IPAddress(val)
except:
raise InvalidParameter("Incorrect '%s' parameter" % argname)
return val
def validate_iplist(argname, param, safe):
"""Validates that an argument is an array of strings, each of which
is a valid IP address.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which is convertible to an `IPAddress`
object. If successful the array is copied into `safe.kwargs` and the
value is removed from `param.kwargs`. The value always becomes an
array in `safe.kwargs`, even if no or only one argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_ip)
def validate_ip(argname, param, safe, optional = False):
"""Validates that an argument is a valid IP address.
Checks that an argument named `argname` exists in `param.kwargs`
and it is a string convertible to an `IPAddress` object. If
successful the string is copied into `safe.kwargs` and the value
is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_ip, optional)
class Reply:
result = None
kind = None
error = None
signal = None
submitted = False
finished = False
pending = None
until = 0
def _ipinfo(self, i):
return { "ip": str(i.ip), "cidr": str(i.cidr),
"domain": i.domain, "hostname": i.hostname,
"cidrhost": i.cidrhost, "wildhost": i.wildhost,
"asn": {
"cc": i.asn.cc, "asn": i.asn.asn, "country": i.asn.country,
"rir": i.asn.rir, "org": i.asn.org, "desc": i.asn.desc,
"date": i.asn.date
},
"geoip": {
"cc": i.geoip.cc, "country": i.geoip.country,
"continent": i.geoip.continent, "region": i.geoip.region,
"city": i.geoip.city, "lat": i.geoip.lat, "lon": i.geoip.long
}
}
def _hostinfo(self, info):
return { "hostname": info.hostname,
"ipaddrs": [self._ipinfo(i) for i in info.ipaddrs.values()],
"dnsinfo": { "CNAME": dict((k, v) for k, v in info.cnames.iteritems() if v),
"A": dict((k, map(str, v)) for k, v in info.addrs.iteritems() if v) },
"names": [x for x in info.all_names],
"addrs": [x for x in info.all_addrs] }
def __call__(self, info, origin, remain):
debug("HOSTDATA", 3, "replied to %s", self)
if self.kind == "name":
if isinstance(info, HostInfo):
if remain:
debug("HOSTDATA", 2,
"host %s: %d out of %d host addresses, waiting for remaining %d",
info.hostname, len(info.ipaddrs), len(info.all_addrs), remain)
else:
assert info.hostname in self.pending
self.pending.remove(info.hostname)
debug("HOSTDATA", 1,
"host %s: all %d addresses resolved, %d requests remain",
info.hostname, len(info.ipaddrs), len(self.pending))
with self.signal:
if not self.result:
self.result = []
self.result.append(self._hostinfo(info))
if not self.pending:
self.signal.notifyAll()
else:
debug("HOSTDATA", 1, "%s: ignoring address update for %s",
(origin and origin.hostname), info.ip)
elif self.kind == "ip":
assert isinstance(info, IPInfo)
assert info.ip in self.pending
assert not remain
self.pending.remove(info.ip)
debug("HOSTDATA", 1, "ip %s: address resolved, %d requests remain",
info.ip, len(self.pending))
with self.signal:
if not self.result:
self.result = []
self.result.append(self._ipinfo(info))
if not self.pending:
self.signal.notifyAll()
else:
assert False, "internal error, lookup neither host nor ip"
class HostCache(Thread):
"""Utility to resolve host information."""
_PURGE_INTERVAL = 4*3600
_NUM_SIGS = 8
def __init__(self, statedir):
Thread.__init__(self, name = "HostCache")
self._ip2i = IPResolver(cachedir = statedir, maxtime=15)
self._cv = Condition()
self._stopme = False
self._requests = []
self._last_purge = time.time()
self._signals = map(lambda x: Condition(), xrange(0, self._NUM_SIGS))
cherrypy.engine.subscribe('start', self.start)
cherrypy.engine.subscribe('stop', self.stop, priority=100)
def _purge(self):
now = time.time()
debug("HOSTDATA", 1, "purging address resolver")
self._last_purge = time.time()
self._ip2i.purge()
def statistics(self):
with self._cv:
return self._ip2i.statistics()
def reset_statistics(self):
with self._cv:
self._ip2i.reset_statistics()
def purge(self):
with self._cv:
self._purge()
def stop(self):
debug("HOSTDATA", 1, "requesting to stop resolved thread")
with self._cv:
self._stopme = True
self._cv.notifyAll()
def lookup(self, kind, hosts, maxwait=30):
"""
Lookup information either by IP address or host name.
:arg str kind: "ip" or "name"
:arg list hosts: list of host name string, ip address or a real name
:arg float maxwait: maximum time in seconds to wait for a result.
"""
reply = Reply()
reply.kind = kind
reply.until = time.time() + maxwait
reply.signal = random.choice(self._signals)
reply.pending = set(hosts)
with self._cv:
self._requests.append(Task(kind, hosts, reply))
self._cv.notifyAll()
with reply.signal:
while True:
if self._stopme:
raise RuntimeError("server stopped")
elif reply.error:
raise reply.error
elif not reply.pending:
reply.finished = True
return reply.result
else:
reply.signal.wait()
def run(self):
with self._cv:
while not self._stopme:
npending = 0
ncurreq = len(self._requests)
# Insert any new requests. If they fail, remember the error.
for r in self._requests:
|
# Pump any pending lookups for up to .25 seconds. Note that this
# will wait only as long as needed, and will quit immediately
# if there is no work at all. It's not unusual we need to wait
# longer than this for final results; see the check further on.
try:
self._cv.release()
npending = self._ip2i.process(.25)
finally:
self._cv.acquire()
# Post-process requests. Remove fully completed, expired and
# failed lookups from the request queue.
nmodified = 0
now = time.time()
for r in self._requests[:]:
rr = r.reply
if rr.finished:
debug("HOSTDATA", 2, "request completed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
nmodified += 1
elif rr.submitted and rr.until < now:
debug("HOSTDATA", 1, "request has expired: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.error = RuntimeError("maximum wait time exhausted")
rr.signal.notifyAll()
nmodified += 1
elif rr.submitted and rr.error:
debug("HOSTDATA", 1, "request failed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.signal.notifyAll()
nmodified += 1
# Wait to be notified, but only if we don't already have work to do.
skipwait = (self._stopme or npending or nmodified
or len(self._requests) != ncurreq)
debug("HOSTDATA", 2, ("wait for signal, %d pending, %d requests"
" now vs. %d before, %d modified: %s"),
npending, len(self._requests), ncurreq, nmodified,
(skipwait and "skipping unnecessary wait") or "waiting")
if not skipwait:
if now - self._last_purge > self._PURGE_INTERVAL:
self._purge()
self._cv.wait((self._requests and 0.25) or None)
debug("HOSTDATA", 2, "wait done")
debug("HOSTDATA", 1, "server thread stopped")
class HostData(RESTEntity):
"""REST entity object for Host information."""
def __init__(self, app, api, config, mount):
RESTEntity.__init__(self, app, api, config, mount)
self._cache = HostCache(app.statedir)
def validate(self, apiobj, method, api, param, safe):
if method == "GET":
if not len(param.args) or param.args[0] not in ("ip", "name", "stats"):
raise InvalidParameter("Missing or wrong ip/host category")
safe.kwargs["kind"] = kind = param.args.pop(0)
if kind == "ip":
validate_iplist("host", param, safe)
elif kind == "name":
validate_strlist("host", param, safe, RXHOST)
elif kind == "stats":
authz_match(role=["Global Admin"], group=["global"])
safe.kwargs["host"] = []
elif method == "POST":
authz_match(role=["Global Admin"], group=["global"])
if not len(param.args) or param.args[0] not in ("stats", "purge"):
raise InvalidParameter("Invalid operation")
safe.kwargs["operation"] = param.args.pop(0)
def _statistics(self):
stats = self._cache.statistics()
cherrypy.request.rest_generate_preamble = \
{ "columns": ["count", "response", "code"] }
return [[stats[key], key[1], key[0]]
for key in sorted(stats.keys())]
@restcall
@tools.expires(secs=12*3600)
def get(self, kind, host):
if kind == "stats":
return self._statistics()
else:
return self._cache.lookup(kind, host)
@restcall
def post(self, operation):
if operation == "purge":
self._cache.purge()
return ["ok"]
else:
stats = self._statistics()
self._cache.reset_statistics()
return stats
| if not r.reply.submitted:
debug("HOSTDATA", 1, "submitting request: %s %s", r.kind, r.hosts)
r.reply.submitted = True
try:
self._ip2i.submit(r.hosts, kind=r.kind, callback=r.reply)
except Exception, e:
r.reply.error = e | conditional_block |
DataHost.py | from WMCore.REST.Auth import authz_match
from WMCore.REST.Server import RESTEntity, restcall
from WMCore.REST.Error import MissingObject, InvalidParameter
from WMCore.REST.Validation import validate_strlist, \
_validate_one, _validate_all
from WMCore.REST.Tools import tools
from Overview.IPInfo import IPInfo, IPResolver, HostInfo
from Overview.Debug import debug
from threading import Thread, Condition
from collections import namedtuple
from netaddr import IPAddress
import cjson, re, cherrypy, time, random
RXIP = re.compile(r"^[.0-9]+$")
RXHOST = re.compile(r"^(?:[-a-z0-9]+\.)+[a-z]{2,5}$")
Value = namedtuple("Value", ["expires", "data"])
Task = namedtuple("Task", ["kind", "hosts", "reply"])
def _check_ip(argname, val):
if not isinstance(val, str) or not RXIP.match(val):
raise InvalidParameter("Incorrect '%s' parameter" % argname)
try:
val = IPAddress(val)
except:
raise InvalidParameter("Incorrect '%s' parameter" % argname)
return val
def validate_iplist(argname, param, safe):
"""Validates that an argument is an array of strings, each of which
is a valid IP address.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which is convertible to an `IPAddress`
object. If successful the array is copied into `safe.kwargs` and the
value is removed from `param.kwargs`. The value always becomes an
array in `safe.kwargs`, even if no or only one argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_ip)
def validate_ip(argname, param, safe, optional = False):
"""Validates that an argument is a valid IP address.
Checks that an argument named `argname` exists in `param.kwargs`
and it is a string convertible to an `IPAddress` object. If
successful the string is copied into `safe.kwargs` and the value
is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_ip, optional)
class Reply:
result = None
kind = None
error = None
signal = None
submitted = False
finished = False
pending = None
until = 0
def _ipinfo(self, i):
return { "ip": str(i.ip), "cidr": str(i.cidr),
"domain": i.domain, "hostname": i.hostname,
"cidrhost": i.cidrhost, "wildhost": i.wildhost,
"asn": {
"cc": i.asn.cc, "asn": i.asn.asn, "country": i.asn.country,
"rir": i.asn.rir, "org": i.asn.org, "desc": i.asn.desc,
"date": i.asn.date
},
"geoip": {
"cc": i.geoip.cc, "country": i.geoip.country,
"continent": i.geoip.continent, "region": i.geoip.region,
"city": i.geoip.city, "lat": i.geoip.lat, "lon": i.geoip.long
}
}
def _hostinfo(self, info):
return { "hostname": info.hostname,
"ipaddrs": [self._ipinfo(i) for i in info.ipaddrs.values()],
"dnsinfo": { "CNAME": dict((k, v) for k, v in info.cnames.iteritems() if v),
"A": dict((k, map(str, v)) for k, v in info.addrs.iteritems() if v) },
"names": [x for x in info.all_names],
"addrs": [x for x in info.all_addrs] }
def __call__(self, info, origin, remain):
debug("HOSTDATA", 3, "replied to %s", self)
if self.kind == "name":
if isinstance(info, HostInfo):
if remain:
debug("HOSTDATA", 2,
"host %s: %d out of %d host addresses, waiting for remaining %d",
info.hostname, len(info.ipaddrs), len(info.all_addrs), remain)
else:
assert info.hostname in self.pending
self.pending.remove(info.hostname)
debug("HOSTDATA", 1,
"host %s: all %d addresses resolved, %d requests remain",
info.hostname, len(info.ipaddrs), len(self.pending))
with self.signal:
if not self.result:
self.result = []
self.result.append(self._hostinfo(info))
if not self.pending:
self.signal.notifyAll()
else:
debug("HOSTDATA", 1, "%s: ignoring address update for %s",
(origin and origin.hostname), info.ip)
elif self.kind == "ip":
assert isinstance(info, IPInfo)
assert info.ip in self.pending
assert not remain
self.pending.remove(info.ip)
debug("HOSTDATA", 1, "ip %s: address resolved, %d requests remain",
info.ip, len(self.pending))
with self.signal:
if not self.result:
self.result = []
self.result.append(self._ipinfo(info))
if not self.pending:
self.signal.notifyAll()
else:
assert False, "internal error, lookup neither host nor ip"
class HostCache(Thread):
|
class HostData(RESTEntity):
"""REST entity object for Host information."""
def __init__(self, app, api, config, mount):
RESTEntity.__init__(self, app, api, config, mount)
self._cache = HostCache(app.statedir)
def validate(self, apiobj, method, api, param, safe):
if method == "GET":
if not len(param.args) or param.args[0] not in ("ip", "name", "stats"):
raise InvalidParameter("Missing or wrong ip/host category")
safe.kwargs["kind"] = kind = param.args.pop(0)
if kind == "ip":
validate_iplist("host", param, safe)
elif kind == "name":
validate_strlist("host", param, safe, RXHOST)
elif kind == "stats":
authz_match(role=["Global Admin"], group=["global"])
safe.kwargs["host"] = []
elif method == "POST":
authz_match(role=["Global Admin"], group=["global"])
if not len(param.args) or param.args[0] not in ("stats", "purge"):
raise InvalidParameter("Invalid operation")
safe.kwargs["operation"] = param.args.pop(0)
def _statistics(self):
stats = self._cache.statistics()
cherrypy.request.rest_generate_preamble = \
{ "columns": ["count", "response", "code"] }
return [[stats[key], key[1], key[0]]
for key in sorted(stats.keys())]
@restcall
@tools.expires(secs=12*3600)
def get(self, kind, host):
if kind == "stats":
return self._statistics()
else:
return self._cache.lookup(kind, host)
@restcall
def post(self, operation):
if operation == "purge":
self._cache.purge()
return ["ok"]
else:
stats = self._statistics()
self._cache.reset_statistics()
return stats
| """Utility to resolve host information."""
_PURGE_INTERVAL = 4*3600
_NUM_SIGS = 8
def __init__(self, statedir):
Thread.__init__(self, name = "HostCache")
self._ip2i = IPResolver(cachedir = statedir, maxtime=15)
self._cv = Condition()
self._stopme = False
self._requests = []
self._last_purge = time.time()
self._signals = map(lambda x: Condition(), xrange(0, self._NUM_SIGS))
cherrypy.engine.subscribe('start', self.start)
cherrypy.engine.subscribe('stop', self.stop, priority=100)
def _purge(self):
now = time.time()
debug("HOSTDATA", 1, "purging address resolver")
self._last_purge = time.time()
self._ip2i.purge()
def statistics(self):
with self._cv:
return self._ip2i.statistics()
def reset_statistics(self):
with self._cv:
self._ip2i.reset_statistics()
def purge(self):
with self._cv:
self._purge()
def stop(self):
debug("HOSTDATA", 1, "requesting to stop resolved thread")
with self._cv:
self._stopme = True
self._cv.notifyAll()
def lookup(self, kind, hosts, maxwait=30):
"""
Lookup information either by IP address or host name.
:arg str kind: "ip" or "name"
:arg list hosts: list of host name string, ip address or a real name
:arg float maxwait: maximum time in seconds to wait for a result.
"""
reply = Reply()
reply.kind = kind
reply.until = time.time() + maxwait
reply.signal = random.choice(self._signals)
reply.pending = set(hosts)
with self._cv:
self._requests.append(Task(kind, hosts, reply))
self._cv.notifyAll()
with reply.signal:
while True:
if self._stopme:
raise RuntimeError("server stopped")
elif reply.error:
raise reply.error
elif not reply.pending:
reply.finished = True
return reply.result
else:
reply.signal.wait()
def run(self):
with self._cv:
while not self._stopme:
npending = 0
ncurreq = len(self._requests)
# Insert any new requests. If they fail, remember the error.
for r in self._requests:
if not r.reply.submitted:
debug("HOSTDATA", 1, "submitting request: %s %s", r.kind, r.hosts)
r.reply.submitted = True
try:
self._ip2i.submit(r.hosts, kind=r.kind, callback=r.reply)
except Exception, e:
r.reply.error = e
# Pump any pending lookups for up to .25 seconds. Note that this
# will wait only as long as needed, and will quit immediately
# if there is no work at all. It's not unusual we need to wait
# longer than this for final results; see the check further on.
try:
self._cv.release()
npending = self._ip2i.process(.25)
finally:
self._cv.acquire()
# Post-process requests. Remove fully completed, expired and
# failed lookups from the request queue.
nmodified = 0
now = time.time()
for r in self._requests[:]:
rr = r.reply
if rr.finished:
debug("HOSTDATA", 2, "request completed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
nmodified += 1
elif rr.submitted and rr.until < now:
debug("HOSTDATA", 1, "request has expired: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.error = RuntimeError("maximum wait time exhausted")
rr.signal.notifyAll()
nmodified += 1
elif rr.submitted and rr.error:
debug("HOSTDATA", 1, "request failed: %s %s", r.kind, r.hosts)
self._requests.remove(r)
with rr.signal:
rr.signal.notifyAll()
nmodified += 1
# Wait to be notified, but only if we don't already have work to do.
skipwait = (self._stopme or npending or nmodified
or len(self._requests) != ncurreq)
debug("HOSTDATA", 2, ("wait for signal, %d pending, %d requests"
" now vs. %d before, %d modified: %s"),
npending, len(self._requests), ncurreq, nmodified,
(skipwait and "skipping unnecessary wait") or "waiting")
if not skipwait:
if now - self._last_purge > self._PURGE_INTERVAL:
self._purge()
self._cv.wait((self._requests and 0.25) or None)
debug("HOSTDATA", 2, "wait done")
debug("HOSTDATA", 1, "server thread stopped") | identifier_body |
datacreator.py | import argparse
import datetime
import os
import random
import sys
from collections import defaultdict
PROJECT_PATH = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-2])
sys.path.append(PROJECT_PATH)
os.environ['DJANGO_SETTINGS_MODULE'] = 'fufufuu.settings'
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.webdesign import lorem_ipsum
from fufufuu.comment.models import Comment
from fufufuu.blog.models import BlogEntry
from fufufuu.account.models import User
from fufufuu.core.languages import Language
from fufufuu.core.enums import SiteSettingKey
from fufufuu.core.models import SiteSetting
from fufufuu.core.utils import slugify, convert_markdown
from fufufuu.dmca.models import DmcaAccount
from fufufuu.manga.enums import MangaCategory, MangaStatus
from fufufuu.manga.models import Manga, MangaTag, MangaPage
from fufufuu.report.enums import ReportStatus, ReportMangaType
from fufufuu.report.models import ReportManga
from fufufuu.tag.enums import TagType
from fufufuu.tag.models import Tag, TagData, TagAlias
#-------------------------------------------------------------------------------
CONFIGURATION = {
'default': {
'BLOG': 30,
'COMMENTS': [0, 0, 0, 0, 0, 1, 2, 3],
'MANGA': 3000,
'REPORTS': 300,
'TAGS': 600,
'TAGS_FK': 30,
'USERS': 5,
},
'test': {
'BLOG': 1,
'COLLECTIONS': 1,
'COMMENTS': [1],
'MANGA': 1,
'REPORTS': 1,
'TAGS': 1,
'TAGS_FK': 1,
'USERS': 1,
}
}
CHUNK_SIZE = 100
#-------------------------------------------------------------------------------
def timed(func):
"""
use @timed to decorate a function that will print out the time it took
for this function to run.
"""
def inner(*args, **kwargs):
start = datetime.datetime.now()
result = func(*args, **kwargs)
finish = datetime.datetime.now()
print('\t{} - {}'.format(func.__name__, finish-start))
return result
return inner
#-------------------------------------------------------------------------------
class DataCreator:
def __init__(self, configuration):
self.config = CONFIGURATION[configuration]
@timed
def create_users(self):
def create_user_helper(username, **kwargs):
user_data = {'username': username}
user_data.update(**kwargs)
user = User(**user_data)
user.set_password('password')
user.save()
return user
self.user = create_user_helper('testuser', is_staff=True, is_moderator=True)
self.user.dmca_account = DmcaAccount.objects.create(
name='Sample DMCA Account',
email='dmca@example.com',
website='http://example.com/dmca',
)
for i in range(self.config['USERS']):
create_user_helper('testuser{}'.format(i))
@timed
def create_tags(self):
tag_list = []
for tag_type in TagType.manga_m2m:
for i in range(1, self.config['TAGS']+1):
name = '{} {}'.format(TagType.choices_dict[tag_type], i)
tag = Tag(tag_type=tag_type, name=name, slug=slugify(name), created_by=self.user, updated_by=self.user)
tag_list.append(tag)
Tag.objects.bulk_create(tag_list)
@timed
def create_tag_aliases(self):
tag_alias_list = []
for tag in Tag.objects.all():
i = 1
while random.random() < 0.2:
language = random.choice([Language.ENGLISH, Language.JAPANESE])
tag_alias = TagAlias(tag=tag, language=language, name='{} - Alias {}'.format(tag.name, i))
tag_alias_list.append(tag_alias)
i += 1
TagAlias.objects.bulk_create(tag_alias_list)
@timed
def create_tag_data(self):
for language in ['en', 'ja']:
tag_data_list = []
for tag in Tag.objects.all():
tag_data_list.append(TagData(
tag=tag,
language=language,
markdown='Tag Data - {} - {}'.format(tag.name, language),
html='Tag Data - {} - {}'.format(tag.name, language),
created_by=self.user,
updated_by=self.user,
))
TagData.objects.bulk_create(tag_data_list)
@timed
def create_manga(self):
manga_category_keys = list(MangaCategory.choices_dict)
manga_list = []
for i in range(1, self.config['MANGA']+1):
title = 'Test Manga {}'.format(i)
manga = Manga(
title=title,
slug=slugify(title),
status=MangaStatus.PUBLISHED,
category=random.choice(manga_category_keys),
language=random.choice(['en'] * 9 + ['ja'] * 1),
uncensored=random.random() < 0.05,
published_on=timezone.now(),
created_by=self.user,
updated_by=self.user,
)
manga_list.append(manga)
Manga.objects.bulk_create(manga_list)
two_days_ago = timezone.now() - timezone.timedelta(days=2)
Manga.objects.update(created_on=two_days_ago, updated_on=two_days_ago, published_on=two_days_ago)
@timed
def assign_manga_tank(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
tank_name = 'Tank {}'.format(i)
tank = Tag(tag_type=TagType.TANK, name=tank_name, slug=slugify(tank_name), created_by=self.user, updated_by=self.user)
tank.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
chapter_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
chapter_dict[manga.language] += 1
manga.tank = tank
manga.tank_chapter = chapter_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def assign_manga_collection(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
collection_name = 'Collection {}'.format(i)
collection = Tag(tag_type=TagType.COLLECTION, name=collection_name, slug=slugify(collection_name), created_by=self.user, updated_by=self.user)
collection.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
part_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
part_dict[manga.language] += 1
manga.collection = collection
manga.collection_part = part_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def create_manga_tags(self):
tag_dict = defaultdict(list)
for tag in Tag.objects.all():
tag_dict[tag.tag_type].append(tag)
tag_content_count = len(tag_dict[TagType.CONTENT])
def _create_manga_tags(manga_list):
manga_tag_list = []
for manga in manga_list:
tag_list = []
for tag_type in [TagType.AUTHOR, TagType.CIRCLE, TagType.EVENT, TagType.MAGAZINE, TagType.PARODY, TagType.SCANLATOR]:
if random.random() < 0.5: tag_list.append(random.choice(tag_dict[tag_type]))
tag_list.extend(random.sample(tag_dict[TagType.CONTENT], random.randint(1, min(10, tag_content_count))))
manga_tag_list.extend(map(lambda tag: MangaTag(manga=manga, tag=tag), tag_list))
MangaTag.objects.bulk_create(manga_tag_list)
for i in range(0, Manga.objects.count(), CHUNK_SIZE):
_create_manga_tags(Manga.objects.all()[i:i+CHUNK_SIZE])
@timed
def create_manga_pages(self):
manga_page_list = []
for manga in Manga.objects.all():
manga_page_list.append(MangaPage(
manga=manga,
page=1,
name='001.jpg',
))
MangaPage.objects.bulk_create(manga_page_list)
@timed
def create_comments(self):
user_list = User.objects.all()
comment_list = []
for manga in Manga.published.all():
for i in range(random.choice(self.config['COMMENTS'])):
comment = lorem_ipsum.words(random.randint(1, 15), common=False)
comment_list.append(Comment(
content_type=ContentType.objects.get_for_model(manga),
object_id=manga.id,
markdown=comment,
html='<p>{}</p>'.format(comment),
created_by=random.choice(user_list),
))
Comment.objects.bulk_create(comment_list)
@timed
def create_manga_reports(self):
user_id_list = User.objects.all().values_list('id', flat=True)
manga_id_list = Manga.objects.all().values_list('id', flat=True)[:self.config['REPORTS']]
type_list = list(ReportMangaType.choices_dict.keys())
report_manga_list = []
for i in range(self.config['REPORTS']):
report_manga_list.append(ReportManga(
manga_id=random.choice(manga_id_list),
status=ReportStatus.OPEN,
type=random.choice(type_list),
comment=lorem_ipsum.sentence(),
weight=random.randint(1, 25),
created_by_id=random.choice(user_id_list),
))
ReportManga.all.bulk_create(report_manga_list)
@timed
def | (self):
blog_entry_list = []
for i in range(self.config['BLOG']):
title = lorem_ipsum.sentence()
markdown = '\n\n'.join(lorem_ipsum.paragraphs(random.randint(1, 3)))
blog_entry = BlogEntry(
title=title,
slug=slugify(title),
markdown=markdown,
html=convert_markdown(markdown),
created_by=self.user,
)
blog_entry_list.append(blog_entry)
BlogEntry.objects.bulk_create(blog_entry_list)
@timed
def create_settings(self):
settings = (
(SiteSettingKey.ENABLE_COMMENTS, 'True'),
(SiteSettingKey.ENABLE_DOWNLOADS, 'True'),
(SiteSettingKey.ENABLE_REGISTRATION, 'True'),
(SiteSettingKey.ENABLE_UPLOADS, 'True'),
)
for k, v in settings: SiteSetting.set_val(k, v, self.user)
def run(self):
print('-'*80)
print('datacreator.py started')
start = datetime.datetime.now()
self.create_users()
self.create_tags()
self.create_tag_aliases()
self.create_tag_data()
self.create_manga()
self.assign_manga_tank()
self.assign_manga_collection()
self.create_manga_tags()
self.create_manga_pages()
self.create_comments()
self.create_manga_reports()
self.create_blog_entries()
self.create_settings()
finish = datetime.datetime.now()
print('datacreator.py finished in {}'.format(finish-start))
print('-'*80)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Datacreator utility for Fufufuu')
parser.add_argument('--config', dest='config', default='default', help='specify the configuration for datacreator to use (optional)')
arg_dict = vars(parser.parse_args())
dc = DataCreator(arg_dict['config'])
dc.run()
| create_blog_entries | identifier_name |
datacreator.py | import argparse
import datetime
import os
import random
import sys
from collections import defaultdict
PROJECT_PATH = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-2])
sys.path.append(PROJECT_PATH)
os.environ['DJANGO_SETTINGS_MODULE'] = 'fufufuu.settings'
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.webdesign import lorem_ipsum
from fufufuu.comment.models import Comment
from fufufuu.blog.models import BlogEntry
from fufufuu.account.models import User
from fufufuu.core.languages import Language
from fufufuu.core.enums import SiteSettingKey
from fufufuu.core.models import SiteSetting
from fufufuu.core.utils import slugify, convert_markdown
from fufufuu.dmca.models import DmcaAccount
from fufufuu.manga.enums import MangaCategory, MangaStatus
from fufufuu.manga.models import Manga, MangaTag, MangaPage
from fufufuu.report.enums import ReportStatus, ReportMangaType
from fufufuu.report.models import ReportManga
from fufufuu.tag.enums import TagType
from fufufuu.tag.models import Tag, TagData, TagAlias
#-------------------------------------------------------------------------------
CONFIGURATION = {
'default': {
'BLOG': 30,
'COMMENTS': [0, 0, 0, 0, 0, 1, 2, 3],
'MANGA': 3000,
'REPORTS': 300,
'TAGS': 600,
'TAGS_FK': 30,
'USERS': 5,
},
'test': {
'BLOG': 1,
'COLLECTIONS': 1,
'COMMENTS': [1],
'MANGA': 1,
'REPORTS': 1,
'TAGS': 1,
'TAGS_FK': 1,
'USERS': 1,
}
}
CHUNK_SIZE = 100
#-------------------------------------------------------------------------------
def timed(func):
"""
use @timed to decorate a function that will print out the time it took
for this function to run.
"""
def inner(*args, **kwargs):
start = datetime.datetime.now()
result = func(*args, **kwargs)
finish = datetime.datetime.now()
print('\t{} - {}'.format(func.__name__, finish-start))
return result
return inner
#-------------------------------------------------------------------------------
class DataCreator:
def __init__(self, configuration):
self.config = CONFIGURATION[configuration]
@timed
def create_users(self):
def create_user_helper(username, **kwargs):
user_data = {'username': username}
user_data.update(**kwargs)
user = User(**user_data)
user.set_password('password')
user.save()
return user
self.user = create_user_helper('testuser', is_staff=True, is_moderator=True)
self.user.dmca_account = DmcaAccount.objects.create(
name='Sample DMCA Account',
email='dmca@example.com',
website='http://example.com/dmca',
)
for i in range(self.config['USERS']):
|
@timed
def create_tags(self):
tag_list = []
for tag_type in TagType.manga_m2m:
for i in range(1, self.config['TAGS']+1):
name = '{} {}'.format(TagType.choices_dict[tag_type], i)
tag = Tag(tag_type=tag_type, name=name, slug=slugify(name), created_by=self.user, updated_by=self.user)
tag_list.append(tag)
Tag.objects.bulk_create(tag_list)
@timed
def create_tag_aliases(self):
tag_alias_list = []
for tag in Tag.objects.all():
i = 1
while random.random() < 0.2:
language = random.choice([Language.ENGLISH, Language.JAPANESE])
tag_alias = TagAlias(tag=tag, language=language, name='{} - Alias {}'.format(tag.name, i))
tag_alias_list.append(tag_alias)
i += 1
TagAlias.objects.bulk_create(tag_alias_list)
@timed
def create_tag_data(self):
for language in ['en', 'ja']:
tag_data_list = []
for tag in Tag.objects.all():
tag_data_list.append(TagData(
tag=tag,
language=language,
markdown='Tag Data - {} - {}'.format(tag.name, language),
html='Tag Data - {} - {}'.format(tag.name, language),
created_by=self.user,
updated_by=self.user,
))
TagData.objects.bulk_create(tag_data_list)
@timed
def create_manga(self):
manga_category_keys = list(MangaCategory.choices_dict)
manga_list = []
for i in range(1, self.config['MANGA']+1):
title = 'Test Manga {}'.format(i)
manga = Manga(
title=title,
slug=slugify(title),
status=MangaStatus.PUBLISHED,
category=random.choice(manga_category_keys),
language=random.choice(['en'] * 9 + ['ja'] * 1),
uncensored=random.random() < 0.05,
published_on=timezone.now(),
created_by=self.user,
updated_by=self.user,
)
manga_list.append(manga)
Manga.objects.bulk_create(manga_list)
two_days_ago = timezone.now() - timezone.timedelta(days=2)
Manga.objects.update(created_on=two_days_ago, updated_on=two_days_ago, published_on=two_days_ago)
@timed
def assign_manga_tank(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
tank_name = 'Tank {}'.format(i)
tank = Tag(tag_type=TagType.TANK, name=tank_name, slug=slugify(tank_name), created_by=self.user, updated_by=self.user)
tank.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
chapter_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
chapter_dict[manga.language] += 1
manga.tank = tank
manga.tank_chapter = chapter_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def assign_manga_collection(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
collection_name = 'Collection {}'.format(i)
collection = Tag(tag_type=TagType.COLLECTION, name=collection_name, slug=slugify(collection_name), created_by=self.user, updated_by=self.user)
collection.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
part_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
part_dict[manga.language] += 1
manga.collection = collection
manga.collection_part = part_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def create_manga_tags(self):
tag_dict = defaultdict(list)
for tag in Tag.objects.all():
tag_dict[tag.tag_type].append(tag)
tag_content_count = len(tag_dict[TagType.CONTENT])
def _create_manga_tags(manga_list):
manga_tag_list = []
for manga in manga_list:
tag_list = []
for tag_type in [TagType.AUTHOR, TagType.CIRCLE, TagType.EVENT, TagType.MAGAZINE, TagType.PARODY, TagType.SCANLATOR]:
if random.random() < 0.5: tag_list.append(random.choice(tag_dict[tag_type]))
tag_list.extend(random.sample(tag_dict[TagType.CONTENT], random.randint(1, min(10, tag_content_count))))
manga_tag_list.extend(map(lambda tag: MangaTag(manga=manga, tag=tag), tag_list))
MangaTag.objects.bulk_create(manga_tag_list)
for i in range(0, Manga.objects.count(), CHUNK_SIZE):
_create_manga_tags(Manga.objects.all()[i:i+CHUNK_SIZE])
@timed
def create_manga_pages(self):
manga_page_list = []
for manga in Manga.objects.all():
manga_page_list.append(MangaPage(
manga=manga,
page=1,
name='001.jpg',
))
MangaPage.objects.bulk_create(manga_page_list)
@timed
def create_comments(self):
user_list = User.objects.all()
comment_list = []
for manga in Manga.published.all():
for i in range(random.choice(self.config['COMMENTS'])):
comment = lorem_ipsum.words(random.randint(1, 15), common=False)
comment_list.append(Comment(
content_type=ContentType.objects.get_for_model(manga),
object_id=manga.id,
markdown=comment,
html='<p>{}</p>'.format(comment),
created_by=random.choice(user_list),
))
Comment.objects.bulk_create(comment_list)
@timed
def create_manga_reports(self):
user_id_list = User.objects.all().values_list('id', flat=True)
manga_id_list = Manga.objects.all().values_list('id', flat=True)[:self.config['REPORTS']]
type_list = list(ReportMangaType.choices_dict.keys())
report_manga_list = []
for i in range(self.config['REPORTS']):
report_manga_list.append(ReportManga(
manga_id=random.choice(manga_id_list),
status=ReportStatus.OPEN,
type=random.choice(type_list),
comment=lorem_ipsum.sentence(),
weight=random.randint(1, 25),
created_by_id=random.choice(user_id_list),
))
ReportManga.all.bulk_create(report_manga_list)
@timed
def create_blog_entries(self):
blog_entry_list = []
for i in range(self.config['BLOG']):
title = lorem_ipsum.sentence()
markdown = '\n\n'.join(lorem_ipsum.paragraphs(random.randint(1, 3)))
blog_entry = BlogEntry(
title=title,
slug=slugify(title),
markdown=markdown,
html=convert_markdown(markdown),
created_by=self.user,
)
blog_entry_list.append(blog_entry)
BlogEntry.objects.bulk_create(blog_entry_list)
@timed
def create_settings(self):
settings = (
(SiteSettingKey.ENABLE_COMMENTS, 'True'),
(SiteSettingKey.ENABLE_DOWNLOADS, 'True'),
(SiteSettingKey.ENABLE_REGISTRATION, 'True'),
(SiteSettingKey.ENABLE_UPLOADS, 'True'),
)
for k, v in settings: SiteSetting.set_val(k, v, self.user)
def run(self):
print('-'*80)
print('datacreator.py started')
start = datetime.datetime.now()
self.create_users()
self.create_tags()
self.create_tag_aliases()
self.create_tag_data()
self.create_manga()
self.assign_manga_tank()
self.assign_manga_collection()
self.create_manga_tags()
self.create_manga_pages()
self.create_comments()
self.create_manga_reports()
self.create_blog_entries()
self.create_settings()
finish = datetime.datetime.now()
print('datacreator.py finished in {}'.format(finish-start))
print('-'*80)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Datacreator utility for Fufufuu')
parser.add_argument('--config', dest='config', default='default', help='specify the configuration for datacreator to use (optional)')
arg_dict = vars(parser.parse_args())
dc = DataCreator(arg_dict['config'])
dc.run()
| create_user_helper('testuser{}'.format(i)) | conditional_block |
datacreator.py | import argparse
import datetime
import os
import random
import sys
from collections import defaultdict
PROJECT_PATH = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-2])
sys.path.append(PROJECT_PATH)
os.environ['DJANGO_SETTINGS_MODULE'] = 'fufufuu.settings'
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.webdesign import lorem_ipsum
from fufufuu.comment.models import Comment
from fufufuu.blog.models import BlogEntry
from fufufuu.account.models import User
from fufufuu.core.languages import Language
from fufufuu.core.enums import SiteSettingKey
from fufufuu.core.models import SiteSetting
from fufufuu.core.utils import slugify, convert_markdown
from fufufuu.dmca.models import DmcaAccount
from fufufuu.manga.enums import MangaCategory, MangaStatus
from fufufuu.manga.models import Manga, MangaTag, MangaPage
from fufufuu.report.enums import ReportStatus, ReportMangaType
from fufufuu.report.models import ReportManga
from fufufuu.tag.enums import TagType
from fufufuu.tag.models import Tag, TagData, TagAlias
#-------------------------------------------------------------------------------
CONFIGURATION = {
'default': {
'BLOG': 30,
'COMMENTS': [0, 0, 0, 0, 0, 1, 2, 3],
'MANGA': 3000,
'REPORTS': 300,
'TAGS': 600,
'TAGS_FK': 30,
'USERS': 5,
},
'test': {
'BLOG': 1,
'COLLECTIONS': 1,
'COMMENTS': [1],
'MANGA': 1,
'REPORTS': 1,
'TAGS': 1,
'TAGS_FK': 1,
'USERS': 1,
}
}
CHUNK_SIZE = 100
#-------------------------------------------------------------------------------
def timed(func):
"""
use @timed to decorate a function that will print out the time it took
for this function to run.
"""
def inner(*args, **kwargs):
start = datetime.datetime.now()
result = func(*args, **kwargs)
finish = datetime.datetime.now()
print('\t{} - {}'.format(func.__name__, finish-start))
return result
return inner
#-------------------------------------------------------------------------------
class DataCreator:
def __init__(self, configuration):
self.config = CONFIGURATION[configuration]
@timed
def create_users(self):
def create_user_helper(username, **kwargs):
user_data = {'username': username}
user_data.update(**kwargs)
user = User(**user_data)
user.set_password('password')
user.save()
return user
self.user = create_user_helper('testuser', is_staff=True, is_moderator=True)
self.user.dmca_account = DmcaAccount.objects.create(
name='Sample DMCA Account',
email='dmca@example.com',
website='http://example.com/dmca',
)
for i in range(self.config['USERS']):
create_user_helper('testuser{}'.format(i))
@timed
def create_tags(self):
tag_list = []
for tag_type in TagType.manga_m2m:
for i in range(1, self.config['TAGS']+1):
name = '{} {}'.format(TagType.choices_dict[tag_type], i)
tag = Tag(tag_type=tag_type, name=name, slug=slugify(name), created_by=self.user, updated_by=self.user)
tag_list.append(tag)
Tag.objects.bulk_create(tag_list)
@timed
def create_tag_aliases(self):
tag_alias_list = []
for tag in Tag.objects.all():
i = 1
while random.random() < 0.2:
language = random.choice([Language.ENGLISH, Language.JAPANESE])
tag_alias = TagAlias(tag=tag, language=language, name='{} - Alias {}'.format(tag.name, i))
tag_alias_list.append(tag_alias)
i += 1
TagAlias.objects.bulk_create(tag_alias_list)
@timed
def create_tag_data(self):
for language in ['en', 'ja']:
tag_data_list = []
for tag in Tag.objects.all():
tag_data_list.append(TagData(
tag=tag,
language=language,
markdown='Tag Data - {} - {}'.format(tag.name, language),
html='Tag Data - {} - {}'.format(tag.name, language),
created_by=self.user,
updated_by=self.user,
))
TagData.objects.bulk_create(tag_data_list)
@timed
def create_manga(self):
manga_category_keys = list(MangaCategory.choices_dict)
manga_list = []
for i in range(1, self.config['MANGA']+1):
title = 'Test Manga {}'.format(i)
manga = Manga(
title=title,
slug=slugify(title),
status=MangaStatus.PUBLISHED,
category=random.choice(manga_category_keys),
language=random.choice(['en'] * 9 + ['ja'] * 1),
uncensored=random.random() < 0.05,
published_on=timezone.now(),
created_by=self.user,
updated_by=self.user,
)
manga_list.append(manga)
Manga.objects.bulk_create(manga_list)
two_days_ago = timezone.now() - timezone.timedelta(days=2)
Manga.objects.update(created_on=two_days_ago, updated_on=two_days_ago, published_on=two_days_ago)
@timed
def assign_manga_tank(self):
|
@timed
def assign_manga_collection(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
collection_name = 'Collection {}'.format(i)
collection = Tag(tag_type=TagType.COLLECTION, name=collection_name, slug=slugify(collection_name), created_by=self.user, updated_by=self.user)
collection.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
part_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
part_dict[manga.language] += 1
manga.collection = collection
manga.collection_part = part_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def create_manga_tags(self):
tag_dict = defaultdict(list)
for tag in Tag.objects.all():
tag_dict[tag.tag_type].append(tag)
tag_content_count = len(tag_dict[TagType.CONTENT])
def _create_manga_tags(manga_list):
manga_tag_list = []
for manga in manga_list:
tag_list = []
for tag_type in [TagType.AUTHOR, TagType.CIRCLE, TagType.EVENT, TagType.MAGAZINE, TagType.PARODY, TagType.SCANLATOR]:
if random.random() < 0.5: tag_list.append(random.choice(tag_dict[tag_type]))
tag_list.extend(random.sample(tag_dict[TagType.CONTENT], random.randint(1, min(10, tag_content_count))))
manga_tag_list.extend(map(lambda tag: MangaTag(manga=manga, tag=tag), tag_list))
MangaTag.objects.bulk_create(manga_tag_list)
for i in range(0, Manga.objects.count(), CHUNK_SIZE):
_create_manga_tags(Manga.objects.all()[i:i+CHUNK_SIZE])
@timed
def create_manga_pages(self):
manga_page_list = []
for manga in Manga.objects.all():
manga_page_list.append(MangaPage(
manga=manga,
page=1,
name='001.jpg',
))
MangaPage.objects.bulk_create(manga_page_list)
@timed
def create_comments(self):
user_list = User.objects.all()
comment_list = []
for manga in Manga.published.all():
for i in range(random.choice(self.config['COMMENTS'])):
comment = lorem_ipsum.words(random.randint(1, 15), common=False)
comment_list.append(Comment(
content_type=ContentType.objects.get_for_model(manga),
object_id=manga.id,
markdown=comment,
html='<p>{}</p>'.format(comment),
created_by=random.choice(user_list),
))
Comment.objects.bulk_create(comment_list)
@timed
def create_manga_reports(self):
user_id_list = User.objects.all().values_list('id', flat=True)
manga_id_list = Manga.objects.all().values_list('id', flat=True)[:self.config['REPORTS']]
type_list = list(ReportMangaType.choices_dict.keys())
report_manga_list = []
for i in range(self.config['REPORTS']):
report_manga_list.append(ReportManga(
manga_id=random.choice(manga_id_list),
status=ReportStatus.OPEN,
type=random.choice(type_list),
comment=lorem_ipsum.sentence(),
weight=random.randint(1, 25),
created_by_id=random.choice(user_id_list),
))
ReportManga.all.bulk_create(report_manga_list)
@timed
def create_blog_entries(self):
blog_entry_list = []
for i in range(self.config['BLOG']):
title = lorem_ipsum.sentence()
markdown = '\n\n'.join(lorem_ipsum.paragraphs(random.randint(1, 3)))
blog_entry = BlogEntry(
title=title,
slug=slugify(title),
markdown=markdown,
html=convert_markdown(markdown),
created_by=self.user,
)
blog_entry_list.append(blog_entry)
BlogEntry.objects.bulk_create(blog_entry_list)
@timed
def create_settings(self):
settings = (
(SiteSettingKey.ENABLE_COMMENTS, 'True'),
(SiteSettingKey.ENABLE_DOWNLOADS, 'True'),
(SiteSettingKey.ENABLE_REGISTRATION, 'True'),
(SiteSettingKey.ENABLE_UPLOADS, 'True'),
)
for k, v in settings: SiteSetting.set_val(k, v, self.user)
def run(self):
print('-'*80)
print('datacreator.py started')
start = datetime.datetime.now()
self.create_users()
self.create_tags()
self.create_tag_aliases()
self.create_tag_data()
self.create_manga()
self.assign_manga_tank()
self.assign_manga_collection()
self.create_manga_tags()
self.create_manga_pages()
self.create_comments()
self.create_manga_reports()
self.create_blog_entries()
self.create_settings()
finish = datetime.datetime.now()
print('datacreator.py finished in {}'.format(finish-start))
print('-'*80)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Datacreator utility for Fufufuu')
parser.add_argument('--config', dest='config', default='default', help='specify the configuration for datacreator to use (optional)')
arg_dict = vars(parser.parse_args())
dc = DataCreator(arg_dict['config'])
dc.run()
| manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
tank_name = 'Tank {}'.format(i)
tank = Tag(tag_type=TagType.TANK, name=tank_name, slug=slugify(tank_name), created_by=self.user, updated_by=self.user)
tank.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
chapter_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
chapter_dict[manga.language] += 1
manga.tank = tank
manga.tank_chapter = chapter_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id) | identifier_body |
datacreator.py | import argparse
import datetime
import os
import random
import sys
from collections import defaultdict
PROJECT_PATH = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-2])
sys.path.append(PROJECT_PATH)
os.environ['DJANGO_SETTINGS_MODULE'] = 'fufufuu.settings'
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.webdesign import lorem_ipsum
from fufufuu.comment.models import Comment
from fufufuu.blog.models import BlogEntry
from fufufuu.account.models import User
from fufufuu.core.languages import Language
from fufufuu.core.enums import SiteSettingKey
from fufufuu.core.models import SiteSetting
from fufufuu.core.utils import slugify, convert_markdown
from fufufuu.dmca.models import DmcaAccount
from fufufuu.manga.enums import MangaCategory, MangaStatus
from fufufuu.manga.models import Manga, MangaTag, MangaPage
from fufufuu.report.enums import ReportStatus, ReportMangaType
from fufufuu.report.models import ReportManga
from fufufuu.tag.enums import TagType
from fufufuu.tag.models import Tag, TagData, TagAlias
#-------------------------------------------------------------------------------
CONFIGURATION = {
'default': {
'BLOG': 30,
'COMMENTS': [0, 0, 0, 0, 0, 1, 2, 3],
'MANGA': 3000,
'REPORTS': 300,
'TAGS': 600,
'TAGS_FK': 30,
'USERS': 5,
},
'test': {
'BLOG': 1,
'COLLECTIONS': 1,
'COMMENTS': [1],
'MANGA': 1,
'REPORTS': 1,
'TAGS': 1,
'TAGS_FK': 1,
'USERS': 1,
}
}
CHUNK_SIZE = 100
#-------------------------------------------------------------------------------
def timed(func):
"""
use @timed to decorate a function that will print out the time it took
for this function to run.
"""
def inner(*args, **kwargs):
start = datetime.datetime.now()
result = func(*args, **kwargs)
finish = datetime.datetime.now()
print('\t{} - {}'.format(func.__name__, finish-start))
return result
return inner
#-------------------------------------------------------------------------------
class DataCreator:
def __init__(self, configuration):
self.config = CONFIGURATION[configuration]
@timed
def create_users(self):
def create_user_helper(username, **kwargs):
user_data = {'username': username}
user_data.update(**kwargs)
user = User(**user_data)
user.set_password('password')
user.save()
return user
self.user = create_user_helper('testuser', is_staff=True, is_moderator=True)
self.user.dmca_account = DmcaAccount.objects.create(
name='Sample DMCA Account',
email='dmca@example.com',
website='http://example.com/dmca',
)
for i in range(self.config['USERS']):
create_user_helper('testuser{}'.format(i))
@timed
def create_tags(self):
tag_list = []
for tag_type in TagType.manga_m2m:
for i in range(1, self.config['TAGS']+1):
name = '{} {}'.format(TagType.choices_dict[tag_type], i)
tag = Tag(tag_type=tag_type, name=name, slug=slugify(name), created_by=self.user, updated_by=self.user)
tag_list.append(tag)
Tag.objects.bulk_create(tag_list)
@timed
def create_tag_aliases(self):
tag_alias_list = []
for tag in Tag.objects.all():
i = 1
while random.random() < 0.2:
language = random.choice([Language.ENGLISH, Language.JAPANESE])
tag_alias = TagAlias(tag=tag, language=language, name='{} - Alias {}'.format(tag.name, i))
tag_alias_list.append(tag_alias)
i += 1
TagAlias.objects.bulk_create(tag_alias_list)
@timed
def create_tag_data(self):
for language in ['en', 'ja']:
tag_data_list = []
for tag in Tag.objects.all():
tag_data_list.append(TagData(
tag=tag,
language=language,
markdown='Tag Data - {} - {}'.format(tag.name, language),
html='Tag Data - {} - {}'.format(tag.name, language),
created_by=self.user,
updated_by=self.user,
))
TagData.objects.bulk_create(tag_data_list)
@timed
def create_manga(self):
manga_category_keys = list(MangaCategory.choices_dict)
manga_list = []
for i in range(1, self.config['MANGA']+1):
title = 'Test Manga {}'.format(i)
manga = Manga(
title=title,
slug=slugify(title),
status=MangaStatus.PUBLISHED,
category=random.choice(manga_category_keys),
language=random.choice(['en'] * 9 + ['ja'] * 1),
uncensored=random.random() < 0.05,
published_on=timezone.now(),
created_by=self.user,
updated_by=self.user,
)
manga_list.append(manga)
Manga.objects.bulk_create(manga_list)
two_days_ago = timezone.now() - timezone.timedelta(days=2)
Manga.objects.update(created_on=two_days_ago, updated_on=two_days_ago, published_on=two_days_ago)
@timed
def assign_manga_tank(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
tank_name = 'Tank {}'.format(i)
tank = Tag(tag_type=TagType.TANK, name=tank_name, slug=slugify(tank_name), created_by=self.user, updated_by=self.user)
tank.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
chapter_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
chapter_dict[manga.language] += 1
manga.tank = tank
manga.tank_chapter = chapter_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def assign_manga_collection(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
collection_name = 'Collection {}'.format(i)
collection = Tag(tag_type=TagType.COLLECTION, name=collection_name, slug=slugify(collection_name), created_by=self.user, updated_by=self.user)
collection.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
part_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
part_dict[manga.language] += 1
manga.collection = collection
manga.collection_part = part_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def create_manga_tags(self):
tag_dict = defaultdict(list)
for tag in Tag.objects.all():
tag_dict[tag.tag_type].append(tag)
tag_content_count = len(tag_dict[TagType.CONTENT])
def _create_manga_tags(manga_list):
manga_tag_list = []
for manga in manga_list:
tag_list = []
for tag_type in [TagType.AUTHOR, TagType.CIRCLE, TagType.EVENT, TagType.MAGAZINE, TagType.PARODY, TagType.SCANLATOR]:
if random.random() < 0.5: tag_list.append(random.choice(tag_dict[tag_type]))
tag_list.extend(random.sample(tag_dict[TagType.CONTENT], random.randint(1, min(10, tag_content_count))))
manga_tag_list.extend(map(lambda tag: MangaTag(manga=manga, tag=tag), tag_list))
MangaTag.objects.bulk_create(manga_tag_list)
for i in range(0, Manga.objects.count(), CHUNK_SIZE):
_create_manga_tags(Manga.objects.all()[i:i+CHUNK_SIZE])
@timed
def create_manga_pages(self):
manga_page_list = []
for manga in Manga.objects.all():
manga_page_list.append(MangaPage(
manga=manga,
page=1,
name='001.jpg',
))
MangaPage.objects.bulk_create(manga_page_list)
@timed
def create_comments(self):
user_list = User.objects.all()
comment_list = []
for manga in Manga.published.all():
for i in range(random.choice(self.config['COMMENTS'])):
comment = lorem_ipsum.words(random.randint(1, 15), common=False)
comment_list.append(Comment(
content_type=ContentType.objects.get_for_model(manga),
object_id=manga.id,
markdown=comment,
html='<p>{}</p>'.format(comment),
created_by=random.choice(user_list),
))
Comment.objects.bulk_create(comment_list)
@timed
def create_manga_reports(self):
user_id_list = User.objects.all().values_list('id', flat=True)
manga_id_list = Manga.objects.all().values_list('id', flat=True)[:self.config['REPORTS']]
type_list = list(ReportMangaType.choices_dict.keys())
report_manga_list = []
for i in range(self.config['REPORTS']):
report_manga_list.append(ReportManga(
manga_id=random.choice(manga_id_list),
status=ReportStatus.OPEN,
type=random.choice(type_list),
comment=lorem_ipsum.sentence(),
weight=random.randint(1, 25),
created_by_id=random.choice(user_id_list),
))
ReportManga.all.bulk_create(report_manga_list)
@timed
def create_blog_entries(self):
blog_entry_list = []
for i in range(self.config['BLOG']):
title = lorem_ipsum.sentence()
markdown = '\n\n'.join(lorem_ipsum.paragraphs(random.randint(1, 3)))
blog_entry = BlogEntry(
title=title,
slug=slugify(title),
markdown=markdown,
html=convert_markdown(markdown),
created_by=self.user,
)
blog_entry_list.append(blog_entry)
BlogEntry.objects.bulk_create(blog_entry_list)
@timed
def create_settings(self):
settings = (
(SiteSettingKey.ENABLE_COMMENTS, 'True'),
(SiteSettingKey.ENABLE_DOWNLOADS, 'True'),
(SiteSettingKey.ENABLE_REGISTRATION, 'True'),
(SiteSettingKey.ENABLE_UPLOADS, 'True'), | )
for k, v in settings: SiteSetting.set_val(k, v, self.user)
def run(self):
print('-'*80)
print('datacreator.py started')
start = datetime.datetime.now()
self.create_users()
self.create_tags()
self.create_tag_aliases()
self.create_tag_data()
self.create_manga()
self.assign_manga_tank()
self.assign_manga_collection()
self.create_manga_tags()
self.create_manga_pages()
self.create_comments()
self.create_manga_reports()
self.create_blog_entries()
self.create_settings()
finish = datetime.datetime.now()
print('datacreator.py finished in {}'.format(finish-start))
print('-'*80)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Datacreator utility for Fufufuu')
parser.add_argument('--config', dest='config', default='default', help='specify the configuration for datacreator to use (optional)')
arg_dict = vars(parser.parse_args())
dc = DataCreator(arg_dict['config'])
dc.run() | random_line_split | |
commands.py | # -*- coding: utf-8 -*-
from __future__ import with_statement
import logging
import os
import pprint
import sys
import re
import time
from random import choice
from socket import gethostbyname
from libcloud.compute.drivers import ec2, rackspace, linode
from .. import etchosts
from .utils import confirm
from . import command, NodeAction, ProjectAction
IP_RE = re.compile(r'(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})')
## Utilities
@command
def show_config(config, args):
"""Pretty-print the current kraftwerk configuration."""
pprint.pprint(config)
@command
def create_node(config, args):
"""Commissions a new server node."""
log = logging.getLogger('kraftwerk.create-node')
if 'pubkey' in config:
pubkey_paths = [config["pubkey"]]
else:
pubkey_paths = [os.path.join(os.environ['HOME'], '.ssh', f) for f \
in ['id_rsa.pub', 'id_dsa.pub']]
for path in pubkey_paths:
if os.path.exists(path):
print 'SSH public key: %s' % path
with open(path) as fp:
pubkey = fp.read().strip()
break
else:
pubkey = raw_input('Your SSH public key (for root login): ')
if not re.search(r'^[a-z0-9.-]+$', args.hostname):
raise CommandError(
"Invalid hostname (must contain only letters, numbers, ., and -): %r"
% args.hostname)
# Query driver for size, image, and location
print args
print dir(args)
image_id = args.image_id or config['image_id']
for i in config.driver.list_images():
if str(i.id) == image_id:
image = i
break
else:
sys.exit("Image %s not found for this provider. Aborting." % image_id)
size_id = args.size_id or config['size_id']
for s in config.driver.list_sizes():
if str(s.id) == size_id:
size = s
break
else:
sys.exit("Size %s not found for this provider. Aborting." % size_id)
location_id = str(getattr(args, 'location-id', config.get("location_id", "0")))
if location_id != 'None':
for l in config.driver.list_locations():
if str(l.id) == location_id:
location = l
break
else:
sys.exit("Location %s not found for this provider. Aborting." % location_id)
else:
location = None
if isinstance(config.driver, ec2.EC2NodeDriver):
extra = dict(ex_userdata="""#!/bin/bash
echo '%s' > /root/.ssh/authorized_keys""" % pubkey)
if not "keyname" in config:
config["keyname"] = raw_input("EC2 Key Pair [default=\"default\"]: ") or "default"
extra.update(ex_keyname=config["keyname"])
if 'securitygroup' in config:
extra.update(ex_securitygroup=config["securitygroup"])
elif isinstance(config.driver, rackspace.RackspaceNodeDriver):
extra = dict(ex_files={'/root/.ssh/authorized_keys': pubkey})
elif isinstance(config.driver, linode.LinodeNodeDriver):
from libcloud.base import NodeAuthSSHKey
extra = dict(auth=NodeAuthSSHKey(pubkey))
create_info = dict(name=args.hostname, location=location,
image=image, size=size, **extra)
node = config.driver.create_node(**create_info)
public_ip = node.public_ip
# Poll the node until it has a public ip
while not public_ip:
time.sleep(3)
for node_ in config.driver.list_nodes():
if node.id == node_.id and node_.public_ip:
public_ip = node_.public_ip[0]
if type(public_ip) == list:
public_ip = public_ip[0]
# At least EC2 passes only back hostname
if not IP_RE.match(public_ip):
public_ip = gethostbyname(public_ip)
if confirm("Create /etc/hosts entry?"):
etchosts.set_etchosts(args.hostname, public_ip)
print u"Node %s (%s)" % (args.hostname, public_ip)
print u"Run 'kraftwerk setup-node %s'" % args.hostname
create_node.parser.add_argument('hostname', default=None,
help="Hostname label for the node (optionally adds an entry to /etc/hosts for easy access)")
create_node.parser.add_argument('--size-id',
help="Provider node size. Defaults to user config.")
create_node.parser.add_argument('--image-id',
help="Ubuntu image. Defaults to user config. (ex. EC2 AMI image id)")
@command
def setup_node(config, args):
"""Install software and prepare a node for kraftwerk action."""
if args.templates:
config['templates'].insert(0, args.templates)
config.templates = config._templates()
stdin, stderr = args.node.ssh(config.template("scripts/node_setup.sh"))
if stderr:
print stderr
else:
print u"Node ready at %s" % (args.node.hostname)
setup_node.parser.add_argument('node', action=NodeAction,
help="Server node to interact with.")
setup_node.parser.add_argument('--templates',
help="External template directory. These will take precedence over "
"kraftwerk and user templates. You can use this to save and "
"organize setup recipes.")
@command
def deploy(config, args):
"""Sync and/or setup a WSGI project. Kraftwerk detects a first-
time setup and runs service setup."""
log = logging.getLogger('kraftwerk.deploy')
# TODO better way to detect new, or maybe move to dedicated command
stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)
new = bool(stderr) or args.override
# Sync codebase over with the web user
destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)
stdout, stderr = args.project.rsync(destination)
if stderr:
log.error("Sync error: %s" % stderr)
sys.exit(stderr)
# Copy requirements
args.project.copy(args.node, 'requirements.txt')
# Put together the setup script
cmd = config.template("scripts/project_setup.sh",
project=args.project, new=new,
upgrade_packages=args.upgrade_packages)
stdout, stderr = args.node.ssh(cmd, pipe=True)
if stderr:
print stderr
# TODO detect new services
if not args.no_service_setup and new:
for service in args.project.services():
args.node.ssh(service.setup_script)
print u"%s live at %r" % (args.project.canonical_domain(), args.node.hostname)
deploy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
deploy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Project root directory path. Defaults to current directory.")
deploy.parser.add_argument('--no-service-setup',
default=False, action='store_true',
help="With this hook kraftwerk overwrites the basic config files but " \
"does not attempt to set up project services.")
deploy.parser.add_argument('--upgrade-packages',
default=False, action='store_true',
help="Upgrade Python packages (adds -U to pip install)")
deploy.parser.add_argument('--override',
default=False, action='store_true',
help="Create folders and config as if deploying for the first time.")
@command
def destroy(config, args):
"""Remove project from a node with all related services and
files."""
log = logging.getLogger('kraftwerk.destroy')
if confirm("Remove project %s from node %s along with all services and data?" %
(args.project.name, args.node.hostname)):
args.node.ssh(config.template("scripts/project_destroy.sh", project=args.project))
print "Project %s removed from node %s" % \
(args.project.name, args.node.hostname )
for service in args.project.services(args.node):
args.node.ssh(service.destroy_script)
destroy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
destroy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Path to the project you want to REMOVE from a server node.")
@command
def stab(config, args):
|
stab.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
stab.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
stab.parser.add_argument('--script', '-s', nargs='+', required=True)
stab.parser.add_argument('--user', '-u', help="User to login and issue command as.", default="web")
@command
def dump(config, args):
"""Create a copy of all service data. Reports a directory with all
dump files."""
timestamp = args.project.dump(args.node)
print "Dump ready at %s:%s" % (args.node.hostname,
args.project.dump_path(timestamp))
dump.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
dump.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
@command
def load(config, args):
"""Load a timestamped dumpdir from the same node. (No support yet
for user provided dumps). To load data from another node use
`sync-services`."""
if not confirm("WARNING: This isn't considered production ready just yet. Continue?"):
return
if not args.no_backup:
timestamp = args.project.dump(args.node)
print "Pre-load backup: %s" % args.project.dump_path(timestamp)
args.project.load(args.node, args.timestamp)
print "Service data from %s loaded at %s" % (args.timestamp,
args.node.hostname)
load.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
load.parser.add_argument('timestamp',
help="ISO 8601 timestamp. This is the dir name inside " \
"/web/project/dump to load data from. (Example: 2010-03-24T15:42:54)")
load.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
load.parser.add_argument('--no-backup', default=False, action='store_true',
help="Only sync files across and exit. Quicker if you don't need to reload Python code.")
@command
def env(config, args):
"""List all project service environment variables, for
convenience."""
print config.template("scripts/env.sh", project=args.project)
env.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
| """Execute a shell command in the project environment. Useful for
django-admin.py syncdb and such."""
cmd = config.template("scripts/env.sh", project=args.project)
cmd = '\n'.join([cmd, ' '.join(args.script)])
args.node.ssh(cmd, user=args.user) | identifier_body |
commands.py | # -*- coding: utf-8 -*-
from __future__ import with_statement
import logging
import os
import pprint
import sys
import re
import time
from random import choice
from socket import gethostbyname
from libcloud.compute.drivers import ec2, rackspace, linode
from .. import etchosts
from .utils import confirm
from . import command, NodeAction, ProjectAction
IP_RE = re.compile(r'(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})')
## Utilities
@command
def show_config(config, args):
"""Pretty-print the current kraftwerk configuration."""
pprint.pprint(config)
@command
def create_node(config, args):
"""Commissions a new server node."""
log = logging.getLogger('kraftwerk.create-node')
if 'pubkey' in config:
pubkey_paths = [config["pubkey"]]
else:
pubkey_paths = [os.path.join(os.environ['HOME'], '.ssh', f) for f \
in ['id_rsa.pub', 'id_dsa.pub']]
for path in pubkey_paths:
if os.path.exists(path):
print 'SSH public key: %s' % path
with open(path) as fp:
pubkey = fp.read().strip()
break
else:
pubkey = raw_input('Your SSH public key (for root login): ')
if not re.search(r'^[a-z0-9.-]+$', args.hostname):
raise CommandError(
"Invalid hostname (must contain only letters, numbers, ., and -): %r"
% args.hostname)
# Query driver for size, image, and location
print args
print dir(args)
image_id = args.image_id or config['image_id']
for i in config.driver.list_images():
if str(i.id) == image_id:
image = i
break
else:
sys.exit("Image %s not found for this provider. Aborting." % image_id)
size_id = args.size_id or config['size_id']
for s in config.driver.list_sizes():
if str(s.id) == size_id:
size = s
break
else:
sys.exit("Size %s not found for this provider. Aborting." % size_id)
location_id = str(getattr(args, 'location-id', config.get("location_id", "0")))
if location_id != 'None':
for l in config.driver.list_locations():
if str(l.id) == location_id:
location = l
break
else:
sys.exit("Location %s not found for this provider. Aborting." % location_id)
else:
location = None
if isinstance(config.driver, ec2.EC2NodeDriver):
extra = dict(ex_userdata="""#!/bin/bash
echo '%s' > /root/.ssh/authorized_keys""" % pubkey)
if not "keyname" in config:
config["keyname"] = raw_input("EC2 Key Pair [default=\"default\"]: ") or "default"
extra.update(ex_keyname=config["keyname"])
if 'securitygroup' in config:
extra.update(ex_securitygroup=config["securitygroup"])
elif isinstance(config.driver, rackspace.RackspaceNodeDriver):
extra = dict(ex_files={'/root/.ssh/authorized_keys': pubkey})
elif isinstance(config.driver, linode.LinodeNodeDriver):
from libcloud.base import NodeAuthSSHKey
extra = dict(auth=NodeAuthSSHKey(pubkey))
create_info = dict(name=args.hostname, location=location,
image=image, size=size, **extra)
node = config.driver.create_node(**create_info)
public_ip = node.public_ip
# Poll the node until it has a public ip
while not public_ip:
time.sleep(3)
for node_ in config.driver.list_nodes():
if node.id == node_.id and node_.public_ip:
public_ip = node_.public_ip[0]
if type(public_ip) == list:
public_ip = public_ip[0]
# At least EC2 passes only back hostname
if not IP_RE.match(public_ip):
public_ip = gethostbyname(public_ip)
if confirm("Create /etc/hosts entry?"):
etchosts.set_etchosts(args.hostname, public_ip)
print u"Node %s (%s)" % (args.hostname, public_ip)
print u"Run 'kraftwerk setup-node %s'" % args.hostname
create_node.parser.add_argument('hostname', default=None,
help="Hostname label for the node (optionally adds an entry to /etc/hosts for easy access)")
create_node.parser.add_argument('--size-id',
help="Provider node size. Defaults to user config.")
create_node.parser.add_argument('--image-id',
help="Ubuntu image. Defaults to user config. (ex. EC2 AMI image id)")
@command
def setup_node(config, args):
"""Install software and prepare a node for kraftwerk action."""
if args.templates:
config['templates'].insert(0, args.templates)
config.templates = config._templates()
stdin, stderr = args.node.ssh(config.template("scripts/node_setup.sh"))
if stderr:
print stderr
else:
print u"Node ready at %s" % (args.node.hostname)
setup_node.parser.add_argument('node', action=NodeAction,
help="Server node to interact with.")
setup_node.parser.add_argument('--templates',
help="External template directory. These will take precedence over "
"kraftwerk and user templates. You can use this to save and "
"organize setup recipes.")
@command
def deploy(config, args):
"""Sync and/or setup a WSGI project. Kraftwerk detects a first-
time setup and runs service setup."""
log = logging.getLogger('kraftwerk.deploy')
# TODO better way to detect new, or maybe move to dedicated command
stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)
new = bool(stderr) or args.override
# Sync codebase over with the web user
destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)
stdout, stderr = args.project.rsync(destination)
if stderr:
log.error("Sync error: %s" % stderr)
sys.exit(stderr)
# Copy requirements
args.project.copy(args.node, 'requirements.txt')
# Put together the setup script
cmd = config.template("scripts/project_setup.sh",
project=args.project, new=new,
upgrade_packages=args.upgrade_packages)
stdout, stderr = args.node.ssh(cmd, pipe=True)
if stderr:
print stderr
# TODO detect new services
if not args.no_service_setup and new:
for service in args.project.services():
args.node.ssh(service.setup_script)
print u"%s live at %r" % (args.project.canonical_domain(), args.node.hostname)
deploy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
deploy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Project root directory path. Defaults to current directory.")
deploy.parser.add_argument('--no-service-setup',
default=False, action='store_true',
help="With this hook kraftwerk overwrites the basic config files but " \
"does not attempt to set up project services.")
deploy.parser.add_argument('--upgrade-packages',
default=False, action='store_true',
help="Upgrade Python packages (adds -U to pip install)")
deploy.parser.add_argument('--override',
default=False, action='store_true',
help="Create folders and config as if deploying for the first time.")
@command
def destroy(config, args):
"""Remove project from a node with all related services and
files."""
log = logging.getLogger('kraftwerk.destroy')
if confirm("Remove project %s from node %s along with all services and data?" %
(args.project.name, args.node.hostname)):
args.node.ssh(config.template("scripts/project_destroy.sh", project=args.project))
print "Project %s removed from node %s" % \
(args.project.name, args.node.hostname )
for service in args.project.services(args.node):
args.node.ssh(service.destroy_script)
destroy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
destroy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Path to the project you want to REMOVE from a server node.")
@command
def stab(config, args):
"""Execute a shell command in the project environment. Useful for
django-admin.py syncdb and such."""
cmd = config.template("scripts/env.sh", project=args.project)
cmd = '\n'.join([cmd, ' '.join(args.script)])
args.node.ssh(cmd, user=args.user)
stab.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
stab.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
stab.parser.add_argument('--script', '-s', nargs='+', required=True)
stab.parser.add_argument('--user', '-u', help="User to login and issue command as.", default="web")
@command
def dump(config, args):
"""Create a copy of all service data. Reports a directory with all
dump files."""
timestamp = args.project.dump(args.node)
print "Dump ready at %s:%s" % (args.node.hostname,
args.project.dump_path(timestamp))
dump.parser.add_argument('node', action=NodeAction, nargs='?', |
dump.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
@command
def load(config, args):
"""Load a timestamped dumpdir from the same node. (No support yet
for user provided dumps). To load data from another node use
`sync-services`."""
if not confirm("WARNING: This isn't considered production ready just yet. Continue?"):
return
if not args.no_backup:
timestamp = args.project.dump(args.node)
print "Pre-load backup: %s" % args.project.dump_path(timestamp)
args.project.load(args.node, args.timestamp)
print "Service data from %s loaded at %s" % (args.timestamp,
args.node.hostname)
load.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
load.parser.add_argument('timestamp',
help="ISO 8601 timestamp. This is the dir name inside " \
"/web/project/dump to load data from. (Example: 2010-03-24T15:42:54)")
load.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
load.parser.add_argument('--no-backup', default=False, action='store_true',
help="Only sync files across and exit. Quicker if you don't need to reload Python code.")
@command
def env(config, args):
"""List all project service environment variables, for
convenience."""
print config.template("scripts/env.sh", project=args.project)
env.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.") | help="Server node to interact with.") | random_line_split |
commands.py | # -*- coding: utf-8 -*-
from __future__ import with_statement
import logging
import os
import pprint
import sys
import re
import time
from random import choice
from socket import gethostbyname
from libcloud.compute.drivers import ec2, rackspace, linode
from .. import etchosts
from .utils import confirm
from . import command, NodeAction, ProjectAction
IP_RE = re.compile(r'(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})')
## Utilities
@command
def show_config(config, args):
"""Pretty-print the current kraftwerk configuration."""
pprint.pprint(config)
@command
def create_node(config, args):
"""Commissions a new server node."""
log = logging.getLogger('kraftwerk.create-node')
if 'pubkey' in config:
pubkey_paths = [config["pubkey"]]
else:
pubkey_paths = [os.path.join(os.environ['HOME'], '.ssh', f) for f \
in ['id_rsa.pub', 'id_dsa.pub']]
for path in pubkey_paths:
if os.path.exists(path):
print 'SSH public key: %s' % path
with open(path) as fp:
pubkey = fp.read().strip()
break
else:
pubkey = raw_input('Your SSH public key (for root login): ')
if not re.search(r'^[a-z0-9.-]+$', args.hostname):
raise CommandError(
"Invalid hostname (must contain only letters, numbers, ., and -): %r"
% args.hostname)
# Query driver for size, image, and location
print args
print dir(args)
image_id = args.image_id or config['image_id']
for i in config.driver.list_images():
if str(i.id) == image_id:
image = i
break
else:
sys.exit("Image %s not found for this provider. Aborting." % image_id)
size_id = args.size_id or config['size_id']
for s in config.driver.list_sizes():
if str(s.id) == size_id:
size = s
break
else:
sys.exit("Size %s not found for this provider. Aborting." % size_id)
location_id = str(getattr(args, 'location-id', config.get("location_id", "0")))
if location_id != 'None':
for l in config.driver.list_locations():
if str(l.id) == location_id:
location = l
break
else:
sys.exit("Location %s not found for this provider. Aborting." % location_id)
else:
location = None
if isinstance(config.driver, ec2.EC2NodeDriver):
extra = dict(ex_userdata="""#!/bin/bash
echo '%s' > /root/.ssh/authorized_keys""" % pubkey)
if not "keyname" in config:
config["keyname"] = raw_input("EC2 Key Pair [default=\"default\"]: ") or "default"
extra.update(ex_keyname=config["keyname"])
if 'securitygroup' in config:
extra.update(ex_securitygroup=config["securitygroup"])
elif isinstance(config.driver, rackspace.RackspaceNodeDriver):
extra = dict(ex_files={'/root/.ssh/authorized_keys': pubkey})
elif isinstance(config.driver, linode.LinodeNodeDriver):
from libcloud.base import NodeAuthSSHKey
extra = dict(auth=NodeAuthSSHKey(pubkey))
create_info = dict(name=args.hostname, location=location,
image=image, size=size, **extra)
node = config.driver.create_node(**create_info)
public_ip = node.public_ip
# Poll the node until it has a public ip
while not public_ip:
time.sleep(3)
for node_ in config.driver.list_nodes():
|
if type(public_ip) == list:
public_ip = public_ip[0]
# At least EC2 passes only back hostname
if not IP_RE.match(public_ip):
public_ip = gethostbyname(public_ip)
if confirm("Create /etc/hosts entry?"):
etchosts.set_etchosts(args.hostname, public_ip)
print u"Node %s (%s)" % (args.hostname, public_ip)
print u"Run 'kraftwerk setup-node %s'" % args.hostname
create_node.parser.add_argument('hostname', default=None,
help="Hostname label for the node (optionally adds an entry to /etc/hosts for easy access)")
create_node.parser.add_argument('--size-id',
help="Provider node size. Defaults to user config.")
create_node.parser.add_argument('--image-id',
help="Ubuntu image. Defaults to user config. (ex. EC2 AMI image id)")
@command
def setup_node(config, args):
"""Install software and prepare a node for kraftwerk action."""
if args.templates:
config['templates'].insert(0, args.templates)
config.templates = config._templates()
stdin, stderr = args.node.ssh(config.template("scripts/node_setup.sh"))
if stderr:
print stderr
else:
print u"Node ready at %s" % (args.node.hostname)
setup_node.parser.add_argument('node', action=NodeAction,
help="Server node to interact with.")
setup_node.parser.add_argument('--templates',
help="External template directory. These will take precedence over "
"kraftwerk and user templates. You can use this to save and "
"organize setup recipes.")
@command
def deploy(config, args):
"""Sync and/or setup a WSGI project. Kraftwerk detects a first-
time setup and runs service setup."""
log = logging.getLogger('kraftwerk.deploy')
# TODO better way to detect new, or maybe move to dedicated command
stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)
new = bool(stderr) or args.override
# Sync codebase over with the web user
destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)
stdout, stderr = args.project.rsync(destination)
if stderr:
log.error("Sync error: %s" % stderr)
sys.exit(stderr)
# Copy requirements
args.project.copy(args.node, 'requirements.txt')
# Put together the setup script
cmd = config.template("scripts/project_setup.sh",
project=args.project, new=new,
upgrade_packages=args.upgrade_packages)
stdout, stderr = args.node.ssh(cmd, pipe=True)
if stderr:
print stderr
# TODO detect new services
if not args.no_service_setup and new:
for service in args.project.services():
args.node.ssh(service.setup_script)
print u"%s live at %r" % (args.project.canonical_domain(), args.node.hostname)
deploy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
deploy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Project root directory path. Defaults to current directory.")
deploy.parser.add_argument('--no-service-setup',
default=False, action='store_true',
help="With this hook kraftwerk overwrites the basic config files but " \
"does not attempt to set up project services.")
deploy.parser.add_argument('--upgrade-packages',
default=False, action='store_true',
help="Upgrade Python packages (adds -U to pip install)")
deploy.parser.add_argument('--override',
default=False, action='store_true',
help="Create folders and config as if deploying for the first time.")
@command
def destroy(config, args):
"""Remove project from a node with all related services and
files."""
log = logging.getLogger('kraftwerk.destroy')
if confirm("Remove project %s from node %s along with all services and data?" %
(args.project.name, args.node.hostname)):
args.node.ssh(config.template("scripts/project_destroy.sh", project=args.project))
print "Project %s removed from node %s" % \
(args.project.name, args.node.hostname )
for service in args.project.services(args.node):
args.node.ssh(service.destroy_script)
destroy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
destroy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Path to the project you want to REMOVE from a server node.")
@command
def stab(config, args):
"""Execute a shell command in the project environment. Useful for
django-admin.py syncdb and such."""
cmd = config.template("scripts/env.sh", project=args.project)
cmd = '\n'.join([cmd, ' '.join(args.script)])
args.node.ssh(cmd, user=args.user)
stab.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
stab.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
stab.parser.add_argument('--script', '-s', nargs='+', required=True)
stab.parser.add_argument('--user', '-u', help="User to login and issue command as.", default="web")
@command
def dump(config, args):
"""Create a copy of all service data. Reports a directory with all
dump files."""
timestamp = args.project.dump(args.node)
print "Dump ready at %s:%s" % (args.node.hostname,
args.project.dump_path(timestamp))
dump.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
dump.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
@command
def load(config, args):
"""Load a timestamped dumpdir from the same node. (No support yet
for user provided dumps). To load data from another node use
`sync-services`."""
if not confirm("WARNING: This isn't considered production ready just yet. Continue?"):
return
if not args.no_backup:
timestamp = args.project.dump(args.node)
print "Pre-load backup: %s" % args.project.dump_path(timestamp)
args.project.load(args.node, args.timestamp)
print "Service data from %s loaded at %s" % (args.timestamp,
args.node.hostname)
load.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
load.parser.add_argument('timestamp',
help="ISO 8601 timestamp. This is the dir name inside " \
"/web/project/dump to load data from. (Example: 2010-03-24T15:42:54)")
load.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
load.parser.add_argument('--no-backup', default=False, action='store_true',
help="Only sync files across and exit. Quicker if you don't need to reload Python code.")
@command
def env(config, args):
"""List all project service environment variables, for
convenience."""
print config.template("scripts/env.sh", project=args.project)
env.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
| if node.id == node_.id and node_.public_ip:
public_ip = node_.public_ip[0] | conditional_block |
commands.py | # -*- coding: utf-8 -*-
from __future__ import with_statement
import logging
import os
import pprint
import sys
import re
import time
from random import choice
from socket import gethostbyname
from libcloud.compute.drivers import ec2, rackspace, linode
from .. import etchosts
from .utils import confirm
from . import command, NodeAction, ProjectAction
IP_RE = re.compile(r'(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})')
## Utilities
@command
def | (config, args):
"""Pretty-print the current kraftwerk configuration."""
pprint.pprint(config)
@command
def create_node(config, args):
"""Commissions a new server node."""
log = logging.getLogger('kraftwerk.create-node')
if 'pubkey' in config:
pubkey_paths = [config["pubkey"]]
else:
pubkey_paths = [os.path.join(os.environ['HOME'], '.ssh', f) for f \
in ['id_rsa.pub', 'id_dsa.pub']]
for path in pubkey_paths:
if os.path.exists(path):
print 'SSH public key: %s' % path
with open(path) as fp:
pubkey = fp.read().strip()
break
else:
pubkey = raw_input('Your SSH public key (for root login): ')
if not re.search(r'^[a-z0-9.-]+$', args.hostname):
raise CommandError(
"Invalid hostname (must contain only letters, numbers, ., and -): %r"
% args.hostname)
# Query driver for size, image, and location
print args
print dir(args)
image_id = args.image_id or config['image_id']
for i in config.driver.list_images():
if str(i.id) == image_id:
image = i
break
else:
sys.exit("Image %s not found for this provider. Aborting." % image_id)
size_id = args.size_id or config['size_id']
for s in config.driver.list_sizes():
if str(s.id) == size_id:
size = s
break
else:
sys.exit("Size %s not found for this provider. Aborting." % size_id)
location_id = str(getattr(args, 'location-id', config.get("location_id", "0")))
if location_id != 'None':
for l in config.driver.list_locations():
if str(l.id) == location_id:
location = l
break
else:
sys.exit("Location %s not found for this provider. Aborting." % location_id)
else:
location = None
if isinstance(config.driver, ec2.EC2NodeDriver):
extra = dict(ex_userdata="""#!/bin/bash
echo '%s' > /root/.ssh/authorized_keys""" % pubkey)
if not "keyname" in config:
config["keyname"] = raw_input("EC2 Key Pair [default=\"default\"]: ") or "default"
extra.update(ex_keyname=config["keyname"])
if 'securitygroup' in config:
extra.update(ex_securitygroup=config["securitygroup"])
elif isinstance(config.driver, rackspace.RackspaceNodeDriver):
extra = dict(ex_files={'/root/.ssh/authorized_keys': pubkey})
elif isinstance(config.driver, linode.LinodeNodeDriver):
from libcloud.base import NodeAuthSSHKey
extra = dict(auth=NodeAuthSSHKey(pubkey))
create_info = dict(name=args.hostname, location=location,
image=image, size=size, **extra)
node = config.driver.create_node(**create_info)
public_ip = node.public_ip
# Poll the node until it has a public ip
while not public_ip:
time.sleep(3)
for node_ in config.driver.list_nodes():
if node.id == node_.id and node_.public_ip:
public_ip = node_.public_ip[0]
if type(public_ip) == list:
public_ip = public_ip[0]
# At least EC2 passes only back hostname
if not IP_RE.match(public_ip):
public_ip = gethostbyname(public_ip)
if confirm("Create /etc/hosts entry?"):
etchosts.set_etchosts(args.hostname, public_ip)
print u"Node %s (%s)" % (args.hostname, public_ip)
print u"Run 'kraftwerk setup-node %s'" % args.hostname
create_node.parser.add_argument('hostname', default=None,
help="Hostname label for the node (optionally adds an entry to /etc/hosts for easy access)")
create_node.parser.add_argument('--size-id',
help="Provider node size. Defaults to user config.")
create_node.parser.add_argument('--image-id',
help="Ubuntu image. Defaults to user config. (ex. EC2 AMI image id)")
@command
def setup_node(config, args):
"""Install software and prepare a node for kraftwerk action."""
if args.templates:
config['templates'].insert(0, args.templates)
config.templates = config._templates()
stdin, stderr = args.node.ssh(config.template("scripts/node_setup.sh"))
if stderr:
print stderr
else:
print u"Node ready at %s" % (args.node.hostname)
setup_node.parser.add_argument('node', action=NodeAction,
help="Server node to interact with.")
setup_node.parser.add_argument('--templates',
help="External template directory. These will take precedence over "
"kraftwerk and user templates. You can use this to save and "
"organize setup recipes.")
@command
def deploy(config, args):
"""Sync and/or setup a WSGI project. Kraftwerk detects a first-
time setup and runs service setup."""
log = logging.getLogger('kraftwerk.deploy')
# TODO better way to detect new, or maybe move to dedicated command
stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)
new = bool(stderr) or args.override
# Sync codebase over with the web user
destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)
stdout, stderr = args.project.rsync(destination)
if stderr:
log.error("Sync error: %s" % stderr)
sys.exit(stderr)
# Copy requirements
args.project.copy(args.node, 'requirements.txt')
# Put together the setup script
cmd = config.template("scripts/project_setup.sh",
project=args.project, new=new,
upgrade_packages=args.upgrade_packages)
stdout, stderr = args.node.ssh(cmd, pipe=True)
if stderr:
print stderr
# TODO detect new services
if not args.no_service_setup and new:
for service in args.project.services():
args.node.ssh(service.setup_script)
print u"%s live at %r" % (args.project.canonical_domain(), args.node.hostname)
deploy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
deploy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Project root directory path. Defaults to current directory.")
deploy.parser.add_argument('--no-service-setup',
default=False, action='store_true',
help="With this hook kraftwerk overwrites the basic config files but " \
"does not attempt to set up project services.")
deploy.parser.add_argument('--upgrade-packages',
default=False, action='store_true',
help="Upgrade Python packages (adds -U to pip install)")
deploy.parser.add_argument('--override',
default=False, action='store_true',
help="Create folders and config as if deploying for the first time.")
@command
def destroy(config, args):
"""Remove project from a node with all related services and
files."""
log = logging.getLogger('kraftwerk.destroy')
if confirm("Remove project %s from node %s along with all services and data?" %
(args.project.name, args.node.hostname)):
args.node.ssh(config.template("scripts/project_destroy.sh", project=args.project))
print "Project %s removed from node %s" % \
(args.project.name, args.node.hostname )
for service in args.project.services(args.node):
args.node.ssh(service.destroy_script)
destroy.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
destroy.parser.add_argument('project', action=ProjectAction,
nargs='?',
help="Path to the project you want to REMOVE from a server node.")
@command
def stab(config, args):
"""Execute a shell command in the project environment. Useful for
django-admin.py syncdb and such."""
cmd = config.template("scripts/env.sh", project=args.project)
cmd = '\n'.join([cmd, ' '.join(args.script)])
args.node.ssh(cmd, user=args.user)
stab.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
stab.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
stab.parser.add_argument('--script', '-s', nargs='+', required=True)
stab.parser.add_argument('--user', '-u', help="User to login and issue command as.", default="web")
@command
def dump(config, args):
"""Create a copy of all service data. Reports a directory with all
dump files."""
timestamp = args.project.dump(args.node)
print "Dump ready at %s:%s" % (args.node.hostname,
args.project.dump_path(timestamp))
dump.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
dump.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
@command
def load(config, args):
"""Load a timestamped dumpdir from the same node. (No support yet
for user provided dumps). To load data from another node use
`sync-services`."""
if not confirm("WARNING: This isn't considered production ready just yet. Continue?"):
return
if not args.no_backup:
timestamp = args.project.dump(args.node)
print "Pre-load backup: %s" % args.project.dump_path(timestamp)
args.project.load(args.node, args.timestamp)
print "Service data from %s loaded at %s" % (args.timestamp,
args.node.hostname)
load.parser.add_argument('node', action=NodeAction, nargs='?',
help="Server node to interact with.")
load.parser.add_argument('timestamp',
help="ISO 8601 timestamp. This is the dir name inside " \
"/web/project/dump to load data from. (Example: 2010-03-24T15:42:54)")
load.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
load.parser.add_argument('--no-backup', default=False, action='store_true',
help="Only sync files across and exit. Quicker if you don't need to reload Python code.")
@command
def env(config, args):
"""List all project service environment variables, for
convenience."""
print config.template("scripts/env.sh", project=args.project)
env.parser.add_argument('project', action=ProjectAction, nargs='?',
help="Project root directory path. Defaults to current directory.")
| show_config | identifier_name |
v2.rs | use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
use serde_json::{Deserializer, Value};
use tempfile::NamedTempFile;
use crate::index_controller::dump_actor::loaders::compat::{asc_ranking_rule, desc_ranking_rule};
use crate::index_controller::dump_actor::Metadata;
use crate::index_controller::updates::status::{
Aborted, Enqueued, Failed, Processed, Processing, UpdateResult, UpdateStatus,
};
use crate::index_controller::updates::store::dump::UpdateEntry;
use crate::index_controller::updates::store::Update;
use crate::options::IndexerOpts;
use super::v3;
/// The dump v2 reads the dump folder and patches all the needed file to make it compatible with a
/// dump v3, then calls the dump v3 to actually handle the dump.
pub fn load_dump(
meta: Metadata,
src: impl AsRef<Path>,
dst: impl AsRef<Path>,
index_db_size: usize,
update_db_size: usize,
indexing_options: &IndexerOpts,
) -> anyhow::Result<()> |
fn patch_index_uuid_path(path: &Path) -> Option<PathBuf> {
let uuid = path.file_name()?.to_str()?.trim_start_matches("index-");
let new_path = path.parent()?.join(uuid);
Some(new_path)
}
fn patch_settings(path: impl AsRef<Path>) -> anyhow::Result<()> {
let mut meta_file = File::open(&path)?;
let mut meta: Value = serde_json::from_reader(&mut meta_file)?;
// We first deserialize the dump meta into a serde_json::Value and change
// the custom ranking rules settings from the old format to the new format.
if let Some(ranking_rules) = meta.pointer_mut("/settings/rankingRules") {
patch_custom_ranking_rules(ranking_rules);
}
let mut meta_file = OpenOptions::new().truncate(true).write(true).open(path)?;
serde_json::to_writer(&mut meta_file, &meta)?;
Ok(())
}
fn patch_updates(dir: impl AsRef<Path>, path: impl AsRef<Path>) -> anyhow::Result<()> {
let mut output_update_file = NamedTempFile::new_in(&dir)?;
let update_file = File::open(&path)?;
let stream = Deserializer::from_reader(update_file).into_iter::<compat::UpdateEntry>();
for update in stream {
let update_entry = update?;
let update_entry = UpdateEntry::from(update_entry);
serde_json::to_writer(&mut output_update_file, &update_entry)?;
output_update_file.write_all(b"\n")?;
}
output_update_file.flush()?;
output_update_file.persist(path)?;
Ok(())
}
/// Converts the ranking rules from the format `asc(_)`, `desc(_)` to the format `_:asc`, `_:desc`.
///
/// This is done for compatibility reasons, and to avoid a new dump version,
/// since the new syntax was introduced soon after the new dump version.
fn patch_custom_ranking_rules(ranking_rules: &mut Value) {
*ranking_rules = match ranking_rules.take() {
Value::Array(values) => values
.into_iter()
.filter_map(|value| match value {
Value::String(s) if s.starts_with("asc") => asc_ranking_rule(&s)
.map(|f| format!("{}:asc", f))
.map(Value::String),
Value::String(s) if s.starts_with("desc") => desc_ranking_rule(&s)
.map(|f| format!("{}:desc", f))
.map(Value::String),
otherwise => Some(otherwise),
})
.collect(),
otherwise => otherwise,
}
}
impl From<compat::UpdateEntry> for UpdateEntry {
fn from(compat::UpdateEntry { uuid, update }: compat::UpdateEntry) -> Self {
let update = match update {
compat::UpdateStatus::Processing(meta) => UpdateStatus::Processing(meta.into()),
compat::UpdateStatus::Enqueued(meta) => UpdateStatus::Enqueued(meta.into()),
compat::UpdateStatus::Processed(meta) => UpdateStatus::Processed(meta.into()),
compat::UpdateStatus::Aborted(meta) => UpdateStatus::Aborted(meta.into()),
compat::UpdateStatus::Failed(meta) => UpdateStatus::Failed(meta.into()),
};
Self { uuid, update }
}
}
impl From<compat::Failed> for Failed {
fn from(other: compat::Failed) -> Self {
let compat::Failed {
from,
error,
failed_at,
} = other;
Self {
from: from.into(),
msg: error.message,
code: compat::error_code_from_str(&error.error_code)
.expect("Invalid update: Invalid error code"),
failed_at,
}
}
}
impl From<compat::Aborted> for Aborted {
fn from(other: compat::Aborted) -> Self {
let compat::Aborted { from, aborted_at } = other;
Self {
from: from.into(),
aborted_at,
}
}
}
impl From<compat::Processing> for Processing {
fn from(other: compat::Processing) -> Self {
let compat::Processing {
from,
started_processing_at,
} = other;
Self {
from: from.into(),
started_processing_at,
}
}
}
impl From<compat::Enqueued> for Enqueued {
fn from(other: compat::Enqueued) -> Self {
let compat::Enqueued {
update_id,
meta,
enqueued_at,
content,
} = other;
let meta = match meta {
compat::UpdateMeta::DocumentsAddition {
method,
primary_key,
..
} => {
Update::DocumentAddition {
primary_key,
method,
// Just ignore if the uuid is no present. If it is needed later, an error will
// be thrown.
content_uuid: content.unwrap_or_default(),
}
}
compat::UpdateMeta::ClearDocuments => Update::ClearDocuments,
compat::UpdateMeta::DeleteDocuments { ids } => Update::DeleteDocuments(ids),
compat::UpdateMeta::Settings(settings) => Update::Settings(settings),
};
Self {
update_id,
meta,
enqueued_at,
}
}
}
impl From<compat::Processed> for Processed {
fn from(other: compat::Processed) -> Self {
let compat::Processed {
from,
success,
processed_at,
} = other;
Self {
success: success.into(),
processed_at,
from: from.into(),
}
}
}
impl From<compat::UpdateResult> for UpdateResult {
fn from(other: compat::UpdateResult) -> Self {
match other {
compat::UpdateResult::DocumentsAddition(r) => Self::DocumentsAddition(r),
compat::UpdateResult::DocumentDeletion { deleted } => {
Self::DocumentDeletion { deleted }
}
compat::UpdateResult::Other => Self::Other,
}
}
}
/// compat structure from pre-dumpv3 meilisearch
mod compat {
use anyhow::bail;
use chrono::{DateTime, Utc};
use meilisearch_error::Code;
use milli::update::{DocumentAdditionResult, IndexDocumentsMethod};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::index::{Settings, Unchecked};
#[derive(Serialize, Deserialize)]
pub struct UpdateEntry {
pub uuid: Uuid,
pub update: UpdateStatus,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UpdateFormat {
Json,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UpdateResult {
DocumentsAddition(DocumentAdditionResult),
DocumentDeletion { deleted: u64 },
Other,
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum UpdateMeta {
DocumentsAddition {
method: IndexDocumentsMethod,
format: UpdateFormat,
primary_key: Option<String>,
},
ClearDocuments,
DeleteDocuments {
ids: Vec<String>,
},
Settings(Settings<Unchecked>),
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Enqueued {
pub update_id: u64,
pub meta: UpdateMeta,
pub enqueued_at: DateTime<Utc>,
pub content: Option<Uuid>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Processed {
pub success: UpdateResult,
pub processed_at: DateTime<Utc>,
#[serde(flatten)]
pub from: Processing,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Processing {
#[serde(flatten)]
pub from: Enqueued,
pub started_processing_at: DateTime<Utc>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Aborted {
#[serde(flatten)]
pub from: Enqueued,
pub aborted_at: DateTime<Utc>,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Failed {
#[serde(flatten)]
pub from: Processing,
pub error: ResponseError,
pub failed_at: DateTime<Utc>,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "status", rename_all = "camelCase")]
pub enum UpdateStatus {
Processing(Processing),
Enqueued(Enqueued),
Processed(Processed),
Aborted(Aborted),
Failed(Failed),
}
type StatusCode = ();
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ResponseError {
#[serde(skip)]
pub code: StatusCode,
pub message: String,
pub error_code: String,
pub error_type: String,
pub error_link: String,
}
pub fn error_code_from_str(s: &str) -> anyhow::Result<Code> {
let code = match s {
"index_creation_failed" => Code::CreateIndex,
"index_already_exists" => Code::IndexAlreadyExists,
"index_not_found" => Code::IndexNotFound,
"invalid_index_uid" => Code::InvalidIndexUid,
"index_not_accessible" => Code::OpenIndex,
"invalid_state" => Code::InvalidState,
"missing_primary_key" => Code::MissingPrimaryKey,
"primary_key_already_present" => Code::PrimaryKeyAlreadyPresent,
"invalid_request" => Code::InvalidRankingRule,
"max_fields_limit_exceeded" => Code::MaxFieldsLimitExceeded,
"missing_document_id" => Code::MissingDocumentId,
"invalid_facet" => Code::Facet,
"invalid_filter" => Code::Filter,
"invalid_sort" => Code::Sort,
"bad_parameter" => Code::BadParameter,
"bad_request" => Code::BadRequest,
"document_not_found" => Code::DocumentNotFound,
"internal" => Code::Internal,
"invalid_geo_field" => Code::InvalidGeoField,
"invalid_token" => Code::InvalidToken,
"missing_authorization_header" => Code::MissingAuthorizationHeader,
"not_found" => Code::NotFound,
"payload_too_large" => Code::PayloadTooLarge,
"unretrievable_document" => Code::RetrieveDocument,
"search_error" => Code::SearchDocuments,
"unsupported_media_type" => Code::UnsupportedMediaType,
"dump_already_in_progress" => Code::DumpAlreadyInProgress,
"dump_process_failed" => Code::DumpProcessFailed,
_ => bail!("unknow error code."),
};
Ok(code)
}
}
| {
let indexes_path = src.as_ref().join("indexes");
let dir_entries = std::fs::read_dir(indexes_path)?;
for entry in dir_entries {
let entry = entry?;
// rename the index folder
let path = entry.path();
let new_path = patch_index_uuid_path(&path).expect("invalid index folder.");
std::fs::rename(path, &new_path)?;
let settings_path = new_path.join("meta.json");
patch_settings(settings_path)?;
}
let update_dir = src.as_ref().join("updates");
let update_path = update_dir.join("data.jsonl");
patch_updates(update_dir, update_path)?;
v3::load_dump(
meta,
src,
dst,
index_db_size,
update_db_size,
indexing_options,
)
} | identifier_body |
v2.rs | use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
use serde_json::{Deserializer, Value};
use tempfile::NamedTempFile;
use crate::index_controller::dump_actor::loaders::compat::{asc_ranking_rule, desc_ranking_rule};
use crate::index_controller::dump_actor::Metadata;
use crate::index_controller::updates::status::{
Aborted, Enqueued, Failed, Processed, Processing, UpdateResult, UpdateStatus,
};
use crate::index_controller::updates::store::dump::UpdateEntry;
use crate::index_controller::updates::store::Update;
use crate::options::IndexerOpts;
use super::v3;
/// The dump v2 reads the dump folder and patches all the needed file to make it compatible with a
/// dump v3, then calls the dump v3 to actually handle the dump.
pub fn load_dump(
meta: Metadata,
src: impl AsRef<Path>,
dst: impl AsRef<Path>,
index_db_size: usize,
update_db_size: usize,
indexing_options: &IndexerOpts,
) -> anyhow::Result<()> {
let indexes_path = src.as_ref().join("indexes");
let dir_entries = std::fs::read_dir(indexes_path)?;
for entry in dir_entries {
let entry = entry?;
// rename the index folder
let path = entry.path();
let new_path = patch_index_uuid_path(&path).expect("invalid index folder.");
std::fs::rename(path, &new_path)?;
let settings_path = new_path.join("meta.json");
patch_settings(settings_path)?;
}
let update_dir = src.as_ref().join("updates");
let update_path = update_dir.join("data.jsonl");
patch_updates(update_dir, update_path)?;
v3::load_dump(
meta,
src,
dst,
index_db_size,
update_db_size,
indexing_options,
)
}
fn | (path: &Path) -> Option<PathBuf> {
let uuid = path.file_name()?.to_str()?.trim_start_matches("index-");
let new_path = path.parent()?.join(uuid);
Some(new_path)
}
fn patch_settings(path: impl AsRef<Path>) -> anyhow::Result<()> {
let mut meta_file = File::open(&path)?;
let mut meta: Value = serde_json::from_reader(&mut meta_file)?;
// We first deserialize the dump meta into a serde_json::Value and change
// the custom ranking rules settings from the old format to the new format.
if let Some(ranking_rules) = meta.pointer_mut("/settings/rankingRules") {
patch_custom_ranking_rules(ranking_rules);
}
let mut meta_file = OpenOptions::new().truncate(true).write(true).open(path)?;
serde_json::to_writer(&mut meta_file, &meta)?;
Ok(())
}
fn patch_updates(dir: impl AsRef<Path>, path: impl AsRef<Path>) -> anyhow::Result<()> {
let mut output_update_file = NamedTempFile::new_in(&dir)?;
let update_file = File::open(&path)?;
let stream = Deserializer::from_reader(update_file).into_iter::<compat::UpdateEntry>();
for update in stream {
let update_entry = update?;
let update_entry = UpdateEntry::from(update_entry);
serde_json::to_writer(&mut output_update_file, &update_entry)?;
output_update_file.write_all(b"\n")?;
}
output_update_file.flush()?;
output_update_file.persist(path)?;
Ok(())
}
/// Converts the ranking rules from the format `asc(_)`, `desc(_)` to the format `_:asc`, `_:desc`.
///
/// This is done for compatibility reasons, and to avoid a new dump version,
/// since the new syntax was introduced soon after the new dump version.
fn patch_custom_ranking_rules(ranking_rules: &mut Value) {
*ranking_rules = match ranking_rules.take() {
Value::Array(values) => values
.into_iter()
.filter_map(|value| match value {
Value::String(s) if s.starts_with("asc") => asc_ranking_rule(&s)
.map(|f| format!("{}:asc", f))
.map(Value::String),
Value::String(s) if s.starts_with("desc") => desc_ranking_rule(&s)
.map(|f| format!("{}:desc", f))
.map(Value::String),
otherwise => Some(otherwise),
})
.collect(),
otherwise => otherwise,
}
}
impl From<compat::UpdateEntry> for UpdateEntry {
fn from(compat::UpdateEntry { uuid, update }: compat::UpdateEntry) -> Self {
let update = match update {
compat::UpdateStatus::Processing(meta) => UpdateStatus::Processing(meta.into()),
compat::UpdateStatus::Enqueued(meta) => UpdateStatus::Enqueued(meta.into()),
compat::UpdateStatus::Processed(meta) => UpdateStatus::Processed(meta.into()),
compat::UpdateStatus::Aborted(meta) => UpdateStatus::Aborted(meta.into()),
compat::UpdateStatus::Failed(meta) => UpdateStatus::Failed(meta.into()),
};
Self { uuid, update }
}
}
impl From<compat::Failed> for Failed {
fn from(other: compat::Failed) -> Self {
let compat::Failed {
from,
error,
failed_at,
} = other;
Self {
from: from.into(),
msg: error.message,
code: compat::error_code_from_str(&error.error_code)
.expect("Invalid update: Invalid error code"),
failed_at,
}
}
}
impl From<compat::Aborted> for Aborted {
fn from(other: compat::Aborted) -> Self {
let compat::Aborted { from, aborted_at } = other;
Self {
from: from.into(),
aborted_at,
}
}
}
impl From<compat::Processing> for Processing {
fn from(other: compat::Processing) -> Self {
let compat::Processing {
from,
started_processing_at,
} = other;
Self {
from: from.into(),
started_processing_at,
}
}
}
impl From<compat::Enqueued> for Enqueued {
fn from(other: compat::Enqueued) -> Self {
let compat::Enqueued {
update_id,
meta,
enqueued_at,
content,
} = other;
let meta = match meta {
compat::UpdateMeta::DocumentsAddition {
method,
primary_key,
..
} => {
Update::DocumentAddition {
primary_key,
method,
// Just ignore if the uuid is no present. If it is needed later, an error will
// be thrown.
content_uuid: content.unwrap_or_default(),
}
}
compat::UpdateMeta::ClearDocuments => Update::ClearDocuments,
compat::UpdateMeta::DeleteDocuments { ids } => Update::DeleteDocuments(ids),
compat::UpdateMeta::Settings(settings) => Update::Settings(settings),
};
Self {
update_id,
meta,
enqueued_at,
}
}
}
impl From<compat::Processed> for Processed {
fn from(other: compat::Processed) -> Self {
let compat::Processed {
from,
success,
processed_at,
} = other;
Self {
success: success.into(),
processed_at,
from: from.into(),
}
}
}
impl From<compat::UpdateResult> for UpdateResult {
fn from(other: compat::UpdateResult) -> Self {
match other {
compat::UpdateResult::DocumentsAddition(r) => Self::DocumentsAddition(r),
compat::UpdateResult::DocumentDeletion { deleted } => {
Self::DocumentDeletion { deleted }
}
compat::UpdateResult::Other => Self::Other,
}
}
}
/// compat structures from pre-dump-v3 meilisearch
mod compat {
    use anyhow::bail;
    use chrono::{DateTime, Utc};
    use meilisearch_error::Code;
    use milli::update::{DocumentAdditionResult, IndexDocumentsMethod};
    use serde::{Deserialize, Serialize};
    use uuid::Uuid;
    use crate::index::{Settings, Unchecked};
    /// One record of the legacy update store: the index uuid plus the update status.
    #[derive(Serialize, Deserialize)]
    pub struct UpdateEntry {
        pub uuid: Uuid,
        pub update: UpdateStatus,
    }
    /// Legacy payload format marker; only JSON appears here.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    pub enum UpdateFormat {
        Json,
    }
    /// Legacy result of a successfully processed update.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    pub enum UpdateResult {
        DocumentsAddition(DocumentAdditionResult),
        DocumentDeletion { deleted: u64 },
        Other,
    }
    /// Legacy description of what an update does; the serde tag mirrors the
    /// old on-disk layout.
    #[allow(clippy::large_enum_variant)]
    #[derive(Debug, Clone, Serialize, Deserialize)]
    #[serde(tag = "type")]
    pub enum UpdateMeta {
        DocumentsAddition {
            method: IndexDocumentsMethod,
            format: UpdateFormat,
            primary_key: Option<String>,
        },
        ClearDocuments,
        DeleteDocuments {
            ids: Vec<String>,
        },
        Settings(Settings<Unchecked>),
    }
    /// Legacy enqueued update; `content` optionally references the payload file.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Enqueued {
        pub update_id: u64,
        pub meta: UpdateMeta,
        pub enqueued_at: DateTime<Utc>,
        pub content: Option<Uuid>,
    }
    /// Legacy processed (successful) update.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Processed {
        pub success: UpdateResult,
        pub processed_at: DateTime<Utc>,
        #[serde(flatten)]
        pub from: Processing,
    }
    /// Legacy in-progress update.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Processing {
        #[serde(flatten)]
        pub from: Enqueued,
        pub started_processing_at: DateTime<Utc>,
    }
    /// Legacy aborted update.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Aborted {
        #[serde(flatten)]
        pub from: Enqueued,
        pub aborted_at: DateTime<Utc>,
    }
    /// Legacy failed update, carrying the error that stopped it.
    #[derive(Debug, Serialize, Deserialize)]
    #[serde(rename_all = "camelCase")]
    pub struct Failed {
        #[serde(flatten)]
        pub from: Processing,
        pub error: ResponseError,
        pub failed_at: DateTime<Utc>,
    }
    /// Legacy update life-cycle states.
    #[derive(Debug, Serialize, Deserialize)]
    #[serde(tag = "status", rename_all = "camelCase")]
    pub enum UpdateStatus {
        Processing(Processing),
        Enqueued(Enqueued),
        Processed(Processed),
        Aborted(Aborted),
        Failed(Failed),
    }
    // The HTTP status code was not serialized in old dumps; a unit type stands in.
    type StatusCode = ();
    /// Legacy serialized error payload.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct ResponseError {
        #[serde(skip)]
        pub code: StatusCode,
        pub message: String,
        pub error_code: String,
        pub error_type: String,
        pub error_link: String,
    }
    /// Maps a legacy textual error code onto the current `Code` enum.
    ///
    /// Returns an error for codes this version does not recognize.
    pub fn error_code_from_str(s: &str) -> anyhow::Result<Code> {
        let code = match s {
            "index_creation_failed" => Code::CreateIndex,
            "index_already_exists" => Code::IndexAlreadyExists,
            "index_not_found" => Code::IndexNotFound,
            "invalid_index_uid" => Code::InvalidIndexUid,
            "index_not_accessible" => Code::OpenIndex,
            "invalid_state" => Code::InvalidState,
            "missing_primary_key" => Code::MissingPrimaryKey,
            "primary_key_already_present" => Code::PrimaryKeyAlreadyPresent,
            "invalid_request" => Code::InvalidRankingRule,
            "max_fields_limit_exceeded" => Code::MaxFieldsLimitExceeded,
            "missing_document_id" => Code::MissingDocumentId,
            "invalid_facet" => Code::Facet,
            "invalid_filter" => Code::Filter,
            "invalid_sort" => Code::Sort,
            "bad_parameter" => Code::BadParameter,
            "bad_request" => Code::BadRequest,
            "document_not_found" => Code::DocumentNotFound,
            "internal" => Code::Internal,
            "invalid_geo_field" => Code::InvalidGeoField,
            "invalid_token" => Code::InvalidToken,
            "missing_authorization_header" => Code::MissingAuthorizationHeader,
            "not_found" => Code::NotFound,
            "payload_too_large" => Code::PayloadTooLarge,
            "unretrievable_document" => Code::RetrieveDocument,
            "search_error" => Code::SearchDocuments,
            "unsupported_media_type" => Code::UnsupportedMediaType,
            "dump_already_in_progress" => Code::DumpAlreadyInProgress,
            "dump_process_failed" => Code::DumpProcessFailed,
            // Fixed typo in the error message (was "unknow error code.").
            _ => bail!("unknown error code."),
        };
        Ok(code)
    }
}
| patch_index_uuid_path | identifier_name |
v2.rs | use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
use serde_json::{Deserializer, Value};
use tempfile::NamedTempFile;
use crate::index_controller::dump_actor::loaders::compat::{asc_ranking_rule, desc_ranking_rule};
use crate::index_controller::dump_actor::Metadata;
use crate::index_controller::updates::status::{
Aborted, Enqueued, Failed, Processed, Processing, UpdateResult, UpdateStatus,
};
use crate::index_controller::updates::store::dump::UpdateEntry;
use crate::index_controller::updates::store::Update;
use crate::options::IndexerOpts;
use super::v3;
/// The dump v2 reads the dump folder and patches all the needed file to make it compatible with a
/// dump v3, then calls the dump v3 to actually handle the dump.
pub fn load_dump(
    meta: Metadata,
    src: impl AsRef<Path>,
    dst: impl AsRef<Path>,
    index_db_size: usize,
    update_db_size: usize,
    indexing_options: &IndexerOpts,
) -> anyhow::Result<()> {
    // Each index lives under `indexes/`; the v2 layout prefixed the folder
    // name with "index-", which v3 does not expect.
    let indexes_path = src.as_ref().join("indexes");
    let dir_entries = std::fs::read_dir(indexes_path)?;
    for entry in dir_entries {
        let entry = entry?;
        // rename the index folder
        let path = entry.path();
        let new_path = patch_index_uuid_path(&path).expect("invalid index folder.");
        std::fs::rename(path, &new_path)?;
        // Upgrade the per-index settings (legacy custom ranking-rule syntax).
        let settings_path = new_path.join("meta.json");
        patch_settings(settings_path)?;
    }
    // Upgrade every entry of the update store to the v3 format.
    let update_dir = src.as_ref().join("updates");
    let update_path = update_dir.join("data.jsonl");
    patch_updates(update_dir, update_path)?;
    // Hand the patched folder to the v3 loader for the actual import.
    v3::load_dump(
        meta,
        src,
        dst,
        index_db_size,
        update_db_size,
        indexing_options,
    )
}
/// Strips the legacy `index-` prefix from an index directory name, returning
/// the rewritten path (same parent, bare uuid as the final component).
/// Returns `None` when the path has no file name, no parent, or a non-UTF-8 name.
fn patch_index_uuid_path(path: &Path) -> Option<PathBuf> {
    let dir_name = path.file_name()?.to_str()?;
    let uuid = dir_name.trim_start_matches("index-");
    Some(path.parent()?.join(uuid))
}
/// Rewrites an index `meta.json` in place, upgrading the legacy
/// `asc(field)` / `desc(field)` custom ranking rules to `field:asc` / `field:desc`.
fn patch_settings(path: impl AsRef<Path>) -> anyhow::Result<()> {
    let mut meta_file = File::open(&path)?;
    let mut meta: Value = serde_json::from_reader(&mut meta_file)?;
    // We first deserialize the dump meta into a serde_json::Value and change
    // the custom ranking rules settings from the old format to the new format.
    if let Some(ranking_rules) = meta.pointer_mut("/settings/rankingRules") {
        patch_custom_ranking_rules(ranking_rules);
    }
    // Truncate before writing so a shorter patched document does not leave
    // trailing bytes of the original file behind.
    let mut meta_file = OpenOptions::new().truncate(true).write(true).open(path)?;
    serde_json::to_writer(&mut meta_file, &meta)?;
    Ok(())
}
/// Rewrites the update store (`data.jsonl`) entry by entry, converting each
/// legacy update record to the current format. The output goes to a temp file
/// in `dir` first and is then persisted over `path`, so a failure mid-way
/// leaves the original file untouched.
fn patch_updates(dir: impl AsRef<Path>, path: impl AsRef<Path>) -> anyhow::Result<()> {
    let mut output_update_file = NamedTempFile::new_in(&dir)?;
    let update_file = File::open(&path)?;
    // Stream the JSON-lines file; each value is one legacy update entry.
    let stream = Deserializer::from_reader(update_file).into_iter::<compat::UpdateEntry>();
    for update in stream {
        let update_entry = update?;
        let update_entry = UpdateEntry::from(update_entry);
        serde_json::to_writer(&mut output_update_file, &update_entry)?;
        output_update_file.write_all(b"\n")?;
    }
    output_update_file.flush()?;
    output_update_file.persist(path)?;
    Ok(())
}
/// Converts the ranking rules from the format `asc(_)`, `desc(_)` to the format `_:asc`, `_:desc`.
///
/// This is done for compatibility reasons, and to avoid a new dump version,
/// since the new syntax was introduced soon after the new dump version.
fn patch_custom_ranking_rules(ranking_rules: &mut Value) {
    let current = ranking_rules.take();
    *ranking_rules = if let Value::Array(rules) = current {
        // Rewrite each string rule; rules that fail to parse are dropped,
        // non-string entries pass through untouched.
        rules
            .into_iter()
            .filter_map(|rule| match rule {
                Value::String(s) if s.starts_with("asc") => {
                    asc_ranking_rule(&s).map(|field| Value::String(format!("{}:asc", field)))
                }
                Value::String(s) if s.starts_with("desc") => {
                    desc_ranking_rule(&s).map(|field| Value::String(format!("{}:desc", field)))
                }
                other => Some(other),
            })
            .collect()
    } else {
        current
    };
}
impl From<compat::UpdateEntry> for UpdateEntry {
fn from(compat::UpdateEntry { uuid, update }: compat::UpdateEntry) -> Self {
let update = match update {
compat::UpdateStatus::Processing(meta) => UpdateStatus::Processing(meta.into()),
compat::UpdateStatus::Enqueued(meta) => UpdateStatus::Enqueued(meta.into()),
compat::UpdateStatus::Processed(meta) => UpdateStatus::Processed(meta.into()),
compat::UpdateStatus::Aborted(meta) => UpdateStatus::Aborted(meta.into()),
compat::UpdateStatus::Failed(meta) => UpdateStatus::Failed(meta.into()),
};
Self { uuid, update }
}
}
impl From<compat::Failed> for Failed {
fn from(other: compat::Failed) -> Self {
let compat::Failed {
from,
error,
failed_at,
} = other;
Self {
from: from.into(),
msg: error.message,
code: compat::error_code_from_str(&error.error_code)
.expect("Invalid update: Invalid error code"),
failed_at,
}
}
}
impl From<compat::Aborted> for Aborted {
fn from(other: compat::Aborted) -> Self {
let compat::Aborted { from, aborted_at } = other;
Self {
from: from.into(),
aborted_at,
}
}
}
impl From<compat::Processing> for Processing {
fn from(other: compat::Processing) -> Self {
let compat::Processing {
from,
started_processing_at,
} = other;
Self {
from: from.into(),
started_processing_at,
}
}
}
impl From<compat::Enqueued> for Enqueued {
fn from(other: compat::Enqueued) -> Self {
let compat::Enqueued {
update_id,
meta,
enqueued_at,
content,
} = other;
let meta = match meta {
compat::UpdateMeta::DocumentsAddition {
method,
primary_key,
..
} => {
Update::DocumentAddition {
primary_key,
method,
// Just ignore if the uuid is no present. If it is needed later, an error will
// be thrown.
content_uuid: content.unwrap_or_default(),
}
}
compat::UpdateMeta::ClearDocuments => Update::ClearDocuments,
compat::UpdateMeta::DeleteDocuments { ids } => Update::DeleteDocuments(ids),
compat::UpdateMeta::Settings(settings) => Update::Settings(settings),
};
Self {
update_id,
meta,
enqueued_at,
}
}
}
impl From<compat::Processed> for Processed {
fn from(other: compat::Processed) -> Self {
let compat::Processed {
from,
success,
processed_at,
} = other;
Self {
success: success.into(),
processed_at,
from: from.into(),
}
}
}
| compat::UpdateResult::DocumentsAddition(r) => Self::DocumentsAddition(r),
compat::UpdateResult::DocumentDeletion { deleted } => {
Self::DocumentDeletion { deleted }
}
compat::UpdateResult::Other => Self::Other,
}
}
}
/// compat structure from pre-dumpv3 meilisearch
mod compat {
use anyhow::bail;
use chrono::{DateTime, Utc};
use meilisearch_error::Code;
use milli::update::{DocumentAdditionResult, IndexDocumentsMethod};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::index::{Settings, Unchecked};
    /// One record of the legacy update store: the index uuid plus the update status.
    #[derive(Serialize, Deserialize)]
    pub struct UpdateEntry {
        pub uuid: Uuid,
        pub update: UpdateStatus,
    }
    /// Legacy payload format marker; only JSON appears here.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    pub enum UpdateFormat {
        Json,
    }
    /// Legacy result of a successfully processed update.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    pub enum UpdateResult {
        DocumentsAddition(DocumentAdditionResult),
        DocumentDeletion { deleted: u64 },
        Other,
    }
    /// Legacy description of what an update does; the serde tag mirrors the
    /// old on-disk layout.
    #[allow(clippy::large_enum_variant)]
    #[derive(Debug, Clone, Serialize, Deserialize)]
    #[serde(tag = "type")]
    pub enum UpdateMeta {
        DocumentsAddition {
            method: IndexDocumentsMethod,
            format: UpdateFormat,
            primary_key: Option<String>,
        },
        ClearDocuments,
        DeleteDocuments {
            ids: Vec<String>,
        },
        Settings(Settings<Unchecked>),
    }
    /// Legacy enqueued update; `content` optionally references the payload file.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Enqueued {
        pub update_id: u64,
        pub meta: UpdateMeta,
        pub enqueued_at: DateTime<Utc>,
        pub content: Option<Uuid>,
    }
    /// Legacy processed (successful) update.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Processed {
        pub success: UpdateResult,
        pub processed_at: DateTime<Utc>,
        #[serde(flatten)]
        pub from: Processing,
    }
    /// Legacy in-progress update.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Processing {
        #[serde(flatten)]
        pub from: Enqueued,
        pub started_processing_at: DateTime<Utc>,
    }
    /// Legacy aborted update.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct Aborted {
        #[serde(flatten)]
        pub from: Enqueued,
        pub aborted_at: DateTime<Utc>,
    }
    /// Legacy failed update, carrying the error that stopped it.
    #[derive(Debug, Serialize, Deserialize)]
    #[serde(rename_all = "camelCase")]
    pub struct Failed {
        #[serde(flatten)]
        pub from: Processing,
        pub error: ResponseError,
        pub failed_at: DateTime<Utc>,
    }
    /// Legacy update life-cycle states.
    #[derive(Debug, Serialize, Deserialize)]
    #[serde(tag = "status", rename_all = "camelCase")]
    pub enum UpdateStatus {
        Processing(Processing),
        Enqueued(Enqueued),
        Processed(Processed),
        Aborted(Aborted),
        Failed(Failed),
    }
    // The HTTP status code was not serialized in old dumps; a unit type stands in.
    type StatusCode = ();
    /// Legacy serialized error payload.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    #[serde(rename_all = "camelCase")]
    pub struct ResponseError {
        #[serde(skip)]
        pub code: StatusCode,
        pub message: String,
        pub error_code: String,
        pub error_type: String,
        pub error_link: String,
    }
pub fn error_code_from_str(s: &str) -> anyhow::Result<Code> {
let code = match s {
"index_creation_failed" => Code::CreateIndex,
"index_already_exists" => Code::IndexAlreadyExists,
"index_not_found" => Code::IndexNotFound,
"invalid_index_uid" => Code::InvalidIndexUid,
"index_not_accessible" => Code::OpenIndex,
"invalid_state" => Code::InvalidState,
"missing_primary_key" => Code::MissingPrimaryKey,
"primary_key_already_present" => Code::PrimaryKeyAlreadyPresent,
"invalid_request" => Code::InvalidRankingRule,
"max_fields_limit_exceeded" => Code::MaxFieldsLimitExceeded,
"missing_document_id" => Code::MissingDocumentId,
"invalid_facet" => Code::Facet,
"invalid_filter" => Code::Filter,
"invalid_sort" => Code::Sort,
"bad_parameter" => Code::BadParameter,
"bad_request" => Code::BadRequest,
"document_not_found" => Code::DocumentNotFound,
"internal" => Code::Internal,
"invalid_geo_field" => Code::InvalidGeoField,
"invalid_token" => Code::InvalidToken,
"missing_authorization_header" => Code::MissingAuthorizationHeader,
"not_found" => Code::NotFound,
"payload_too_large" => Code::PayloadTooLarge,
"unretrievable_document" => Code::RetrieveDocument,
"search_error" => Code::SearchDocuments,
"unsupported_media_type" => Code::UnsupportedMediaType,
"dump_already_in_progress" => Code::DumpAlreadyInProgress,
"dump_process_failed" => Code::DumpProcessFailed,
_ => bail!("unknow error code."),
};
Ok(code)
}
} | impl From<compat::UpdateResult> for UpdateResult {
fn from(other: compat::UpdateResult) -> Self {
match other { | random_line_split |
subtour2opt.py | from itertools import izip
import cPickle
import math
import numpy
import random
import sys
import pandas
import itertools
import time
# Input data: per-city nearest-neighbor distances (pickled) and city coordinates (CSV).
CITIES_NEIGHBORS_FILE = '/home/chefele/kaggle/Santa/data/santa_cities_nndists.pkl'
CITIES_INFO_FILE = '/home/chefele/kaggle/Santa/download/santa_cities.csv'
# Stop 2-opt passes once a whole pass improves the max tour length by less than this.
MIN_PASS_IMPROVEMENT_FULLTOUR = 1000 # must be >0; 1000 gets ~99% of possible improvement with this data
MIN_PASS_IMPROVEMENT_SUBTOUR = 50
# When True, stop scanning a node's neighbor list early (faster, slightly longer path).
FAST_SEARCH = True
BATCH_SIZE_FULLTOUR = 1000 # Num nodes to process between prints of status updates
BATCH_SIZE_SUBTOUR = 1000 # Num nodes to process between prints of status updates
MAX_DISTANCE = 40000 # 20K x 20K image, so need any value >sqrt(2)*20K
# Fixed seed so runs are reproducible.
RANDOM_SEED = 1234567
random.seed(RANDOM_SEED)
class TourEngine:
    """Maintains one tour over the cities as a linked list (self.next_node)
    and performs 2-opt edge swaps on it.

    The class-level attributes (neighbors, nodes, edges) are shared by ALL
    TourEngine instances: `edges` counts every edge present in every live
    tour, which lets the swap logic refuse swaps that would create an edge
    already used by another tour.
    """
    neighbors = [] # read only, node nearest neighbors' info: id,dist
    nodes = [] # dataframe of node id,x,y
    edges = {} # read/write, dict of counts of all edges in all graphs/tours
    # edges is read/write, hash table of counts of all edges in all graphs/tours
    # edges = numpy.zeros(EDGE_HASH_TABLE_SIZE, int)
    def __init__(self, starting_tour, load_neighbors=True):
        """Build the linked-list representation of starting_tour and register
        all its edges in the shared class-level edge counts."""
        # read only; starting tour of nodes
        self.starting_tour = starting_tour
        # *** read/write; pointers to next node in this tour's path
        # that is, next_node[node_id] = next_node_id
        self.next_node = self.to_next_node_list(starting_tour)
        # add all edges in the starting tour to the common edges set
        for node in xrange(len(self.next_node)):
            self.edge_add(node, self.next_node[node])
        if len(TourEngine.neighbors) == 0 and load_neighbors:
            TourEngine.neighbors = self.get_neighbors()
        if len(TourEngine.nodes) == 0:
            TourEngine.nodes = self.get_nodes()
        # for speed, convert to numpy arrays (pandas dataframe access much slower)
        self.nodes_x = numpy.array(TourEngine.nodes['x'])
        self.nodes_y = numpy.array(TourEngine.nodes['y'])
    def clear_all_edges(self):
        """Drop the shared edge counts (affects every TourEngine instance)."""
        TourEngine.edges = {}
    def get_neighbors(self):
        """Load the pickled per-city nearest-neighbor data."""
        print "Loading nearest neighbor distances from:", CITIES_NEIGHBORS_FILE,"...",
        sys.stdout.flush()
        fin = open(CITIES_NEIGHBORS_FILE, 'rb')
        neighbors = cPickle.load(fin)
        fin.close()
        print "Done."
        sys.stdout.flush()
        return neighbors # list of dataframes, one per city
    def get_nodes(self):
        """Load the city id/x/y table from CSV."""
        print "Loading node data from:", CITIES_INFO_FILE,"...",
        sys.stdout.flush()
        nodes = pandas.read_csv(CITIES_INFO_FILE)
        print "Done."
        sys.stdout.flush()
        return nodes
    def to_next_node_list(self, node_lst): # node list -> next_node_list
        """Convert an ordered node list into a successor array; the last node
        links back to the first, closing the tour."""
        # NOTE asserts below doesn't work with subpaths (only with all nodes)
        # assert len(node_lst) == len(set(node_lst)), 'Duplicate nodes!'
        # assert len(node_lst) == max(node_lst)+1, 'Missing nodes!'
        next_node_lst = [None] * (max(node_lst)+1)
        # link the nodes in the body of the list
        for cur_node, next_node in izip(node_lst[:-1], node_lst[1:]):
            next_node_lst[cur_node] = next_node
        # now link the end of the list to the start
        cur_node, next_node = node_lst[-1], node_lst[0]
        next_node_lst[cur_node] = next_node
        return next_node_lst
    def to_node_list(self, next_node_list, node_start):
        """Walk the successor array from node_start back to itself, returning
        the tour as an ordered node list."""
        node_list = [node_start]
        node = next_node_list[node_start]
        while node != node_start:
            node_list.append(node)
            node = next_node_list[node]
        return node_list
    def get_current_tour_node_list(self):
        """Return the current tour as an ordered node list."""
        return self.to_node_list(self.next_node, self.starting_tour[0])
    def dist(self, node1, node2):
        """Euclidean distance between two cities."""
        dx = self.nodes_x[node1] - self.nodes_x[node2]
        dy = self.nodes_y[node1] - self.nodes_y[node2]
        edge_dist = math.sqrt(dx*dx + dy*dy)
        return edge_dist
    def edge_hash(self, node1, node2):
        # NOTE(review): dead code -- EDGE_HASH_TABLE_SIZE is only defined in
        # commented-out lines in this file, so calling this raises NameError.
        # The dict-based edge bookkeeping below replaced the hash table.
        return hash( ( min(node1,node2), max(node1,node2) ) ) % EDGE_HASH_TABLE_SIZE
    def edge_exists(self, node1, node2):
        """True if the (undirected) edge appears in any tracked tour."""
        # return TourEngine.edges[self.edge_hash(node1,node2)] > 0
        return (node1,node2) in TourEngine.edges or \
               (node2,node1) in TourEngine.edges
    def edge_multiple_exists(self, node1, node2):
        """True if the edge appears more than once across the tracked tours."""
        # return TourEngine.edges[self.edge_hash(node1,node2)] > 1
        return TourEngine.edges.get((node1,node2), 0) > 1 or \
               TourEngine.edges.get((node2,node1), 0) > 1
    def edge_add(self, node1, node2):
        """Increment the shared count for this edge (stored in both directions)."""
        # TourEngine.edges[self.edge_hash(node1,node2)] += 1
        e12 = (node1,node2)
        e21 = (node2,node1)
        TourEngine.edges[e12] = TourEngine.edges.setdefault(e12, 0) + 1
        TourEngine.edges[e21] = TourEngine.edges.setdefault(e21, 0) + 1
    def edge_delete(self, node1, node2):
        """Decrement the shared count for this edge, removing zero entries."""
        # TourEngine.edges[self.edge_hash(node1,node2)] -= 1
        e12 = (node1,node2)
        e21 = (node2,node1)
        edges = TourEngine.edges
        assert edges[e12]>0 and edges[e21]>0, "Deleting nonexistant edge"
        edges[e12] -= 1
        edges[e21] -= 1
        if edges[e12] == 0:
            del edges[e12]
        if edges[e21] == 0:
            del edges[e21]
        # TourEngine.edges.remove( (node1, node2) )
        # TourEngine.edges.remove( (node2, node1) )
    def tour_update_generator(self, update_path=None):
        """Generator performing 2-opt updates, yielding True once per node
        examined.

        For each node1 (edge node1->node2), scan node1's nearest neighbors
        node3 (edge node3->node4) and perform the best swap to edges
        (node1,node3)+(node2,node4) that shortens the tour and does not
        duplicate an edge already present in another tour. If the current
        edge is itself duplicated across tours, a swap is forced even if it
        lengthens the tour (best_dist_diff = MAX_DISTANCE).
        """
        if update_path == None:
            update_path = self.starting_tour
        update_path_nodes = set(update_path)
        # infinite sequence of nodes in random order to update
        node_generator = itertools.cycle(random.sample(update_path, len(update_path)))
        for node1 in node_generator:
            node2 = self.next_node[node1]
            assert node1 != node2
            dist12 = self.dist(node1, node2) # original edge
            swap_found = False
            if self.edge_multiple_exists(node1, node2):
                best_dist_diff = MAX_DISTANCE # forces a swap
            else:
                best_dist_diff = 0 # only swaps if tot dist improves
            for node3 in TourEngine.neighbors[node1].id: # sorted by distance
                node4 = self.next_node[node3]
                if len(set((node1,node2,node3,node4)))<4: # skip overlaps
                    continue
                if (node1 not in update_path_nodes) or (node2 not in update_path_nodes) or \
                   (node3 not in update_path_nodes) or (node4 not in update_path_nodes):
                    continue # only do 'in-region' updates along update path
                dist34 = self.dist(node3, node4) # original edge
                dist13 = self.dist(node1, node3) # proposed swap edge
                dist24 = self.dist(node2, node4) # proposed swap edge
                dist_diff = (dist13 + dist24) - (dist12 + dist34)
                if dist_diff < best_dist_diff and \
                   not self.edge_exists(node1, node3) and \
                   not self.edge_exists(node2, node4):
                    swap_found = True
                    best_dist_diff, best_node3, best_node4 = dist_diff, node3, node4
                # TODO for max accuracy, remove the if & break below?
                if (dist13 > dist12) and FAST_SEARCH: # Much faster (3x-10x), but path length is ~1% more
                    break
            # note the symmetry of test, since the outer node2 loop will
            # eventually hit the other points
            if swap_found:
                self.do_swap(node1, node2, best_node3, best_node4)
                self.edge_delete(node1, node2)
                self.edge_delete(best_node3, best_node4)
                self.edge_add(node1, best_node3)
                self.edge_add(node2, best_node4)
            yield True # NOTE this functionis a generator!
    def do_swap(self, node1, node2, node3, node4):
        # does a swap, updating self.next_node array in-place:
        # replaces edges (node1->node2) and (node3->node4) with
        # (node1->node3) and (node2->node4), reversing the segment in between.
        assert node2 == self.next_node[node1], 'node2 does not follow node1 in tour'
        assert node4 == self.next_node[node3], 'node4 does not follow node3 in tour'
        self.next_node[node1] = node3
        self.reverse_path(node2, node4)
        self.next_node[node2] = node4
    def reverse_path(self, head, tail):
        # reverses a path in self.next_node array in-place (like reversing a linked list);
        # relinks nodes from head up to -- but not including -- tail, leaving
        # head's successor to be fixed up by the caller (do_swap).
        previous = None
        node = head
        while node != tail:
            temp = self.next_node[node]
            self.next_node[node] = previous
            previous = node
            node = temp
    def get_current_tour(self):
        """Alias of get_current_tour_node_list: the tour as an ordered list."""
        return self.to_node_list(self.next_node, self.starting_tour[0])
    def get_length_common(self, next_node):
        """Sum of edge lengths over a successor array, skipping None links."""
        return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node) if n2!=None))
    def get_tour_length(self):
        """Total length of this engine's current (closed) tour."""
        # n1 = node, n2 = next_node[n1]
        # return sum(( self.dist(n1,n2) for n1, n2 in enumerate(self.next_node)))
        return self.get_length_common(self.next_node)
    def external_tour_length(self, node_list):
        """Length of an arbitrary node list treated as a closed tour."""
        next_node = self.to_next_node_list(node_list)
        # return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node)))
        return self.get_length_common(next_node)
    def external_path_length(self, node_list):
        """Length of an arbitrary node list treated as an open path
        (the closing edge back to the start is excluded)."""
        next_node = self.to_next_node_list(node_list)
        return self.get_length_common(next_node[:-1])
        # return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node[:-1])))
    def get_edge_counts(self):
        """Histogram of shared edge multiplicities as (count, occurrences) pairs."""
        # return (TourEngine.edges.min(), TourEngine.edges.max())
        edges = TourEngine.edges
        ecounts = {}
        for e in edges:
            ecounts[edges[e]] = ecounts.setdefault(edges[e],0) + 1
        return [(cnt, ecounts[cnt]) for cnt in sorted(ecounts)]
def read_tours(tour_file):
    """Read two tours from a CSV file with 'path1' and 'path2' columns."""
    frame = pandas.read_csv(tour_file)
    return (list(frame['path1']), list(frame['path2']))
def write_tours(tour1, tour2, outfile):
fout = open(outfile, 'w')
fout.write('path1,path2\n')
lines = [str(n1)+','+str(n2) for n1, n2 in izip(tour1, tour2)] | for line in lines:
fout.write(line+'\n')
fout.close()
print '\nWrote 2-OPT tours to: ', outfile, '\n'
def chunks(l, n):
    """Split list l into consecutive pieces of length n (last piece may be shorter)."""
    pieces = []
    for start in range(0, len(l), n):
        pieces.append(l[start:start + n])
    return pieces
def shuffled_paths(node_list, path_len):
    """Split node_list into paths of path_len nodes and randomly permute each path."""
    paths = []
    for path in chunks(node_list, path_len):
        paths.append(random.sample(path, len(path)))
    return paths
def z_order_id(x, y):
    """Return the Morton (z-order) key of integer coordinates (x, y): the
    bitwise interleaving of their 16-bit binary representations, as a string."""
    sx = '{:016b}'.format(int(x))
    sy = '{:016b}'.format(int(y))
    interleaved = []
    for bx, by in zip(sx, sy):
        interleaved.append(bx)
        interleaved.append(by)
    return ''.join(interleaved)
def z_order_paths(node_list, path_len):
    """Split node_list into chunks of path_len nodes and sort each chunk by
    the nodes' z-order (Morton) keys, giving spatial locality within a path."""
    # TourEngine.nodes = df of id, x, y
    zpaths = []
    for path in chunks(node_list, path_len):
        # Label-based pandas indexing: node ids select rows of the city table.
        xs = TourEngine.nodes['x'][path]
        ys = TourEngine.nodes['y'][path]
        z_node = [(z_order_id(x,y), node) for node, x,y in zip(path, xs, ys)]
        z_order_nodes = [node for z, node in sorted(z_node)]
        zpaths.append(z_order_nodes)
    return zpaths
def bit_counter_iterator(nbits):
    """Iterate over every nbits-long tuple of 0/1 values, in binary counting
    order (all zeros first, all ones last)."""
    return iter(itertools.product((0, 1), repeat=nbits))
class PathLengthCache:
    """Memoizes path lengths computed by a TourEngine, keyed by the node tuple."""
    def __init__(self, tour_engine):
        self.cache = {}
        self.tour_engine = tour_engine
    def path_length(self, path):
        """Return the open-path length of `path`, computing it at most once."""
        key = tuple(path)
        if key not in self.cache:
            self.cache[key] = self.tour_engine.external_path_length(key)
        return self.cache[key]
def unchunk(lst_of_lsts):
    """Concatenate a list of lists of nodes into one flat list of nodes.

    Uses itertools.chain.from_iterable instead of sum(lst_of_lsts, []),
    which copies the accumulator on every step (quadratic in total length).
    """
    return list(itertools.chain.from_iterable(lst_of_lsts))
def print_tour_len_from_chunks(t1c, t2c, f_tourlen):
    """Print the true closed-tour lengths of two tours given as chunk lists.

    t1c/t2c are lists of node-list chunks; f_tourlen maps a flat node list
    to its tour length.
    """
    tourlen1 = f_tourlen(unchunk(t1c))
    tourlen2 = f_tourlen(unchunk(t2c))
    print 'REAL LENGTHS:', 'tour1:', round(tourlen1), 'tour2:', round(tourlen2)
    sys.stdout.flush()
def opt_merge_tours(tour1_nodes, tour2_nodes, chunk_length, f_tourlen):
    """Greedily swap equal-length chunks between the two tours to minimize the
    absolute difference of their total lengths.

    Repeats full passes over the chunks (in random order) until a pass makes
    no improving swap; returns the two flattened tours.
    """
    assert len(tour1_nodes) == len(tour2_nodes), 'Tour lengths differ!'
    t1c = chunks(tour1_nodes, chunk_length)
    t2c = chunks(tour2_nodes, chunk_length)
    n_chunks = len(t1c)
    assert len(t1c) == len(t2c), 'Unequal number of tour chunks!'
    passes = 0
    last_gap = None
    improving = True
    while improving:
        improving = False
        for i, n in enumerate(random.sample(range(0,n_chunks), n_chunks)):
            # Evaluate both tours with and without swapping chunk n.
            tourlen1_noswap = f_tourlen(unchunk(t1c))
            tourlen2_noswap = f_tourlen(unchunk(t2c))
            t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tour
            tourlen1_swap = f_tourlen(unchunk(t1c))
            tourlen2_swap = f_tourlen(unchunk(t2c))
            t1c[n], t2c[n] = t2c[n], t1c[n] # swap back
            # Keep the swap only if it narrows the length gap between tours.
            if abs(tourlen1_swap - tourlen2_swap) < \
               abs(tourlen1_noswap - tourlen2_noswap):
                t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tours
                improving = True
                vals = (i, n, 'swap', int(tourlen1_swap), int(tourlen2_swap))
                print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
            else:
                vals = (i, n, '----', int(tourlen1_noswap), int(tourlen2_noswap))
                print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
            sys.stdout.flush()
        passes += 1
        gap = abs(tourlen1_noswap - tourlen2_noswap)
        if last_gap:
            delta_gap = gap - last_gap
        else:
            delta_gap = 0
        last_gap = gap
        print '\npass:', passes,
        print 'tour1_len:', round(tourlen1_noswap), 'tour2_len:', round(tourlen2_noswap),
        print 'gap:', round(gap), 'delta_gap:', round(delta_gap)
        print
        sys.stdout.flush()
    return unchunk(t1c), unchunk(t2c)
def merge_tours(tour1_nodes, tour2_nodes, chunk_length, tour_engine):
    """Cross-splice two tours by alternating chunks: even-numbered chunks stay
    in their tour, odd-numbered chunks are exchanged. Prints diagnostics and
    returns the two spliced node lists."""
    f_plen = tour_engine.external_path_length
    f_tlen = tour_engine.external_tour_length
    tour1_chunks = chunks(tour1_nodes, chunk_length)
    tour2_chunks = chunks(tour2_nodes, chunk_length)
    # TODO now just splices alternate chunks; shoud it merge more smartly?
    t1,t2 = [],[]
    for n, (chunk1, chunk2) in enumerate(zip(tour1_chunks, tour2_chunks)):
        print 'chunk_path_lenths',n,'tour1:', int(f_plen(chunk1)), 'tour2:', int(f_plen(chunk2)),
        # DEBUG
        s1=set(chunk1)
        s2=set(chunk2)
        print "chunk_sets:",n,"lenIntersect:", len(s1.intersection(s2)), "lenUnion:", len(s1.union(s2))
        if n % 2 == 0:
            t1.extend(chunk1)
            t2.extend(chunk2)
        else:
            t1.extend(chunk2)
            t2.extend(chunk1)
        #print 'cumulative tour lengths:', int(f_tlen(t1)), int(f_tlen(t2))
    print 'merged tour lengths:',int(f_tlen(t1)), int(f_tlen(t2))
    # Sanity checks: report any None entries and duplicate nodes.
    print "***start NONE check in merge_tours***"
    for n,(a,b) in enumerate(zip(t1,t2)):
        if a==None or b==None:
            print n,a,b
    print "***end NONE check in merge_tours***"
    print "t1, t2 lengths:", len(t1),len(set(t1)), len(t2), len(set(t2))
    sys.stdout.flush()
    return t1, t2
def do_2opt_passes( tour1, tour2, updater, update_path_len, \
                    batch_size = BATCH_SIZE_FULLTOUR, \
                    min_pass_improvement = MIN_PASS_IMPROVEMENT_FULLTOUR):
    """Drive the paired 2-opt updaters in batches, printing progress, until a
    completed pass improves the max tour length by less than
    min_pass_improvement.

    `updater` is an izip of the two tours' tour_update_generator()s, so each
    .next() advances both tours by one node.
    """
    lmax_last = int(max(tour1.get_tour_length(), tour2.get_tour_length()))
    t0 = time.time()
    nodes_processed = 0
    pass_improvement = { 0:min_pass_improvement }
    #num_nodes = max(len(tour1.starting_tour), len(tour2.starting_tour))
    improving = True
    while improving:
        for _ in xrange(batch_size):
            updater.next()
            nodes_processed += 1
        l1 = int(tour1.get_tour_length())
        l2 = int(tour2.get_tour_length())
        lmax = max(l1,l2)
        improvement = lmax_last - lmax
        lmax_last = lmax
        # One "pass" = one sweep of update_path_len node updates.
        pass_num = int(nodes_processed / update_path_len) + 1
        if pass_num not in pass_improvement:
            # started a new pass...
            pass_improvement[pass_num] = 0
            # Stop when the just-finished pass improved too little (but not
            # when it regressed, which can happen due to forced swaps).
            if (pass_improvement[pass_num - 1] < min_pass_improvement) and \
               (pass_improvement[pass_num - 1] >= 0) :
                improving = False
        pass_improvement[pass_num] += improvement
        print "pass:", pass_num, "nodes:", nodes_processed,
        print "tour1:", l1, "tour2:",l2,
        print "max:",lmax, "imp:",improvement,
        print "pass_imp:", pass_improvement[pass_num],
        print "secs:", int(time.time()-t0),
        # print "edge counts:", tour1.get_edge_counts(), tour2.get_edge_counts()
        ecounts = tour1.get_edge_counts()
        print "edges:", ecounts
        sys.stdout.flush()
        #print "checking tour lengths"
        #tour1_nodelist = tour1.get_current_tour_node_list()
        #tour2_nodelist = tour2.get_current_tour_node_list()
        #print "tour1_length", len(tour1_nodelist), 'uniq:',len(set(tour1_nodelist)),
        #print "tour2_length", len(tour2_nodelist), 'uniq:',len(set(tour2_nodelist))
        # print "FOR DEBUG -- aborting loop"
        # break # TODO FOR DEBUGGING -- so remove thie!
    print "\nRESULTS ",
    print "passes:", pass_num, " nodes:", nodes_processed,
    print " tour1:", l1, " tour2:",l2,
    print " max:",lmax, " secs:", int(time.time()-t0),
    print " 2edges:", ecounts
def main():
    """Command-line entry point.

    Usage: subtour2opt.py <tour_infile> <update_path_len> <tour_outfile>
    Reads two tours, balances their lengths by swapping subtour chunks,
    runs full-tour 2-opt passes on both, and writes the results.
    """
    print "\n*** 2-OPT for Traveling Santa Problem ***\n"
    tour_infile = sys.argv[1]
    update_path_len = int(sys.argv[2])
    tour_outfile = sys.argv[3]
    print "Input tour file :", tour_infile
    print "Update path length :", update_path_len
    print "Output tour file :", tour_outfile
    print
    tour1_init, tour2_init = read_tours(tour_infile)
    assert len(tour1_init)==len(tour2_init)
    tour1 = TourEngine(tour1_init)
    tour2 = TourEngine(tour2_init)
    # cross-splice subtours
    tour1_nodes = tour1.get_current_tour_node_list()
    tour2_nodes = tour2.get_current_tour_node_list()
    tour1_merged, tour2_merged = opt_merge_tours(tour1_nodes, tour2_nodes, update_path_len,
                                                 tour1.external_tour_length)
    assert len(tour1_merged) == len(tour2_merged)
    assert len(tour1_merged)==len(set(tour1_merged))
    assert len(tour2_merged)==len(set(tour2_merged))
    # do 2opt passes on entirity of both merged tours to improve links between subtours
    tour1.clear_all_edges()
    tour1 = TourEngine(tour1_merged)
    tour2 = TourEngine(tour2_merged)
    print '\nstarting 2opt\n'
    # Lock-step generator: each .next() advances one 2-opt step in each tour.
    updater = itertools.izip(tour1.tour_update_generator(),
                             tour2.tour_update_generator())
    do_2opt_passes(tour1, tour2, updater, len(tour1_merged),
                   batch_size=BATCH_SIZE_FULLTOUR,
                   min_pass_improvement=MIN_PASS_IMPROVEMENT_FULLTOUR)
    # write results
    tour1_nodelist = tour1.get_current_tour_node_list()
    tour2_nodelist = tour2.get_current_tour_node_list()
    write_tours(tour1_nodelist, tour2_nodelist, tour_outfile)
    print "Done.\n"
def TEST_do_swap():
    """Ad-hoc manual test of the 2-opt swap on a small node list.

    NOTE(review): this calls to_next_node_list/do_swap/to_node_list as free
    functions, but in this file they are TourEngine methods, so running it
    as-is would raise NameError -- confirm before use.
    """
    l = [0,4,7,8,9,1,2,3,5,6]
    print "node list :", l
    nnl = to_next_node_list(l)
    print "next node list before swap:", nnl
    do_swap(nnl, 2,3,7,8)
    print "next node list after swap:", nnl
    print
    print "node list :", l
    print "node list after swap :", to_node_list(nnl, 0)
if __name__ == '__main__':
main() | random_line_split | |
subtour2opt.py | from itertools import izip
import cPickle
import math
import numpy
import random
import sys
import pandas
import itertools
import time
# Input data: per-city nearest-neighbor distances (pickled) and city coordinates (CSV).
CITIES_NEIGHBORS_FILE = '/home/chefele/kaggle/Santa/data/santa_cities_nndists.pkl'
CITIES_INFO_FILE = '/home/chefele/kaggle/Santa/download/santa_cities.csv'
# Stop 2-opt passes once a whole pass improves the max tour length by less than this.
MIN_PASS_IMPROVEMENT_FULLTOUR = 1000 # must be >0; 1000 gets ~99% of possible improvement with this data
MIN_PASS_IMPROVEMENT_SUBTOUR = 50
# When True, stop scanning a node's neighbor list early (faster, slightly longer path).
FAST_SEARCH = True
BATCH_SIZE_FULLTOUR = 1000 # Num nodes to process between prints of status updates
BATCH_SIZE_SUBTOUR = 1000 # Num nodes to process between prints of status updates
MAX_DISTANCE = 40000 # 20K x 20K image, so need any value >sqrt(2)*20K
# Fixed seed so runs are reproducible.
RANDOM_SEED = 1234567
random.seed(RANDOM_SEED)
class TourEngine:
neighbors = [] # read only, node nearest neighbors' info: id,dist
nodes = [] # dataframe of node id,x,y
edges = {} # read/write, dict of counts of all edges in all graphs/tours
# edges is read/write, hash table of counts of all edges in all graphs/tours
# edges = numpy.zeros(EDGE_HASH_TABLE_SIZE, int)
def __init__(self, starting_tour, load_neighbors=True):
# read only; starting tour of nodes
self.starting_tour = starting_tour
# *** read/write; pointers to next node in this tour's path
# that is, next_node[node_id] = next_node_id
self.next_node = self.to_next_node_list(starting_tour)
# add all edges in the starting tour to the common edges set
for node in xrange(len(self.next_node)):
self.edge_add(node, self.next_node[node])
if len(TourEngine.neighbors) == 0 and load_neighbors:
TourEngine.neighbors = self.get_neighbors()
if len(TourEngine.nodes) == 0:
TourEngine.nodes = self.get_nodes()
# for speed, convert to numpy arrays (pandas dataframe access much slower)
self.nodes_x = numpy.array(TourEngine.nodes['x'])
self.nodes_y = numpy.array(TourEngine.nodes['y'])
def clear_all_edges(self):
TourEngine.edges = {}
def get_neighbors(self):
print "Loading nearest neighbor distances from:", CITIES_NEIGHBORS_FILE,"...",
sys.stdout.flush()
fin = open(CITIES_NEIGHBORS_FILE, 'rb')
neighbors = cPickle.load(fin)
fin.close()
print "Done."
sys.stdout.flush()
return neighbors # list of dataframes, one per city
def get_nodes(self):
print "Loading node data from:", CITIES_INFO_FILE,"...",
sys.stdout.flush()
nodes = pandas.read_csv(CITIES_INFO_FILE)
print "Done."
sys.stdout.flush()
return nodes
def to_next_node_list(self, node_lst): # node list -> next_node_list
# NOTE asserts below doesn't work with subpaths (only with all nodes)
# assert len(node_lst) == len(set(node_lst)), 'Duplicate nodes!'
# assert len(node_lst) == max(node_lst)+1, 'Missing nodes!'
next_node_lst = [None] * (max(node_lst)+1)
# link the nodes in the body of the list
for cur_node, next_node in izip(node_lst[:-1], node_lst[1:]):
next_node_lst[cur_node] = next_node
# now link the end of the list to the start
cur_node, next_node = node_lst[-1], node_lst[0]
next_node_lst[cur_node] = next_node
return next_node_lst
def to_node_list(self, next_node_list, node_start):
node_list = [node_start]
node = next_node_list[node_start]
while node != node_start:
node_list.append(node)
node = next_node_list[node]
return node_list
def get_current_tour_node_list(self):
return self.to_node_list(self.next_node, self.starting_tour[0])
def dist(self, node1, node2):
dx = self.nodes_x[node1] - self.nodes_x[node2]
dy = self.nodes_y[node1] - self.nodes_y[node2]
edge_dist = math.sqrt(dx*dx + dy*dy)
return edge_dist
def edge_hash(self, node1, node2):
return hash( ( min(node1,node2), max(node1,node2) ) ) % EDGE_HASH_TABLE_SIZE
def edge_exists(self, node1, node2):
# return TourEngine.edges[self.edge_hash(node1,node2)] > 0
return (node1,node2) in TourEngine.edges or \
(node2,node1) in TourEngine.edges
def edge_multiple_exists(self, node1, node2):
# return TourEngine.edges[self.edge_hash(node1,node2)] > 1
return TourEngine.edges.get((node1,node2), 0) > 1 or \
TourEngine.edges.get((node2,node1), 0) > 1
def edge_add(self, node1, node2):
# TourEngine.edges[self.edge_hash(node1,node2)] += 1
e12 = (node1,node2)
e21 = (node2,node1)
TourEngine.edges[e12] = TourEngine.edges.setdefault(e12, 0) + 1
TourEngine.edges[e21] = TourEngine.edges.setdefault(e21, 0) + 1
def edge_delete(self, node1, node2):
# TourEngine.edges[self.edge_hash(node1,node2)] -= 1
e12 = (node1,node2)
e21 = (node2,node1)
edges = TourEngine.edges
assert edges[e12]>0 and edges[e21]>0, "Deleting nonexistant edge"
edges[e12] -= 1
edges[e21] -= 1
if edges[e12] == 0:
del edges[e12]
if edges[e21] == 0:
del edges[e21]
# TourEngine.edges.remove( (node1, node2) )
# TourEngine.edges.remove( (node2, node1) )
def tour_update_generator(self, update_path=None):
if update_path == None:
update_path = self.starting_tour
update_path_nodes = set(update_path)
# infinite sequence of nodes in random order to update
node_generator = itertools.cycle(random.sample(update_path, len(update_path)))
for node1 in node_generator:
node2 = self.next_node[node1]
assert node1 != node2
dist12 = self.dist(node1, node2) # original edge
swap_found = False
if self.edge_multiple_exists(node1, node2):
best_dist_diff = MAX_DISTANCE # forces a swap
else:
best_dist_diff = 0 # only swaps if tot dist improves
for node3 in TourEngine.neighbors[node1].id: # sorted by distance
node4 = self.next_node[node3]
if len(set((node1,node2,node3,node4)))<4: # skip overlaps
continue
if (node1 not in update_path_nodes) or (node2 not in update_path_nodes) or \
(node3 not in update_path_nodes) or (node4 not in update_path_nodes):
continue # only do 'in-region' updates along update path
dist34 = self.dist(node3, node4) # original edge
dist13 = self.dist(node1, node3) # proposed swap edge
dist24 = self.dist(node2, node4) # proposed swap edge
dist_diff = (dist13 + dist24) - (dist12 + dist34)
if dist_diff < best_dist_diff and \
not self.edge_exists(node1, node3) and \
not self.edge_exists(node2, node4):
swap_found = True
best_dist_diff, best_node3, best_node4 = dist_diff, node3, node4
# TODO for max accuracy, remove the if & break below?
if (dist13 > dist12) and FAST_SEARCH: # Much faster (3x-10x), but path length is ~1% more
break
# note the symmetry of test, since the outer node2 loop will
# eventually hit the other points
if swap_found:
self.do_swap(node1, node2, best_node3, best_node4)
self.edge_delete(node1, node2)
self.edge_delete(best_node3, best_node4)
self.edge_add(node1, best_node3)
self.edge_add(node2, best_node4)
yield True # NOTE this functionis a generator!
def | (self, node1, node2, node3, node4):
# does a swap, updating self.next_node array in-place
assert node2 == self.next_node[node1], 'node2 does not follow node1 in tour'
assert node4 == self.next_node[node3], 'node4 does not follow node3 in tour'
self.next_node[node1] = node3
self.reverse_path(node2, node4)
self.next_node[node2] = node4
def reverse_path(self, head, tail):
# reverses a path in self.next_node array in-place (like reversing a linked list)
previous = None
node = head
while node != tail:
temp = self.next_node[node]
self.next_node[node] = previous
previous = node
node = temp
def get_current_tour(self):
return self.to_node_list(self.next_node, self.starting_tour[0])
def get_length_common(self, next_node):
return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node) if n2!=None))
def get_tour_length(self):
# n1 = node, n2 = next_node[n1]
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(self.next_node)))
return self.get_length_common(self.next_node)
def external_tour_length(self, node_list):
next_node = self.to_next_node_list(node_list)
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node)))
return self.get_length_common(next_node)
def external_path_length(self, node_list):
next_node = self.to_next_node_list(node_list)
return self.get_length_common(next_node[:-1])
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node[:-1])))
def get_edge_counts(self):
# return (TourEngine.edges.min(), TourEngine.edges.max())
edges = TourEngine.edges
ecounts = {}
for e in edges:
ecounts[edges[e]] = ecounts.setdefault(edges[e],0) + 1
return [(cnt, ecounts[cnt]) for cnt in sorted(ecounts)]
def read_tours(tour_file):
df = pandas.read_csv(tour_file)
tour1, tour2 = list(df['path1']), list(df['path2'])
return (tour1, tour2)
def write_tours(tour1, tour2, outfile):
fout = open(outfile, 'w')
fout.write('path1,path2\n')
lines = [str(n1)+','+str(n2) for n1, n2 in izip(tour1, tour2)]
for line in lines:
fout.write(line+'\n')
fout.close()
print '\nWrote 2-OPT tours to: ', outfile, '\n'
def chunks(l, n):
# split a list into chunks of length n
return [l[i:i+n] for i in range(0, len(l), n)]
def shuffled_paths(node_list, path_len):
return [random.sample(path, len(path)) for path in chunks(node_list, path_len)]
def z_order_id(x,y):
# z-order id is interleaving of 16-bit binary strings of the int x & y coordinates
x, y = int(x), int(y)
sx = '{:016b}'.format(x)
sy = '{:016b}'.format(y)
return ''.join(i for j in zip(sx,sy) for i in j)
def z_order_paths(node_list, path_len):
# TourEngine.nodes = df of id, x, y
zpaths = []
for path in chunks(node_list, path_len):
xs = TourEngine.nodes['x'][path]
ys = TourEngine.nodes['y'][path]
z_node = [(z_order_id(x,y), node) for node, x,y in zip(path, xs, ys)]
z_order_nodes = [node for z, node in sorted(z_node)]
zpaths.append(z_order_nodes)
return zpaths
def bit_counter_iterator(nbits):
for bit_tuple in itertools.product((0,1), repeat=nbits):
yield bit_tuple
class PathLengthCache:
def __init__(self, tour_engine):
self.cache = {}
self.tour_engine = tour_engine
def path_length(self, path):
path = tuple(path)
#print "PathLengthCache.path_length():", path
#print "Min/max for path:", min(path),max(path)
if path not in self.cache:
self.cache[path] = self.tour_engine.external_path_length(path)
return self.cache[path]
def unchunk(lst_of_lsts):
# returns list of lists of nodes into a single concatenated list of nodes
return sum(lst_of_lsts, [])
def print_tour_len_from_chunks(t1c, t2c, f_tourlen):
tourlen1 = f_tourlen(unchunk(t1c))
tourlen2 = f_tourlen(unchunk(t2c))
print 'REAL LENGTHS:', 'tour1:', round(tourlen1), 'tour2:', round(tourlen2)
sys.stdout.flush()
def opt_merge_tours(tour1_nodes, tour2_nodes, chunk_length, f_tourlen):
assert len(tour1_nodes) == len(tour2_nodes), 'Tour lengths differ!'
t1c = chunks(tour1_nodes, chunk_length)
t2c = chunks(tour2_nodes, chunk_length)
n_chunks = len(t1c)
assert len(t1c) == len(t2c), 'Unequal number of tour chunks!'
passes = 0
last_gap = None
improving = True
while improving:
improving = False
for i, n in enumerate(random.sample(range(0,n_chunks), n_chunks)):
tourlen1_noswap = f_tourlen(unchunk(t1c))
tourlen2_noswap = f_tourlen(unchunk(t2c))
t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tour
tourlen1_swap = f_tourlen(unchunk(t1c))
tourlen2_swap = f_tourlen(unchunk(t2c))
t1c[n], t2c[n] = t2c[n], t1c[n] # swap back
if abs(tourlen1_swap - tourlen2_swap) < \
abs(tourlen1_noswap - tourlen2_noswap):
t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tours
improving = True
vals = (i, n, 'swap', int(tourlen1_swap), int(tourlen2_swap))
print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
else:
vals = (i, n, '----', int(tourlen1_noswap), int(tourlen2_noswap))
print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
sys.stdout.flush()
passes += 1
gap = abs(tourlen1_noswap - tourlen2_noswap)
if last_gap:
delta_gap = gap - last_gap
else:
delta_gap = 0
last_gap = gap
print '\npass:', passes,
print 'tour1_len:', round(tourlen1_noswap), 'tour2_len:', round(tourlen2_noswap),
print 'gap:', round(gap), 'delta_gap:', round(delta_gap)
print
sys.stdout.flush()
return unchunk(t1c), unchunk(t2c)
def merge_tours(tour1_nodes, tour2_nodes, chunk_length, tour_engine):
f_plen = tour_engine.external_path_length
f_tlen = tour_engine.external_tour_length
tour1_chunks = chunks(tour1_nodes, chunk_length)
tour2_chunks = chunks(tour2_nodes, chunk_length)
# TODO now just splices alternate chunks; shoud it merge more smartly?
t1,t2 = [],[]
for n, (chunk1, chunk2) in enumerate(zip(tour1_chunks, tour2_chunks)):
print 'chunk_path_lenths',n,'tour1:', int(f_plen(chunk1)), 'tour2:', int(f_plen(chunk2)),
# DEBUG
s1=set(chunk1)
s2=set(chunk2)
print "chunk_sets:",n,"lenIntersect:", len(s1.intersection(s2)), "lenUnion:", len(s1.union(s2))
if n % 2 == 0:
t1.extend(chunk1)
t2.extend(chunk2)
else:
t1.extend(chunk2)
t2.extend(chunk1)
#print 'cumulative tour lengths:', int(f_tlen(t1)), int(f_tlen(t2))
print 'merged tour lengths:',int(f_tlen(t1)), int(f_tlen(t2))
print "***start NONE check in merge_tours***"
for n,(a,b) in enumerate(zip(t1,t2)):
if a==None or b==None:
print n,a,b
print "***end NONE check in merge_tours***"
print "t1, t2 lengths:", len(t1),len(set(t1)), len(t2), len(set(t2))
sys.stdout.flush()
return t1, t2
def do_2opt_passes( tour1, tour2, updater, update_path_len, \
batch_size = BATCH_SIZE_FULLTOUR, \
min_pass_improvement = MIN_PASS_IMPROVEMENT_FULLTOUR):
lmax_last = int(max(tour1.get_tour_length(), tour2.get_tour_length()))
t0 = time.time()
nodes_processed = 0
pass_improvement = { 0:min_pass_improvement }
#num_nodes = max(len(tour1.starting_tour), len(tour2.starting_tour))
improving = True
while improving:
for _ in xrange(batch_size):
updater.next()
nodes_processed += 1
l1 = int(tour1.get_tour_length())
l2 = int(tour2.get_tour_length())
lmax = max(l1,l2)
improvement = lmax_last - lmax
lmax_last = lmax
pass_num = int(nodes_processed / update_path_len) + 1
if pass_num not in pass_improvement:
# started a new pass...
pass_improvement[pass_num] = 0
if (pass_improvement[pass_num - 1] < min_pass_improvement) and \
(pass_improvement[pass_num - 1] >= 0) :
improving = False
pass_improvement[pass_num] += improvement
print "pass:", pass_num, "nodes:", nodes_processed,
print "tour1:", l1, "tour2:",l2,
print "max:",lmax, "imp:",improvement,
print "pass_imp:", pass_improvement[pass_num],
print "secs:", int(time.time()-t0),
# print "edge counts:", tour1.get_edge_counts(), tour2.get_edge_counts()
ecounts = tour1.get_edge_counts()
print "edges:", ecounts
sys.stdout.flush()
#print "checking tour lengths"
#tour1_nodelist = tour1.get_current_tour_node_list()
#tour2_nodelist = tour2.get_current_tour_node_list()
#print "tour1_length", len(tour1_nodelist), 'uniq:',len(set(tour1_nodelist)),
#print "tour2_length", len(tour2_nodelist), 'uniq:',len(set(tour2_nodelist))
# print "FOR DEBUG -- aborting loop"
# break # TODO FOR DEBUGGING -- so remove thie!
print "\nRESULTS ",
print "passes:", pass_num, " nodes:", nodes_processed,
print " tour1:", l1, " tour2:",l2,
print " max:",lmax, " secs:", int(time.time()-t0),
print " 2edges:", ecounts
def main():
print "\n*** 2-OPT for Traveling Santa Problem ***\n"
tour_infile = sys.argv[1]
update_path_len = int(sys.argv[2])
tour_outfile = sys.argv[3]
print "Input tour file :", tour_infile
print "Update path length :", update_path_len
print "Output tour file :", tour_outfile
print
tour1_init, tour2_init = read_tours(tour_infile)
assert len(tour1_init)==len(tour2_init)
tour1 = TourEngine(tour1_init)
tour2 = TourEngine(tour2_init)
# cross-splice subtours
tour1_nodes = tour1.get_current_tour_node_list()
tour2_nodes = tour2.get_current_tour_node_list()
tour1_merged, tour2_merged = opt_merge_tours(tour1_nodes, tour2_nodes, update_path_len,
tour1.external_tour_length)
assert len(tour1_merged) == len(tour2_merged)
assert len(tour1_merged)==len(set(tour1_merged))
assert len(tour2_merged)==len(set(tour2_merged))
# do 2opt passes on entirity of both merged tours to improve links between subtours
tour1.clear_all_edges()
tour1 = TourEngine(tour1_merged)
tour2 = TourEngine(tour2_merged)
print '\nstarting 2opt\n'
updater = itertools.izip(tour1.tour_update_generator(),
tour2.tour_update_generator())
do_2opt_passes(tour1, tour2, updater, len(tour1_merged),
batch_size=BATCH_SIZE_FULLTOUR,
min_pass_improvement=MIN_PASS_IMPROVEMENT_FULLTOUR)
# write results
tour1_nodelist = tour1.get_current_tour_node_list()
tour2_nodelist = tour2.get_current_tour_node_list()
write_tours(tour1_nodelist, tour2_nodelist, tour_outfile)
print "Done.\n"
def TEST_do_swap():
l = [0,4,7,8,9,1,2,3,5,6]
print "node list :", l
nnl = to_next_node_list(l)
print "next node list before swap:", nnl
do_swap(nnl, 2,3,7,8)
print "next node list after swap:", nnl
print
print "node list :", l
print "node list after swap :", to_node_list(nnl, 0)
if __name__ == '__main__':
main()
| do_swap | identifier_name |
subtour2opt.py | from itertools import izip
import cPickle
import math
import numpy
import random
import sys
import pandas
import itertools
import time
CITIES_NEIGHBORS_FILE = '/home/chefele/kaggle/Santa/data/santa_cities_nndists.pkl'
CITIES_INFO_FILE = '/home/chefele/kaggle/Santa/download/santa_cities.csv'
MIN_PASS_IMPROVEMENT_FULLTOUR = 1000 # must be >0; 1000 gets ~99% of possible improvement with this data
MIN_PASS_IMPROVEMENT_SUBTOUR = 50
FAST_SEARCH = True
BATCH_SIZE_FULLTOUR = 1000 # Num nodes to process between prints of status updates
BATCH_SIZE_SUBTOUR = 1000 # Num nodes to process between prints of status updates
MAX_DISTANCE = 40000 # 20K x 20K image, so need any value >sqrt(2)*20K
RANDOM_SEED = 1234567
random.seed(RANDOM_SEED)
class TourEngine:
neighbors = [] # read only, node nearest neighbors' info: id,dist
nodes = [] # dataframe of node id,x,y
edges = {} # read/write, dict of counts of all edges in all graphs/tours
# edges is read/write, hash table of counts of all edges in all graphs/tours
# edges = numpy.zeros(EDGE_HASH_TABLE_SIZE, int)
def __init__(self, starting_tour, load_neighbors=True):
# read only; starting tour of nodes
self.starting_tour = starting_tour
# *** read/write; pointers to next node in this tour's path
# that is, next_node[node_id] = next_node_id
self.next_node = self.to_next_node_list(starting_tour)
# add all edges in the starting tour to the common edges set
for node in xrange(len(self.next_node)):
self.edge_add(node, self.next_node[node])
if len(TourEngine.neighbors) == 0 and load_neighbors:
TourEngine.neighbors = self.get_neighbors()
if len(TourEngine.nodes) == 0:
TourEngine.nodes = self.get_nodes()
# for speed, convert to numpy arrays (pandas dataframe access much slower)
self.nodes_x = numpy.array(TourEngine.nodes['x'])
self.nodes_y = numpy.array(TourEngine.nodes['y'])
def clear_all_edges(self):
TourEngine.edges = {}
def get_neighbors(self):
print "Loading nearest neighbor distances from:", CITIES_NEIGHBORS_FILE,"...",
sys.stdout.flush()
fin = open(CITIES_NEIGHBORS_FILE, 'rb')
neighbors = cPickle.load(fin)
fin.close()
print "Done."
sys.stdout.flush()
return neighbors # list of dataframes, one per city
def get_nodes(self):
print "Loading node data from:", CITIES_INFO_FILE,"...",
sys.stdout.flush()
nodes = pandas.read_csv(CITIES_INFO_FILE)
print "Done."
sys.stdout.flush()
return nodes
def to_next_node_list(self, node_lst): # node list -> next_node_list
# NOTE asserts below doesn't work with subpaths (only with all nodes)
# assert len(node_lst) == len(set(node_lst)), 'Duplicate nodes!'
# assert len(node_lst) == max(node_lst)+1, 'Missing nodes!'
next_node_lst = [None] * (max(node_lst)+1)
# link the nodes in the body of the list
for cur_node, next_node in izip(node_lst[:-1], node_lst[1:]):
next_node_lst[cur_node] = next_node
# now link the end of the list to the start
cur_node, next_node = node_lst[-1], node_lst[0]
next_node_lst[cur_node] = next_node
return next_node_lst
def to_node_list(self, next_node_list, node_start):
node_list = [node_start]
node = next_node_list[node_start]
while node != node_start:
node_list.append(node)
node = next_node_list[node]
return node_list
def get_current_tour_node_list(self):
return self.to_node_list(self.next_node, self.starting_tour[0])
def dist(self, node1, node2):
dx = self.nodes_x[node1] - self.nodes_x[node2]
dy = self.nodes_y[node1] - self.nodes_y[node2]
edge_dist = math.sqrt(dx*dx + dy*dy)
return edge_dist
def edge_hash(self, node1, node2):
return hash( ( min(node1,node2), max(node1,node2) ) ) % EDGE_HASH_TABLE_SIZE
def edge_exists(self, node1, node2):
# return TourEngine.edges[self.edge_hash(node1,node2)] > 0
return (node1,node2) in TourEngine.edges or \
(node2,node1) in TourEngine.edges
def edge_multiple_exists(self, node1, node2):
# return TourEngine.edges[self.edge_hash(node1,node2)] > 1
return TourEngine.edges.get((node1,node2), 0) > 1 or \
TourEngine.edges.get((node2,node1), 0) > 1
def edge_add(self, node1, node2):
# TourEngine.edges[self.edge_hash(node1,node2)] += 1
e12 = (node1,node2)
e21 = (node2,node1)
TourEngine.edges[e12] = TourEngine.edges.setdefault(e12, 0) + 1
TourEngine.edges[e21] = TourEngine.edges.setdefault(e21, 0) + 1
def edge_delete(self, node1, node2):
# TourEngine.edges[self.edge_hash(node1,node2)] -= 1
e12 = (node1,node2)
e21 = (node2,node1)
edges = TourEngine.edges
assert edges[e12]>0 and edges[e21]>0, "Deleting nonexistant edge"
edges[e12] -= 1
edges[e21] -= 1
if edges[e12] == 0:
del edges[e12]
if edges[e21] == 0:
del edges[e21]
# TourEngine.edges.remove( (node1, node2) )
# TourEngine.edges.remove( (node2, node1) )
def tour_update_generator(self, update_path=None):
if update_path == None:
update_path = self.starting_tour
update_path_nodes = set(update_path)
# infinite sequence of nodes in random order to update
node_generator = itertools.cycle(random.sample(update_path, len(update_path)))
for node1 in node_generator:
node2 = self.next_node[node1]
assert node1 != node2
dist12 = self.dist(node1, node2) # original edge
swap_found = False
if self.edge_multiple_exists(node1, node2):
best_dist_diff = MAX_DISTANCE # forces a swap
else:
best_dist_diff = 0 # only swaps if tot dist improves
for node3 in TourEngine.neighbors[node1].id: # sorted by distance
node4 = self.next_node[node3]
if len(set((node1,node2,node3,node4)))<4: # skip overlaps
continue
if (node1 not in update_path_nodes) or (node2 not in update_path_nodes) or \
(node3 not in update_path_nodes) or (node4 not in update_path_nodes):
continue # only do 'in-region' updates along update path
dist34 = self.dist(node3, node4) # original edge
dist13 = self.dist(node1, node3) # proposed swap edge
dist24 = self.dist(node2, node4) # proposed swap edge
dist_diff = (dist13 + dist24) - (dist12 + dist34)
if dist_diff < best_dist_diff and \
not self.edge_exists(node1, node3) and \
not self.edge_exists(node2, node4):
swap_found = True
best_dist_diff, best_node3, best_node4 = dist_diff, node3, node4
# TODO for max accuracy, remove the if & break below?
if (dist13 > dist12) and FAST_SEARCH: # Much faster (3x-10x), but path length is ~1% more
break
# note the symmetry of test, since the outer node2 loop will
# eventually hit the other points
if swap_found:
self.do_swap(node1, node2, best_node3, best_node4)
self.edge_delete(node1, node2)
self.edge_delete(best_node3, best_node4)
self.edge_add(node1, best_node3)
self.edge_add(node2, best_node4)
yield True # NOTE this functionis a generator!
def do_swap(self, node1, node2, node3, node4):
# does a swap, updating self.next_node array in-place
assert node2 == self.next_node[node1], 'node2 does not follow node1 in tour'
assert node4 == self.next_node[node3], 'node4 does not follow node3 in tour'
self.next_node[node1] = node3
self.reverse_path(node2, node4)
self.next_node[node2] = node4
def reverse_path(self, head, tail):
# reverses a path in self.next_node array in-place (like reversing a linked list)
previous = None
node = head
while node != tail:
temp = self.next_node[node]
self.next_node[node] = previous
previous = node
node = temp
def get_current_tour(self):
return self.to_node_list(self.next_node, self.starting_tour[0])
def get_length_common(self, next_node):
return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node) if n2!=None))
def get_tour_length(self):
# n1 = node, n2 = next_node[n1]
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(self.next_node)))
return self.get_length_common(self.next_node)
def external_tour_length(self, node_list):
next_node = self.to_next_node_list(node_list)
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node)))
return self.get_length_common(next_node)
def external_path_length(self, node_list):
next_node = self.to_next_node_list(node_list)
return self.get_length_common(next_node[:-1])
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node[:-1])))
def get_edge_counts(self):
# return (TourEngine.edges.min(), TourEngine.edges.max())
edges = TourEngine.edges
ecounts = {}
for e in edges:
ecounts[edges[e]] = ecounts.setdefault(edges[e],0) + 1
return [(cnt, ecounts[cnt]) for cnt in sorted(ecounts)]
def read_tours(tour_file):
df = pandas.read_csv(tour_file)
tour1, tour2 = list(df['path1']), list(df['path2'])
return (tour1, tour2)
def write_tours(tour1, tour2, outfile):
fout = open(outfile, 'w')
fout.write('path1,path2\n')
lines = [str(n1)+','+str(n2) for n1, n2 in izip(tour1, tour2)]
for line in lines:
fout.write(line+'\n')
fout.close()
print '\nWrote 2-OPT tours to: ', outfile, '\n'
def chunks(l, n):
# split a list into chunks of length n
return [l[i:i+n] for i in range(0, len(l), n)]
def shuffled_paths(node_list, path_len):
return [random.sample(path, len(path)) for path in chunks(node_list, path_len)]
def z_order_id(x,y):
# z-order id is interleaving of 16-bit binary strings of the int x & y coordinates
x, y = int(x), int(y)
sx = '{:016b}'.format(x)
sy = '{:016b}'.format(y)
return ''.join(i for j in zip(sx,sy) for i in j)
def z_order_paths(node_list, path_len):
# TourEngine.nodes = df of id, x, y
zpaths = []
for path in chunks(node_list, path_len):
xs = TourEngine.nodes['x'][path]
ys = TourEngine.nodes['y'][path]
z_node = [(z_order_id(x,y), node) for node, x,y in zip(path, xs, ys)]
z_order_nodes = [node for z, node in sorted(z_node)]
zpaths.append(z_order_nodes)
return zpaths
def bit_counter_iterator(nbits):
for bit_tuple in itertools.product((0,1), repeat=nbits):
yield bit_tuple
class PathLengthCache:
def __init__(self, tour_engine):
self.cache = {}
self.tour_engine = tour_engine
def path_length(self, path):
path = tuple(path)
#print "PathLengthCache.path_length():", path
#print "Min/max for path:", min(path),max(path)
if path not in self.cache:
self.cache[path] = self.tour_engine.external_path_length(path)
return self.cache[path]
def unchunk(lst_of_lsts):
# returns list of lists of nodes into a single concatenated list of nodes
return sum(lst_of_lsts, [])
def print_tour_len_from_chunks(t1c, t2c, f_tourlen):
tourlen1 = f_tourlen(unchunk(t1c))
tourlen2 = f_tourlen(unchunk(t2c))
print 'REAL LENGTHS:', 'tour1:', round(tourlen1), 'tour2:', round(tourlen2)
sys.stdout.flush()
def opt_merge_tours(tour1_nodes, tour2_nodes, chunk_length, f_tourlen):
assert len(tour1_nodes) == len(tour2_nodes), 'Tour lengths differ!'
t1c = chunks(tour1_nodes, chunk_length)
t2c = chunks(tour2_nodes, chunk_length)
n_chunks = len(t1c)
assert len(t1c) == len(t2c), 'Unequal number of tour chunks!'
passes = 0
last_gap = None
improving = True
while improving:
improving = False
for i, n in enumerate(random.sample(range(0,n_chunks), n_chunks)):
tourlen1_noswap = f_tourlen(unchunk(t1c))
tourlen2_noswap = f_tourlen(unchunk(t2c))
t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tour
tourlen1_swap = f_tourlen(unchunk(t1c))
tourlen2_swap = f_tourlen(unchunk(t2c))
t1c[n], t2c[n] = t2c[n], t1c[n] # swap back
if abs(tourlen1_swap - tourlen2_swap) < \
abs(tourlen1_noswap - tourlen2_noswap):
t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tours
improving = True
vals = (i, n, 'swap', int(tourlen1_swap), int(tourlen2_swap))
print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
else:
vals = (i, n, '----', int(tourlen1_noswap), int(tourlen2_noswap))
print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
sys.stdout.flush()
passes += 1
gap = abs(tourlen1_noswap - tourlen2_noswap)
if last_gap:
delta_gap = gap - last_gap
else:
delta_gap = 0
last_gap = gap
print '\npass:', passes,
print 'tour1_len:', round(tourlen1_noswap), 'tour2_len:', round(tourlen2_noswap),
print 'gap:', round(gap), 'delta_gap:', round(delta_gap)
print
sys.stdout.flush()
return unchunk(t1c), unchunk(t2c)
def merge_tours(tour1_nodes, tour2_nodes, chunk_length, tour_engine):
f_plen = tour_engine.external_path_length
f_tlen = tour_engine.external_tour_length
tour1_chunks = chunks(tour1_nodes, chunk_length)
tour2_chunks = chunks(tour2_nodes, chunk_length)
# TODO now just splices alternate chunks; shoud it merge more smartly?
t1,t2 = [],[]
for n, (chunk1, chunk2) in enumerate(zip(tour1_chunks, tour2_chunks)):
print 'chunk_path_lenths',n,'tour1:', int(f_plen(chunk1)), 'tour2:', int(f_plen(chunk2)),
# DEBUG
s1=set(chunk1)
s2=set(chunk2)
print "chunk_sets:",n,"lenIntersect:", len(s1.intersection(s2)), "lenUnion:", len(s1.union(s2))
if n % 2 == 0:
t1.extend(chunk1)
t2.extend(chunk2)
else:
t1.extend(chunk2)
t2.extend(chunk1)
#print 'cumulative tour lengths:', int(f_tlen(t1)), int(f_tlen(t2))
print 'merged tour lengths:',int(f_tlen(t1)), int(f_tlen(t2))
print "***start NONE check in merge_tours***"
for n,(a,b) in enumerate(zip(t1,t2)):
if a==None or b==None:
print n,a,b
print "***end NONE check in merge_tours***"
print "t1, t2 lengths:", len(t1),len(set(t1)), len(t2), len(set(t2))
sys.stdout.flush()
return t1, t2
def do_2opt_passes( tour1, tour2, updater, update_path_len, \
batch_size = BATCH_SIZE_FULLTOUR, \
min_pass_improvement = MIN_PASS_IMPROVEMENT_FULLTOUR):
lmax_last = int(max(tour1.get_tour_length(), tour2.get_tour_length()))
t0 = time.time()
nodes_processed = 0
pass_improvement = { 0:min_pass_improvement }
#num_nodes = max(len(tour1.starting_tour), len(tour2.starting_tour))
improving = True
while improving:
for _ in xrange(batch_size):
updater.next()
nodes_processed += 1
l1 = int(tour1.get_tour_length())
l2 = int(tour2.get_tour_length())
lmax = max(l1,l2)
improvement = lmax_last - lmax
lmax_last = lmax
pass_num = int(nodes_processed / update_path_len) + 1
if pass_num not in pass_improvement:
# started a new pass...
pass_improvement[pass_num] = 0
if (pass_improvement[pass_num - 1] < min_pass_improvement) and \
(pass_improvement[pass_num - 1] >= 0) :
|
pass_improvement[pass_num] += improvement
print "pass:", pass_num, "nodes:", nodes_processed,
print "tour1:", l1, "tour2:",l2,
print "max:",lmax, "imp:",improvement,
print "pass_imp:", pass_improvement[pass_num],
print "secs:", int(time.time()-t0),
# print "edge counts:", tour1.get_edge_counts(), tour2.get_edge_counts()
ecounts = tour1.get_edge_counts()
print "edges:", ecounts
sys.stdout.flush()
#print "checking tour lengths"
#tour1_nodelist = tour1.get_current_tour_node_list()
#tour2_nodelist = tour2.get_current_tour_node_list()
#print "tour1_length", len(tour1_nodelist), 'uniq:',len(set(tour1_nodelist)),
#print "tour2_length", len(tour2_nodelist), 'uniq:',len(set(tour2_nodelist))
# print "FOR DEBUG -- aborting loop"
# break # TODO FOR DEBUGGING -- so remove thie!
print "\nRESULTS ",
print "passes:", pass_num, " nodes:", nodes_processed,
print " tour1:", l1, " tour2:",l2,
print " max:",lmax, " secs:", int(time.time()-t0),
print " 2edges:", ecounts
def main():
print "\n*** 2-OPT for Traveling Santa Problem ***\n"
tour_infile = sys.argv[1]
update_path_len = int(sys.argv[2])
tour_outfile = sys.argv[3]
print "Input tour file :", tour_infile
print "Update path length :", update_path_len
print "Output tour file :", tour_outfile
print
tour1_init, tour2_init = read_tours(tour_infile)
assert len(tour1_init)==len(tour2_init)
tour1 = TourEngine(tour1_init)
tour2 = TourEngine(tour2_init)
# cross-splice subtours
tour1_nodes = tour1.get_current_tour_node_list()
tour2_nodes = tour2.get_current_tour_node_list()
tour1_merged, tour2_merged = opt_merge_tours(tour1_nodes, tour2_nodes, update_path_len,
tour1.external_tour_length)
assert len(tour1_merged) == len(tour2_merged)
assert len(tour1_merged)==len(set(tour1_merged))
assert len(tour2_merged)==len(set(tour2_merged))
# do 2opt passes on entirity of both merged tours to improve links between subtours
tour1.clear_all_edges()
tour1 = TourEngine(tour1_merged)
tour2 = TourEngine(tour2_merged)
print '\nstarting 2opt\n'
updater = itertools.izip(tour1.tour_update_generator(),
tour2.tour_update_generator())
do_2opt_passes(tour1, tour2, updater, len(tour1_merged),
batch_size=BATCH_SIZE_FULLTOUR,
min_pass_improvement=MIN_PASS_IMPROVEMENT_FULLTOUR)
# write results
tour1_nodelist = tour1.get_current_tour_node_list()
tour2_nodelist = tour2.get_current_tour_node_list()
write_tours(tour1_nodelist, tour2_nodelist, tour_outfile)
print "Done.\n"
def TEST_do_swap():
l = [0,4,7,8,9,1,2,3,5,6]
print "node list :", l
nnl = to_next_node_list(l)
print "next node list before swap:", nnl
do_swap(nnl, 2,3,7,8)
print "next node list after swap:", nnl
print
print "node list :", l
print "node list after swap :", to_node_list(nnl, 0)
if __name__ == '__main__':
main()
| improving = False | conditional_block |
subtour2opt.py | from itertools import izip
import cPickle
import math
import numpy
import random
import sys
import pandas
import itertools
import time
CITIES_NEIGHBORS_FILE = '/home/chefele/kaggle/Santa/data/santa_cities_nndists.pkl'
CITIES_INFO_FILE = '/home/chefele/kaggle/Santa/download/santa_cities.csv'
MIN_PASS_IMPROVEMENT_FULLTOUR = 1000 # must be >0; 1000 gets ~99% of possible improvement with this data
MIN_PASS_IMPROVEMENT_SUBTOUR = 50
FAST_SEARCH = True
BATCH_SIZE_FULLTOUR = 1000 # Num nodes to process between prints of status updates
BATCH_SIZE_SUBTOUR = 1000 # Num nodes to process between prints of status updates
MAX_DISTANCE = 40000 # 20K x 20K image, so need any value >sqrt(2)*20K
RANDOM_SEED = 1234567
random.seed(RANDOM_SEED)
class TourEngine:
neighbors = [] # read only, node nearest neighbors' info: id,dist
nodes = [] # dataframe of node id,x,y
edges = {} # read/write, dict of counts of all edges in all graphs/tours
# edges is read/write, hash table of counts of all edges in all graphs/tours
# edges = numpy.zeros(EDGE_HASH_TABLE_SIZE, int)
def __init__(self, starting_tour, load_neighbors=True):
# read only; starting tour of nodes
self.starting_tour = starting_tour
# *** read/write; pointers to next node in this tour's path
# that is, next_node[node_id] = next_node_id
self.next_node = self.to_next_node_list(starting_tour)
# add all edges in the starting tour to the common edges set
for node in xrange(len(self.next_node)):
self.edge_add(node, self.next_node[node])
if len(TourEngine.neighbors) == 0 and load_neighbors:
TourEngine.neighbors = self.get_neighbors()
if len(TourEngine.nodes) == 0:
TourEngine.nodes = self.get_nodes()
# for speed, convert to numpy arrays (pandas dataframe access much slower)
self.nodes_x = numpy.array(TourEngine.nodes['x'])
self.nodes_y = numpy.array(TourEngine.nodes['y'])
def clear_all_edges(self):
TourEngine.edges = {}
def get_neighbors(self):
print "Loading nearest neighbor distances from:", CITIES_NEIGHBORS_FILE,"...",
sys.stdout.flush()
fin = open(CITIES_NEIGHBORS_FILE, 'rb')
neighbors = cPickle.load(fin)
fin.close()
print "Done."
sys.stdout.flush()
return neighbors # list of dataframes, one per city
def get_nodes(self):
print "Loading node data from:", CITIES_INFO_FILE,"...",
sys.stdout.flush()
nodes = pandas.read_csv(CITIES_INFO_FILE)
print "Done."
sys.stdout.flush()
return nodes
def to_next_node_list(self, node_lst): # node list -> next_node_list
# NOTE asserts below doesn't work with subpaths (only with all nodes)
# assert len(node_lst) == len(set(node_lst)), 'Duplicate nodes!'
# assert len(node_lst) == max(node_lst)+1, 'Missing nodes!'
next_node_lst = [None] * (max(node_lst)+1)
# link the nodes in the body of the list
for cur_node, next_node in izip(node_lst[:-1], node_lst[1:]):
next_node_lst[cur_node] = next_node
# now link the end of the list to the start
cur_node, next_node = node_lst[-1], node_lst[0]
next_node_lst[cur_node] = next_node
return next_node_lst
def to_node_list(self, next_node_list, node_start):
node_list = [node_start]
node = next_node_list[node_start]
while node != node_start:
node_list.append(node)
node = next_node_list[node]
return node_list
def get_current_tour_node_list(self):
return self.to_node_list(self.next_node, self.starting_tour[0])
def dist(self, node1, node2):
dx = self.nodes_x[node1] - self.nodes_x[node2]
dy = self.nodes_y[node1] - self.nodes_y[node2]
edge_dist = math.sqrt(dx*dx + dy*dy)
return edge_dist
def edge_hash(self, node1, node2):
return hash( ( min(node1,node2), max(node1,node2) ) ) % EDGE_HASH_TABLE_SIZE
def edge_exists(self, node1, node2):
# return TourEngine.edges[self.edge_hash(node1,node2)] > 0
return (node1,node2) in TourEngine.edges or \
(node2,node1) in TourEngine.edges
def edge_multiple_exists(self, node1, node2):
# return TourEngine.edges[self.edge_hash(node1,node2)] > 1
return TourEngine.edges.get((node1,node2), 0) > 1 or \
TourEngine.edges.get((node2,node1), 0) > 1
def edge_add(self, node1, node2):
# TourEngine.edges[self.edge_hash(node1,node2)] += 1
e12 = (node1,node2)
e21 = (node2,node1)
TourEngine.edges[e12] = TourEngine.edges.setdefault(e12, 0) + 1
TourEngine.edges[e21] = TourEngine.edges.setdefault(e21, 0) + 1
def edge_delete(self, node1, node2):
# TourEngine.edges[self.edge_hash(node1,node2)] -= 1
e12 = (node1,node2)
e21 = (node2,node1)
edges = TourEngine.edges
assert edges[e12]>0 and edges[e21]>0, "Deleting nonexistant edge"
edges[e12] -= 1
edges[e21] -= 1
if edges[e12] == 0:
del edges[e12]
if edges[e21] == 0:
del edges[e21]
# TourEngine.edges.remove( (node1, node2) )
# TourEngine.edges.remove( (node2, node1) )
def tour_update_generator(self, update_path=None):
if update_path == None:
update_path = self.starting_tour
update_path_nodes = set(update_path)
# infinite sequence of nodes in random order to update
node_generator = itertools.cycle(random.sample(update_path, len(update_path)))
for node1 in node_generator:
node2 = self.next_node[node1]
assert node1 != node2
dist12 = self.dist(node1, node2) # original edge
swap_found = False
if self.edge_multiple_exists(node1, node2):
best_dist_diff = MAX_DISTANCE # forces a swap
else:
best_dist_diff = 0 # only swaps if tot dist improves
for node3 in TourEngine.neighbors[node1].id: # sorted by distance
node4 = self.next_node[node3]
if len(set((node1,node2,node3,node4)))<4: # skip overlaps
continue
if (node1 not in update_path_nodes) or (node2 not in update_path_nodes) or \
(node3 not in update_path_nodes) or (node4 not in update_path_nodes):
continue # only do 'in-region' updates along update path
dist34 = self.dist(node3, node4) # original edge
dist13 = self.dist(node1, node3) # proposed swap edge
dist24 = self.dist(node2, node4) # proposed swap edge
dist_diff = (dist13 + dist24) - (dist12 + dist34)
if dist_diff < best_dist_diff and \
not self.edge_exists(node1, node3) and \
not self.edge_exists(node2, node4):
swap_found = True
best_dist_diff, best_node3, best_node4 = dist_diff, node3, node4
# TODO for max accuracy, remove the if & break below?
if (dist13 > dist12) and FAST_SEARCH: # Much faster (3x-10x), but path length is ~1% more
break
# note the symmetry of test, since the outer node2 loop will
# eventually hit the other points
if swap_found:
self.do_swap(node1, node2, best_node3, best_node4)
self.edge_delete(node1, node2)
self.edge_delete(best_node3, best_node4)
self.edge_add(node1, best_node3)
self.edge_add(node2, best_node4)
yield True # NOTE this functionis a generator!
def do_swap(self, node1, node2, node3, node4):
# does a swap, updating self.next_node array in-place
assert node2 == self.next_node[node1], 'node2 does not follow node1 in tour'
assert node4 == self.next_node[node3], 'node4 does not follow node3 in tour'
self.next_node[node1] = node3
self.reverse_path(node2, node4)
self.next_node[node2] = node4
def reverse_path(self, head, tail):
# reverses a path in self.next_node array in-place (like reversing a linked list)
previous = None
node = head
while node != tail:
temp = self.next_node[node]
self.next_node[node] = previous
previous = node
node = temp
def get_current_tour(self):
return self.to_node_list(self.next_node, self.starting_tour[0])
def get_length_common(self, next_node):
return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node) if n2!=None))
def get_tour_length(self):
# n1 = node, n2 = next_node[n1]
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(self.next_node)))
return self.get_length_common(self.next_node)
def external_tour_length(self, node_list):
next_node = self.to_next_node_list(node_list)
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node)))
return self.get_length_common(next_node)
def external_path_length(self, node_list):
next_node = self.to_next_node_list(node_list)
return self.get_length_common(next_node[:-1])
# return sum(( self.dist(n1,n2) for n1, n2 in enumerate(next_node[:-1])))
def get_edge_counts(self):
# return (TourEngine.edges.min(), TourEngine.edges.max())
edges = TourEngine.edges
ecounts = {}
for e in edges:
ecounts[edges[e]] = ecounts.setdefault(edges[e],0) + 1
return [(cnt, ecounts[cnt]) for cnt in sorted(ecounts)]
def read_tours(tour_file):
|
def write_tours(tour1, tour2, outfile):
fout = open(outfile, 'w')
fout.write('path1,path2\n')
lines = [str(n1)+','+str(n2) for n1, n2 in izip(tour1, tour2)]
for line in lines:
fout.write(line+'\n')
fout.close()
print '\nWrote 2-OPT tours to: ', outfile, '\n'
def chunks(l, n):
# split a list into chunks of length n
return [l[i:i+n] for i in range(0, len(l), n)]
def shuffled_paths(node_list, path_len):
return [random.sample(path, len(path)) for path in chunks(node_list, path_len)]
def z_order_id(x,y):
# z-order id is interleaving of 16-bit binary strings of the int x & y coordinates
x, y = int(x), int(y)
sx = '{:016b}'.format(x)
sy = '{:016b}'.format(y)
return ''.join(i for j in zip(sx,sy) for i in j)
def z_order_paths(node_list, path_len):
# TourEngine.nodes = df of id, x, y
zpaths = []
for path in chunks(node_list, path_len):
xs = TourEngine.nodes['x'][path]
ys = TourEngine.nodes['y'][path]
z_node = [(z_order_id(x,y), node) for node, x,y in zip(path, xs, ys)]
z_order_nodes = [node for z, node in sorted(z_node)]
zpaths.append(z_order_nodes)
return zpaths
def bit_counter_iterator(nbits):
for bit_tuple in itertools.product((0,1), repeat=nbits):
yield bit_tuple
class PathLengthCache:
def __init__(self, tour_engine):
self.cache = {}
self.tour_engine = tour_engine
def path_length(self, path):
path = tuple(path)
#print "PathLengthCache.path_length():", path
#print "Min/max for path:", min(path),max(path)
if path not in self.cache:
self.cache[path] = self.tour_engine.external_path_length(path)
return self.cache[path]
def unchunk(lst_of_lsts):
# returns list of lists of nodes into a single concatenated list of nodes
return sum(lst_of_lsts, [])
def print_tour_len_from_chunks(t1c, t2c, f_tourlen):
tourlen1 = f_tourlen(unchunk(t1c))
tourlen2 = f_tourlen(unchunk(t2c))
print 'REAL LENGTHS:', 'tour1:', round(tourlen1), 'tour2:', round(tourlen2)
sys.stdout.flush()
def opt_merge_tours(tour1_nodes, tour2_nodes, chunk_length, f_tourlen):
assert len(tour1_nodes) == len(tour2_nodes), 'Tour lengths differ!'
t1c = chunks(tour1_nodes, chunk_length)
t2c = chunks(tour2_nodes, chunk_length)
n_chunks = len(t1c)
assert len(t1c) == len(t2c), 'Unequal number of tour chunks!'
passes = 0
last_gap = None
improving = True
while improving:
improving = False
for i, n in enumerate(random.sample(range(0,n_chunks), n_chunks)):
tourlen1_noswap = f_tourlen(unchunk(t1c))
tourlen2_noswap = f_tourlen(unchunk(t2c))
t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tour
tourlen1_swap = f_tourlen(unchunk(t1c))
tourlen2_swap = f_tourlen(unchunk(t2c))
t1c[n], t2c[n] = t2c[n], t1c[n] # swap back
if abs(tourlen1_swap - tourlen2_swap) < \
abs(tourlen1_noswap - tourlen2_noswap):
t1c[n], t2c[n] = t2c[n], t1c[n] # swap tour chunk n between tours
improving = True
vals = (i, n, 'swap', int(tourlen1_swap), int(tourlen2_swap))
print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
else:
vals = (i, n, '----', int(tourlen1_noswap), int(tourlen2_noswap))
print '%4i chunk: %4i %6s tour1: %8i tour2: %8i' % vals
sys.stdout.flush()
passes += 1
gap = abs(tourlen1_noswap - tourlen2_noswap)
if last_gap:
delta_gap = gap - last_gap
else:
delta_gap = 0
last_gap = gap
print '\npass:', passes,
print 'tour1_len:', round(tourlen1_noswap), 'tour2_len:', round(tourlen2_noswap),
print 'gap:', round(gap), 'delta_gap:', round(delta_gap)
print
sys.stdout.flush()
return unchunk(t1c), unchunk(t2c)
def merge_tours(tour1_nodes, tour2_nodes, chunk_length, tour_engine):
f_plen = tour_engine.external_path_length
f_tlen = tour_engine.external_tour_length
tour1_chunks = chunks(tour1_nodes, chunk_length)
tour2_chunks = chunks(tour2_nodes, chunk_length)
# TODO now just splices alternate chunks; shoud it merge more smartly?
t1,t2 = [],[]
for n, (chunk1, chunk2) in enumerate(zip(tour1_chunks, tour2_chunks)):
print 'chunk_path_lenths',n,'tour1:', int(f_plen(chunk1)), 'tour2:', int(f_plen(chunk2)),
# DEBUG
s1=set(chunk1)
s2=set(chunk2)
print "chunk_sets:",n,"lenIntersect:", len(s1.intersection(s2)), "lenUnion:", len(s1.union(s2))
if n % 2 == 0:
t1.extend(chunk1)
t2.extend(chunk2)
else:
t1.extend(chunk2)
t2.extend(chunk1)
#print 'cumulative tour lengths:', int(f_tlen(t1)), int(f_tlen(t2))
print 'merged tour lengths:',int(f_tlen(t1)), int(f_tlen(t2))
print "***start NONE check in merge_tours***"
for n,(a,b) in enumerate(zip(t1,t2)):
if a==None or b==None:
print n,a,b
print "***end NONE check in merge_tours***"
print "t1, t2 lengths:", len(t1),len(set(t1)), len(t2), len(set(t2))
sys.stdout.flush()
return t1, t2
def do_2opt_passes( tour1, tour2, updater, update_path_len, \
batch_size = BATCH_SIZE_FULLTOUR, \
min_pass_improvement = MIN_PASS_IMPROVEMENT_FULLTOUR):
lmax_last = int(max(tour1.get_tour_length(), tour2.get_tour_length()))
t0 = time.time()
nodes_processed = 0
pass_improvement = { 0:min_pass_improvement }
#num_nodes = max(len(tour1.starting_tour), len(tour2.starting_tour))
improving = True
while improving:
for _ in xrange(batch_size):
updater.next()
nodes_processed += 1
l1 = int(tour1.get_tour_length())
l2 = int(tour2.get_tour_length())
lmax = max(l1,l2)
improvement = lmax_last - lmax
lmax_last = lmax
pass_num = int(nodes_processed / update_path_len) + 1
if pass_num not in pass_improvement:
# started a new pass...
pass_improvement[pass_num] = 0
if (pass_improvement[pass_num - 1] < min_pass_improvement) and \
(pass_improvement[pass_num - 1] >= 0) :
improving = False
pass_improvement[pass_num] += improvement
print "pass:", pass_num, "nodes:", nodes_processed,
print "tour1:", l1, "tour2:",l2,
print "max:",lmax, "imp:",improvement,
print "pass_imp:", pass_improvement[pass_num],
print "secs:", int(time.time()-t0),
# print "edge counts:", tour1.get_edge_counts(), tour2.get_edge_counts()
ecounts = tour1.get_edge_counts()
print "edges:", ecounts
sys.stdout.flush()
#print "checking tour lengths"
#tour1_nodelist = tour1.get_current_tour_node_list()
#tour2_nodelist = tour2.get_current_tour_node_list()
#print "tour1_length", len(tour1_nodelist), 'uniq:',len(set(tour1_nodelist)),
#print "tour2_length", len(tour2_nodelist), 'uniq:',len(set(tour2_nodelist))
# print "FOR DEBUG -- aborting loop"
# break # TODO FOR DEBUGGING -- so remove thie!
print "\nRESULTS ",
print "passes:", pass_num, " nodes:", nodes_processed,
print " tour1:", l1, " tour2:",l2,
print " max:",lmax, " secs:", int(time.time()-t0),
print " 2edges:", ecounts
def main():
print "\n*** 2-OPT for Traveling Santa Problem ***\n"
tour_infile = sys.argv[1]
update_path_len = int(sys.argv[2])
tour_outfile = sys.argv[3]
print "Input tour file :", tour_infile
print "Update path length :", update_path_len
print "Output tour file :", tour_outfile
print
tour1_init, tour2_init = read_tours(tour_infile)
assert len(tour1_init)==len(tour2_init)
tour1 = TourEngine(tour1_init)
tour2 = TourEngine(tour2_init)
# cross-splice subtours
tour1_nodes = tour1.get_current_tour_node_list()
tour2_nodes = tour2.get_current_tour_node_list()
tour1_merged, tour2_merged = opt_merge_tours(tour1_nodes, tour2_nodes, update_path_len,
tour1.external_tour_length)
assert len(tour1_merged) == len(tour2_merged)
assert len(tour1_merged)==len(set(tour1_merged))
assert len(tour2_merged)==len(set(tour2_merged))
# do 2opt passes on entirity of both merged tours to improve links between subtours
tour1.clear_all_edges()
tour1 = TourEngine(tour1_merged)
tour2 = TourEngine(tour2_merged)
print '\nstarting 2opt\n'
updater = itertools.izip(tour1.tour_update_generator(),
tour2.tour_update_generator())
do_2opt_passes(tour1, tour2, updater, len(tour1_merged),
batch_size=BATCH_SIZE_FULLTOUR,
min_pass_improvement=MIN_PASS_IMPROVEMENT_FULLTOUR)
# write results
tour1_nodelist = tour1.get_current_tour_node_list()
tour2_nodelist = tour2.get_current_tour_node_list()
write_tours(tour1_nodelist, tour2_nodelist, tour_outfile)
print "Done.\n"
def TEST_do_swap():
l = [0,4,7,8,9,1,2,3,5,6]
print "node list :", l
nnl = to_next_node_list(l)
print "next node list before swap:", nnl
do_swap(nnl, 2,3,7,8)
print "next node list after swap:", nnl
print
print "node list :", l
print "node list after swap :", to_node_list(nnl, 0)
if __name__ == '__main__':
main()
| df = pandas.read_csv(tour_file)
tour1, tour2 = list(df['path1']), list(df['path2'])
return (tour1, tour2) | identifier_body |
reedsolomon.go | // Package reedsolomon provides a Reed-Solomon erasure encoder.
package reedsolomon
import (
"bytes"
"errors"
"io"
"sync"
"golang.org/x/sys/cpu"
)
var (
useSSSE3 = cpu.X86.HasSSSE3
useAVX2 = cpu.X86.HasAVX2
// ErrInvShardNum will be returned by New, if you attempt to create an
// Encoder where either data or parity shards is zero or less.
ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
// ErrMaxShardNum will be returned by New, if you attempt to create an
// Encoder where data and parity shards are bigger than the order of
// GF(2^8).
ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")
// ErrTooFewShards is returned if too few shards were given to
// Encode/Reconstruct. It will also be returned from Reconstruct if there
// were too few shards to reconstruct the missing data.
ErrTooFewShards = errors.New("too few shards given")
// ErrShardNoData will be returned if there are no shards, or if the length
// of all shards is zero.
ErrShardNoData = errors.New("no shard data")
// ErrShardSize is returned if shard length isn't the same for all shards.
ErrShardSize = errors.New("shard sizes do not match")
// ErrShortData will be returned by Split(), if there isn't enough data to
// fill the number of shards.
ErrShortData = errors.New("not enough data to fill the number of requested shards")
// ErrReconstructRequired is returned if too few data shards are intact and
// a reconstruction is required before you can successfully join the shards.
ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
)
// ReedSolomon contains a matrix for a specific distribution of datashards and
// parity shards.
type ReedSolomon struct {
DataShards int
ParityShards int
shards int // DataShards+ParityShards, for convenience
m matrix
parity [][]byte
}
// buildMatrix creates the matrix to use for encoding, given the number of data
// shards and the number of total shards.
//
// The top square of the matrix is guaranteed to be an identity matrix, which
// means that the data shards are unchanged after encoding.
func buildMatrix(dataShards, totalShards int) matrix {
// Start with a Vandermonde matrix. This matrix would work, in theory, but
// doesn't have the property that the data shards are unchanged after
// encoding.
vm := vandermonde(totalShards, dataShards)
// Multiply by the inverse of the top square of the matrix. This will make
// the top square be the identity matrix, but preserve the property that any
// square subset of rows is invertible.
top := vm.SubMatrix(0, 0, dataShards, dataShards)
topInv, _ := top.Invert()
return vm.Multiply(topInv)
}
// New returns an Encoder with the specified number of shards.
func New(dataShards, parityShards int) (*ReedSolomon, error) {
r := &ReedSolomon{
DataShards: dataShards,
ParityShards: parityShards,
shards: dataShards + parityShards,
}
if dataShards <= 0 || parityShards <= 0 {
return nil, ErrInvShardNum
}
if uint64(dataShards)+uint64(parityShards) > 256 {
return nil, ErrMaxShardNum
}
r.m = buildMatrix(dataShards, r.shards)
r.parity = make([][]byte, parityShards)
for i := range r.parity {
r.parity[i] = r.m[dataShards+i]
}
return r, nil
}
// Encode encodes parity for a set of shards. The number of shards must match
// the number given to New, and each shard must have the same capacity. The data
// in the first r.DataShards elements will be used to generate parity, which is
// written into the remaining elements.
func (r *ReedSolomon) Encode(shards [][]byte) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return err
}
r.codeSomeShardsP(r.parity, shards[:r.DataShards], shards[r.DataShards:], len(shards[0]))
return nil
}
// codeSomeShardsP multiplies, in parallel, a subset of rows from a coding
// matrix by a full set of input shards to produce some output shards.
func (r *ReedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, byteCount int) {
const maxGoroutines = 384
const minSplitSize = 1024
var wg sync.WaitGroup
do := byteCount / maxGoroutines
if do < minSplitSize {
do = minSplitSize
}
// Make sizes divisible by 32
do = (do + 31) & (^31)
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, stop int) {
for c := 0; c < r.DataShards; c++ {
in := inputs[c][start:stop]
for iRow, out := range outputs {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
} else {
galMulSliceXor(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
}
}
}
wg.Done()
}(start, start+do)
start += do
}
wg.Wait()
}
// checkShards checks if shards are the same size.
func checkShards(shards [][]byte, nilok bool) error {
size := shardSize(shards)
if size == 0 {
return ErrShardNoData
}
for _, shard := range shards {
if len(shard) != size {
if len(shard) != 0 || !nilok {
return ErrShardSize
}
}
}
return nil
}
// shardSize return the size of a single shard. The first non-zero size is
// returned, or 0 if all shards are size 0.
func shardSize(shards [][]byte) int {
for _, shard := range shards {
if len(shard) != 0 {
return len(shard)
}
}
return 0
}
// Reconstruct recreates missing data and parity shards, if possible. The input
// should match the input to Encode, with missing shards resliced to have a
// length of 0 (but sufficient capacity to hold a recreated shard).
//
// Reconstruct does not check the integrity of the data; if the input shards do
// not match the shards passed to Encode, it will produce garbage.
func (r *ReedSolomon) Reconstruct(shards [][]byte) error {
return r.reconstruct(shards, false)
}
// ReconstructData is like Reconstruct, but only recreates missing data shards.
func (r *ReedSolomon) ReconstructData(shards [][]byte) error {
return r.reconstruct(shards, true)
}
func (r *ReedSolomon) | (shards [][]byte, dataOnly bool) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, true)
if err != nil {
return err
}
shardSize := shardSize(shards)
// Quick check: are all of the shards present (or, if dataOnly, all of the
// data shards)? If so, there's nothing to do.
numberPresent := 0
dataPresent := 0
for i := 0; i < r.shards; i++ {
if len(shards[i]) != 0 {
numberPresent++
if i < r.DataShards {
dataPresent++
}
}
}
if numberPresent == r.shards || (dataOnly && dataPresent == r.DataShards) {
return nil
}
if numberPresent < r.DataShards {
return ErrTooFewShards
}
// Pull out an array holding just the shards that correspond to the rows of
// the submatrix. These shards will be the input to the decoding process
// that recreates the missing data shards.
//
// Also, create an array of indices of the valid rows we do have.
subShards := make([][]byte, 0, 256)
validIndices := make([]int, 0, 256)
for matrixRow := 0; matrixRow < r.shards && len(validIndices) < r.DataShards; matrixRow++ {
if len(shards[matrixRow]) != 0 {
subShards = append(subShards, shards[matrixRow])
validIndices = append(validIndices, matrixRow)
}
}
// Pull out the rows of the matrix that correspond to the shards that we
// have and build a square matrix. This matrix could be used to generate
// the shards that we have from the original data.
subMatrix := newMatrix(r.DataShards, r.DataShards)
for subMatrixRow, validIndex := range validIndices {
for c := 0; c < r.DataShards; c++ {
subMatrix[subMatrixRow][c] = r.m[validIndex][c]
}
}
// Invert the matrix, so we can go from the encoded shards back to the
// original data. Then pull out the row that generates the shard that we
// want to decode. Note that since this matrix maps back to the original
// data, it can be used to create a data shard, but not a parity shard.
dataDecodeMatrix, err := subMatrix.Invert()
if err != nil {
return err
}
// Re-create any data shards that were missing.
//
// The input to the coding is all of the shards we actually have, and the
// output is the missing data shards. The computation is done using the
// special decode matrix we just built.
outputs := make([][]byte, 0, r.shards)
matrixRows := make([][]byte, 0, r.shards)
for iShard := 0; iShard < r.DataShards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, dataDecodeMatrix[iShard])
}
}
r.codeSomeShardsP(matrixRows, subShards, outputs, shardSize)
if dataOnly {
return nil
}
// Now that we have all of the data shards intact, we can compute any of the
// parity that is missing.
//
// The input to the coding is ALL of the data shards, including any that we
// just calculated. The output is whichever of the data shards were missing.
outputs, matrixRows = outputs[:0], matrixRows[:0]
for iShard := r.DataShards; iShard < r.shards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, r.parity[iShard-r.DataShards])
}
}
r.codeSomeShardsP(matrixRows, shards[:r.DataShards], outputs, shardSize)
return nil
}
// SplitMulti splits data into blocks of shards, where each block has subsize
// bytes. The shards must have sufficient capacity to hold the sharded data. The
// length of the shards will be modified to fit their new contents.
func (r *ReedSolomon) SplitMulti(data []byte, shards [][]byte, subsize int) error {
chunkSize := r.DataShards * subsize
numChunks := len(data) / chunkSize
if len(data)%chunkSize != 0 {
numChunks++
}
// extend shards to proper len
shardSize := numChunks * subsize
for i := range shards {
if cap(shards[i]) < shardSize {
return errors.New("each shard must have capacity of at least len(data)/m")
}
shards[i] = shards[i][:shardSize]
}
// copy data into first DataShards shards, subsize bytes at a time
buf := bytes.NewBuffer(data)
for off := 0; buf.Len() > 0; off += subsize {
for i := 0; i < r.DataShards; i++ {
copy(shards[i][off:], buf.Next(subsize))
}
}
return nil
}
// JoinMulti joins the supplied multi-block shards, writing them to dst. The
// first 'skip' bytes of the recovered data are skipped, and 'writeLen' bytes
// are written in total.
func (r *ReedSolomon) JoinMulti(dst io.Writer, shards [][]byte, subsize, skip, writeLen int) error {
// Do we have enough shards?
if len(shards) < r.DataShards {
return ErrTooFewShards
}
shards = shards[:r.DataShards]
// Do we have enough data?
size := 0
for _, shard := range shards {
if len(shard) == 0 {
return ErrReconstructRequired
}
size += len(shard)
if size >= writeLen {
break
}
}
if size < writeLen {
return ErrShortData
}
// Copy data to dst.
for off := 0; writeLen > 0; off += subsize {
for _, shard := range shards {
shard = shard[off:][:subsize]
if skip >= len(shard) {
skip -= len(shard)
continue
} else if skip > 0 {
shard = shard[skip:]
skip = 0
}
if writeLen < len(shard) {
shard = shard[:writeLen]
}
n, err := dst.Write(shard)
if err != nil {
return err
}
writeLen -= n
}
}
return nil
}
| reconstruct | identifier_name |
reedsolomon.go | // Package reedsolomon provides a Reed-Solomon erasure encoder.
package reedsolomon
import (
"bytes"
"errors"
"io"
"sync"
"golang.org/x/sys/cpu"
)
var (
useSSSE3 = cpu.X86.HasSSSE3
useAVX2 = cpu.X86.HasAVX2
// ErrInvShardNum will be returned by New, if you attempt to create an
// Encoder where either data or parity shards is zero or less.
ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
// ErrMaxShardNum will be returned by New, if you attempt to create an
// Encoder where data and parity shards are bigger than the order of
// GF(2^8).
ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")
// ErrTooFewShards is returned if too few shards were given to
// Encode/Reconstruct. It will also be returned from Reconstruct if there
// were too few shards to reconstruct the missing data.
ErrTooFewShards = errors.New("too few shards given")
// ErrShardNoData will be returned if there are no shards, or if the length
// of all shards is zero.
ErrShardNoData = errors.New("no shard data")
// ErrShardSize is returned if shard length isn't the same for all shards.
ErrShardSize = errors.New("shard sizes do not match")
// ErrShortData will be returned by Split(), if there isn't enough data to
// fill the number of shards.
ErrShortData = errors.New("not enough data to fill the number of requested shards")
// ErrReconstructRequired is returned if too few data shards are intact and
// a reconstruction is required before you can successfully join the shards.
ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
)
// ReedSolomon contains a matrix for a specific distribution of datashards and
// parity shards.
type ReedSolomon struct {
DataShards int
ParityShards int
shards int // DataShards+ParityShards, for convenience
m matrix
parity [][]byte
}
// buildMatrix creates the matrix to use for encoding, given the number of data
// shards and the number of total shards.
//
// The top square of the matrix is guaranteed to be an identity matrix, which
// means that the data shards are unchanged after encoding.
func buildMatrix(dataShards, totalShards int) matrix {
// Start with a Vandermonde matrix. This matrix would work, in theory, but
// doesn't have the property that the data shards are unchanged after
// encoding.
vm := vandermonde(totalShards, dataShards)
// Multiply by the inverse of the top square of the matrix. This will make
// the top square be the identity matrix, but preserve the property that any
// square subset of rows is invertible.
top := vm.SubMatrix(0, 0, dataShards, dataShards)
topInv, _ := top.Invert()
return vm.Multiply(topInv)
}
// New returns an Encoder with the specified number of shards.
func New(dataShards, parityShards int) (*ReedSolomon, error) {
r := &ReedSolomon{
DataShards: dataShards,
ParityShards: parityShards,
shards: dataShards + parityShards,
}
if dataShards <= 0 || parityShards <= 0 {
return nil, ErrInvShardNum
}
if uint64(dataShards)+uint64(parityShards) > 256 {
return nil, ErrMaxShardNum
}
r.m = buildMatrix(dataShards, r.shards)
r.parity = make([][]byte, parityShards)
for i := range r.parity {
r.parity[i] = r.m[dataShards+i]
}
return r, nil
}
// Encode encodes parity for a set of shards. The number of shards must match
// the number given to New, and each shard must have the same capacity. The data
// in the first r.DataShards elements will be used to generate parity, which is
// written into the remaining elements.
func (r *ReedSolomon) Encode(shards [][]byte) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return err
}
r.codeSomeShardsP(r.parity, shards[:r.DataShards], shards[r.DataShards:], len(shards[0]))
return nil
}
// codeSomeShardsP multiplies, in parallel, a subset of rows from a coding
// matrix by a full set of input shards to produce some output shards.
func (r *ReedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, byteCount int) {
const maxGoroutines = 384
const minSplitSize = 1024
var wg sync.WaitGroup
do := byteCount / maxGoroutines
if do < minSplitSize {
do = minSplitSize
}
// Make sizes divisible by 32
do = (do + 31) & (^31)
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, stop int) {
for c := 0; c < r.DataShards; c++ {
in := inputs[c][start:stop]
for iRow, out := range outputs {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
} else {
galMulSliceXor(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
}
}
}
wg.Done()
}(start, start+do)
start += do
}
wg.Wait()
}
// checkShards checks if shards are the same size.
func checkShards(shards [][]byte, nilok bool) error {
size := shardSize(shards)
if size == 0 {
return ErrShardNoData
}
for _, shard := range shards {
if len(shard) != size {
if len(shard) != 0 || !nilok {
return ErrShardSize
}
}
}
return nil
}
// shardSize return the size of a single shard. The first non-zero size is
// returned, or 0 if all shards are size 0.
func shardSize(shards [][]byte) int {
for _, shard := range shards {
if len(shard) != 0 {
return len(shard)
}
}
return 0
}
// Reconstruct recreates missing data and parity shards, if possible. The input
// should match the input to Encode, with missing shards resliced to have a
// length of 0 (but sufficient capacity to hold a recreated shard).
//
// Reconstruct does not check the integrity of the data; if the input shards do
// not match the shards passed to Encode, it will produce garbage.
func (r *ReedSolomon) Reconstruct(shards [][]byte) error {
return r.reconstruct(shards, false)
}
// ReconstructData is like Reconstruct, but only recreates missing data shards.
func (r *ReedSolomon) ReconstructData(shards [][]byte) error {
return r.reconstruct(shards, true)
}
func (r *ReedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, true)
if err != nil {
return err
}
shardSize := shardSize(shards)
// Quick check: are all of the shards present (or, if dataOnly, all of the
// data shards)? If so, there's nothing to do.
numberPresent := 0
dataPresent := 0
for i := 0; i < r.shards; i++ {
if len(shards[i]) != 0 {
numberPresent++
if i < r.DataShards {
dataPresent++
}
}
}
if numberPresent == r.shards || (dataOnly && dataPresent == r.DataShards) {
return nil
}
if numberPresent < r.DataShards {
return ErrTooFewShards
}
// Pull out an array holding just the shards that correspond to the rows of
// the submatrix. These shards will be the input to the decoding process
// that recreates the missing data shards.
//
// Also, create an array of indices of the valid rows we do have.
subShards := make([][]byte, 0, 256)
validIndices := make([]int, 0, 256)
for matrixRow := 0; matrixRow < r.shards && len(validIndices) < r.DataShards; matrixRow++ {
if len(shards[matrixRow]) != 0 {
subShards = append(subShards, shards[matrixRow])
validIndices = append(validIndices, matrixRow)
}
}
// Pull out the rows of the matrix that correspond to the shards that we
// have and build a square matrix. This matrix could be used to generate
// the shards that we have from the original data.
subMatrix := newMatrix(r.DataShards, r.DataShards)
for subMatrixRow, validIndex := range validIndices {
for c := 0; c < r.DataShards; c++ {
subMatrix[subMatrixRow][c] = r.m[validIndex][c]
}
}
// Invert the matrix, so we can go from the encoded shards back to the
// original data. Then pull out the row that generates the shard that we
// want to decode. Note that since this matrix maps back to the original
// data, it can be used to create a data shard, but not a parity shard.
dataDecodeMatrix, err := subMatrix.Invert()
if err != nil {
return err
}
// Re-create any data shards that were missing.
//
// The input to the coding is all of the shards we actually have, and the
// output is the missing data shards. The computation is done using the
// special decode matrix we just built.
outputs := make([][]byte, 0, r.shards)
matrixRows := make([][]byte, 0, r.shards)
for iShard := 0; iShard < r.DataShards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, dataDecodeMatrix[iShard])
}
}
r.codeSomeShardsP(matrixRows, subShards, outputs, shardSize)
if dataOnly {
return nil
}
// Now that we have all of the data shards intact, we can compute any of the
// parity that is missing.
//
// The input to the coding is ALL of the data shards, including any that we
// just calculated. The output is whichever of the data shards were missing.
outputs, matrixRows = outputs[:0], matrixRows[:0]
for iShard := r.DataShards; iShard < r.shards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, r.parity[iShard-r.DataShards])
}
}
r.codeSomeShardsP(matrixRows, shards[:r.DataShards], outputs, shardSize)
return nil
}
// SplitMulti splits data into blocks of shards, where each block has subsize
// bytes. The shards must have sufficient capacity to hold the sharded data. The
// length of the shards will be modified to fit their new contents.
func (r *ReedSolomon) SplitMulti(data []byte, shards [][]byte, subsize int) error |
// JoinMulti joins the supplied multi-block shards, writing them to dst. The
// first 'skip' bytes of the recovered data are skipped, and 'writeLen' bytes
// are written in total.
func (r *ReedSolomon) JoinMulti(dst io.Writer, shards [][]byte, subsize, skip, writeLen int) error {
// Do we have enough shards?
if len(shards) < r.DataShards {
return ErrTooFewShards
}
shards = shards[:r.DataShards]
// Do we have enough data?
size := 0
for _, shard := range shards {
if len(shard) == 0 {
return ErrReconstructRequired
}
size += len(shard)
if size >= writeLen {
break
}
}
if size < writeLen {
return ErrShortData
}
// Copy data to dst.
for off := 0; writeLen > 0; off += subsize {
for _, shard := range shards {
shard = shard[off:][:subsize]
if skip >= len(shard) {
skip -= len(shard)
continue
} else if skip > 0 {
shard = shard[skip:]
skip = 0
}
if writeLen < len(shard) {
shard = shard[:writeLen]
}
n, err := dst.Write(shard)
if err != nil {
return err
}
writeLen -= n
}
}
return nil
}
| {
chunkSize := r.DataShards * subsize
numChunks := len(data) / chunkSize
if len(data)%chunkSize != 0 {
numChunks++
}
// extend shards to proper len
shardSize := numChunks * subsize
for i := range shards {
if cap(shards[i]) < shardSize {
return errors.New("each shard must have capacity of at least len(data)/m")
}
shards[i] = shards[i][:shardSize]
}
// copy data into first DataShards shards, subsize bytes at a time
buf := bytes.NewBuffer(data)
for off := 0; buf.Len() > 0; off += subsize {
for i := 0; i < r.DataShards; i++ {
copy(shards[i][off:], buf.Next(subsize))
}
}
return nil
} | identifier_body |
reedsolomon.go | // Package reedsolomon provides a Reed-Solomon erasure encoder.
package reedsolomon
import (
"bytes"
"errors"
"io"
"sync"
"golang.org/x/sys/cpu"
)
var (
useSSSE3 = cpu.X86.HasSSSE3
useAVX2 = cpu.X86.HasAVX2
// ErrInvShardNum will be returned by New, if you attempt to create an
// Encoder where either data or parity shards is zero or less.
ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
// ErrMaxShardNum will be returned by New, if you attempt to create an
// Encoder where data and parity shards are bigger than the order of
// GF(2^8).
ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")
// ErrTooFewShards is returned if too few shards were given to
// Encode/Reconstruct. It will also be returned from Reconstruct if there
// were too few shards to reconstruct the missing data.
ErrTooFewShards = errors.New("too few shards given")
// ErrShardNoData will be returned if there are no shards, or if the length
// of all shards is zero.
ErrShardNoData = errors.New("no shard data")
// ErrShardSize is returned if shard length isn't the same for all shards.
ErrShardSize = errors.New("shard sizes do not match")
// ErrShortData will be returned by Split(), if there isn't enough data to
// fill the number of shards.
ErrShortData = errors.New("not enough data to fill the number of requested shards")
// ErrReconstructRequired is returned if too few data shards are intact and
// a reconstruction is required before you can successfully join the shards.
ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
)
// ReedSolomon contains a matrix for a specific distribution of datashards and
// parity shards.
type ReedSolomon struct {
DataShards int
ParityShards int
shards int // DataShards+ParityShards, for convenience
m matrix
parity [][]byte
}
// buildMatrix creates the matrix to use for encoding, given the number of data
// shards and the number of total shards.
//
// The top square of the matrix is guaranteed to be an identity matrix, which
// means that the data shards are unchanged after encoding.
func buildMatrix(dataShards, totalShards int) matrix {
// Start with a Vandermonde matrix. This matrix would work, in theory, but
// doesn't have the property that the data shards are unchanged after
// encoding.
vm := vandermonde(totalShards, dataShards)
// Multiply by the inverse of the top square of the matrix. This will make
// the top square be the identity matrix, but preserve the property that any
// square subset of rows is invertible.
top := vm.SubMatrix(0, 0, dataShards, dataShards)
topInv, _ := top.Invert()
return vm.Multiply(topInv)
}
// New returns an Encoder with the specified number of shards.
func New(dataShards, parityShards int) (*ReedSolomon, error) {
r := &ReedSolomon{
DataShards: dataShards,
ParityShards: parityShards,
shards: dataShards + parityShards,
}
if dataShards <= 0 || parityShards <= 0 {
return nil, ErrInvShardNum
}
if uint64(dataShards)+uint64(parityShards) > 256 {
return nil, ErrMaxShardNum
}
r.m = buildMatrix(dataShards, r.shards)
r.parity = make([][]byte, parityShards)
for i := range r.parity {
r.parity[i] = r.m[dataShards+i]
}
return r, nil
}
// Encode encodes parity for a set of shards. The number of shards must match
// the number given to New, and each shard must have the same capacity. The data
// in the first r.DataShards elements will be used to generate parity, which is
// written into the remaining elements.
func (r *ReedSolomon) Encode(shards [][]byte) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return err
}
r.codeSomeShardsP(r.parity, shards[:r.DataShards], shards[r.DataShards:], len(shards[0]))
return nil
}
// codeSomeShardsP multiplies, in parallel, a subset of rows from a coding
// matrix by a full set of input shards to produce some output shards.
func (r *ReedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, byteCount int) {
const maxGoroutines = 384
const minSplitSize = 1024
var wg sync.WaitGroup
do := byteCount / maxGoroutines
if do < minSplitSize {
do = minSplitSize
}
// Make sizes divisible by 32
do = (do + 31) & (^31)
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, stop int) {
for c := 0; c < r.DataShards; c++ {
in := inputs[c][start:stop]
for iRow, out := range outputs {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
} else {
galMulSliceXor(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
}
}
}
wg.Done()
}(start, start+do)
start += do
}
wg.Wait()
}
// checkShards checks if shards are the same size.
func checkShards(shards [][]byte, nilok bool) error {
size := shardSize(shards)
if size == 0 {
return ErrShardNoData
}
for _, shard := range shards {
if len(shard) != size {
if len(shard) != 0 || !nilok {
return ErrShardSize
}
}
}
return nil
}
// shardSize return the size of a single shard. The first non-zero size is
// returned, or 0 if all shards are size 0.
func shardSize(shards [][]byte) int {
for _, shard := range shards {
if len(shard) != 0 {
return len(shard)
}
}
return 0
}
// Reconstruct recreates missing data and parity shards, if possible. The input
// should match the input to Encode, with missing shards resliced to have a
// length of 0 (but sufficient capacity to hold a recreated shard).
//
// Reconstruct does not check the integrity of the data; if the input shards do
// not match the shards passed to Encode, it will produce garbage.
func (r *ReedSolomon) Reconstruct(shards [][]byte) error {
return r.reconstruct(shards, false)
}
// ReconstructData is like Reconstruct, but only recreates missing data shards.
func (r *ReedSolomon) ReconstructData(shards [][]byte) error {
return r.reconstruct(shards, true)
}
func (r *ReedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, true)
if err != nil {
return err
}
shardSize := shardSize(shards)
// Quick check: are all of the shards present (or, if dataOnly, all of the
// data shards)? If so, there's nothing to do.
numberPresent := 0
dataPresent := 0
for i := 0; i < r.shards; i++ {
if len(shards[i]) != 0 {
numberPresent++
if i < r.DataShards {
dataPresent++
}
}
}
if numberPresent == r.shards || (dataOnly && dataPresent == r.DataShards) {
return nil
}
if numberPresent < r.DataShards {
return ErrTooFewShards
}
// Pull out an array holding just the shards that correspond to the rows of
// the submatrix. These shards will be the input to the decoding process
// that recreates the missing data shards.
//
// Also, create an array of indices of the valid rows we do have.
subShards := make([][]byte, 0, 256)
validIndices := make([]int, 0, 256)
for matrixRow := 0; matrixRow < r.shards && len(validIndices) < r.DataShards; matrixRow++ {
if len(shards[matrixRow]) != 0 {
subShards = append(subShards, shards[matrixRow])
validIndices = append(validIndices, matrixRow)
}
}
// Pull out the rows of the matrix that correspond to the shards that we
// have and build a square matrix. This matrix could be used to generate
// the shards that we have from the original data.
subMatrix := newMatrix(r.DataShards, r.DataShards)
for subMatrixRow, validIndex := range validIndices |
// Invert the matrix, so we can go from the encoded shards back to the
// original data. Then pull out the row that generates the shard that we
// want to decode. Note that since this matrix maps back to the original
// data, it can be used to create a data shard, but not a parity shard.
dataDecodeMatrix, err := subMatrix.Invert()
if err != nil {
return err
}
// Re-create any data shards that were missing.
//
// The input to the coding is all of the shards we actually have, and the
// output is the missing data shards. The computation is done using the
// special decode matrix we just built.
outputs := make([][]byte, 0, r.shards)
matrixRows := make([][]byte, 0, r.shards)
for iShard := 0; iShard < r.DataShards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, dataDecodeMatrix[iShard])
}
}
r.codeSomeShardsP(matrixRows, subShards, outputs, shardSize)
if dataOnly {
return nil
}
// Now that we have all of the data shards intact, we can compute any of the
// parity that is missing.
//
// The input to the coding is ALL of the data shards, including any that we
// just calculated. The output is whichever of the data shards were missing.
outputs, matrixRows = outputs[:0], matrixRows[:0]
for iShard := r.DataShards; iShard < r.shards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, r.parity[iShard-r.DataShards])
}
}
r.codeSomeShardsP(matrixRows, shards[:r.DataShards], outputs, shardSize)
return nil
}
// SplitMulti splits data into blocks of shards, where each block has subsize
// bytes. The shards must have sufficient capacity to hold the sharded data. The
// length of the shards will be modified to fit their new contents.
func (r *ReedSolomon) SplitMulti(data []byte, shards [][]byte, subsize int) error {
chunkSize := r.DataShards * subsize
numChunks := len(data) / chunkSize
if len(data)%chunkSize != 0 {
numChunks++
}
// extend shards to proper len
shardSize := numChunks * subsize
for i := range shards {
if cap(shards[i]) < shardSize {
return errors.New("each shard must have capacity of at least len(data)/m")
}
shards[i] = shards[i][:shardSize]
}
// copy data into first DataShards shards, subsize bytes at a time
buf := bytes.NewBuffer(data)
for off := 0; buf.Len() > 0; off += subsize {
for i := 0; i < r.DataShards; i++ {
copy(shards[i][off:], buf.Next(subsize))
}
}
return nil
}
// JoinMulti joins the supplied multi-block shards, writing them to dst. The
// first 'skip' bytes of the recovered data are skipped, and 'writeLen' bytes
// are written in total.
func (r *ReedSolomon) JoinMulti(dst io.Writer, shards [][]byte, subsize, skip, writeLen int) error {
// Do we have enough shards?
if len(shards) < r.DataShards {
return ErrTooFewShards
}
shards = shards[:r.DataShards]
// Do we have enough data?
size := 0
for _, shard := range shards {
if len(shard) == 0 {
return ErrReconstructRequired
}
size += len(shard)
if size >= writeLen {
break
}
}
if size < writeLen {
return ErrShortData
}
// Copy data to dst.
for off := 0; writeLen > 0; off += subsize {
for _, shard := range shards {
shard = shard[off:][:subsize]
if skip >= len(shard) {
skip -= len(shard)
continue
} else if skip > 0 {
shard = shard[skip:]
skip = 0
}
if writeLen < len(shard) {
shard = shard[:writeLen]
}
n, err := dst.Write(shard)
if err != nil {
return err
}
writeLen -= n
}
}
return nil
}
| {
for c := 0; c < r.DataShards; c++ {
subMatrix[subMatrixRow][c] = r.m[validIndex][c]
}
} | conditional_block |
reedsolomon.go | // Package reedsolomon provides a Reed-Solomon erasure encoder.
package reedsolomon
import (
"bytes"
"errors"
"io"
"sync"
"golang.org/x/sys/cpu"
)
var (
useSSSE3 = cpu.X86.HasSSSE3
useAVX2 = cpu.X86.HasAVX2
// ErrInvShardNum will be returned by New, if you attempt to create an
// Encoder where either data or parity shards is zero or less.
ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
// ErrMaxShardNum will be returned by New, if you attempt to create an
// Encoder where data and parity shards are bigger than the order of
// GF(2^8).
ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")
// ErrTooFewShards is returned if too few shards were given to
// Encode/Reconstruct. It will also be returned from Reconstruct if there
// were too few shards to reconstruct the missing data.
ErrTooFewShards = errors.New("too few shards given")
// ErrShardNoData will be returned if there are no shards, or if the length
// of all shards is zero.
ErrShardNoData = errors.New("no shard data")
// ErrShardSize is returned if shard length isn't the same for all shards.
ErrShardSize = errors.New("shard sizes do not match")
// ErrShortData will be returned by Split(), if there isn't enough data to
// fill the number of shards.
ErrShortData = errors.New("not enough data to fill the number of requested shards")
// ErrReconstructRequired is returned if too few data shards are intact and
// a reconstruction is required before you can successfully join the shards.
ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
)
// ReedSolomon contains a matrix for a specific distribution of datashards and
// parity shards.
type ReedSolomon struct {
DataShards int
ParityShards int
shards int // DataShards+ParityShards, for convenience
m matrix
parity [][]byte
}
// buildMatrix creates the matrix to use for encoding, given the number of data
// shards and the number of total shards.
//
// The top square of the matrix is guaranteed to be an identity matrix, which
// means that the data shards are unchanged after encoding.
func buildMatrix(dataShards, totalShards int) matrix {
// Start with a Vandermonde matrix. This matrix would work, in theory, but
// doesn't have the property that the data shards are unchanged after
// encoding.
vm := vandermonde(totalShards, dataShards)
// Multiply by the inverse of the top square of the matrix. This will make
// the top square be the identity matrix, but preserve the property that any
// square subset of rows is invertible.
top := vm.SubMatrix(0, 0, dataShards, dataShards)
topInv, _ := top.Invert()
return vm.Multiply(topInv)
}
// New returns an Encoder with the specified number of shards.
func New(dataShards, parityShards int) (*ReedSolomon, error) {
r := &ReedSolomon{
DataShards: dataShards,
ParityShards: parityShards,
shards: dataShards + parityShards,
}
if dataShards <= 0 || parityShards <= 0 {
return nil, ErrInvShardNum
}
if uint64(dataShards)+uint64(parityShards) > 256 {
return nil, ErrMaxShardNum
}
r.m = buildMatrix(dataShards, r.shards)
r.parity = make([][]byte, parityShards)
for i := range r.parity {
r.parity[i] = r.m[dataShards+i]
}
return r, nil
}
// Encode encodes parity for a set of shards. The number of shards must match
// the number given to New, and each shard must have the same capacity. The data
// in the first r.DataShards elements will be used to generate parity, which is
// written into the remaining elements.
func (r *ReedSolomon) Encode(shards [][]byte) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return err
}
r.codeSomeShardsP(r.parity, shards[:r.DataShards], shards[r.DataShards:], len(shards[0]))
return nil
}
// codeSomeShardsP multiplies, in parallel, a subset of rows from a coding
// matrix by a full set of input shards to produce some output shards.
func (r *ReedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, byteCount int) {
const maxGoroutines = 384
const minSplitSize = 1024
var wg sync.WaitGroup
do := byteCount / maxGoroutines
if do < minSplitSize {
do = minSplitSize
}
// Make sizes divisible by 32
do = (do + 31) & (^31)
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, stop int) {
for c := 0; c < r.DataShards; c++ {
in := inputs[c][start:stop]
for iRow, out := range outputs {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
} else {
galMulSliceXor(matrixRows[iRow][c], in, out[start:stop], useSSSE3, useAVX2)
}
}
}
wg.Done()
}(start, start+do)
start += do
}
wg.Wait()
}
// checkShards checks if shards are the same size.
func checkShards(shards [][]byte, nilok bool) error {
size := shardSize(shards)
if size == 0 {
return ErrShardNoData
}
for _, shard := range shards {
if len(shard) != size {
if len(shard) != 0 || !nilok {
return ErrShardSize
}
}
}
return nil
}
// shardSize return the size of a single shard. The first non-zero size is
// returned, or 0 if all shards are size 0.
func shardSize(shards [][]byte) int {
for _, shard := range shards {
if len(shard) != 0 {
return len(shard)
}
}
return 0
}
// Reconstruct recreates missing data and parity shards, if possible. The input
// should match the input to Encode, with missing shards resliced to have a
// length of 0 (but sufficient capacity to hold a recreated shard).
//
// Reconstruct does not check the integrity of the data; if the input shards do
// not match the shards passed to Encode, it will produce garbage.
func (r *ReedSolomon) Reconstruct(shards [][]byte) error {
return r.reconstruct(shards, false)
}
// ReconstructData is like Reconstruct, but only recreates missing data shards.
func (r *ReedSolomon) ReconstructData(shards [][]byte) error {
return r.reconstruct(shards, true)
}
func (r *ReedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
if len(shards) != r.shards {
return ErrTooFewShards
}
err := checkShards(shards, true)
if err != nil {
return err
}
shardSize := shardSize(shards)
// Quick check: are all of the shards present (or, if dataOnly, all of the
// data shards)? If so, there's nothing to do.
numberPresent := 0
dataPresent := 0
for i := 0; i < r.shards; i++ {
if len(shards[i]) != 0 {
numberPresent++
if i < r.DataShards {
dataPresent++
}
}
}
if numberPresent == r.shards || (dataOnly && dataPresent == r.DataShards) {
return nil
}
if numberPresent < r.DataShards {
return ErrTooFewShards
}
// Pull out an array holding just the shards that correspond to the rows of
// the submatrix. These shards will be the input to the decoding process
// that recreates the missing data shards.
//
// Also, create an array of indices of the valid rows we do have.
subShards := make([][]byte, 0, 256)
validIndices := make([]int, 0, 256)
for matrixRow := 0; matrixRow < r.shards && len(validIndices) < r.DataShards; matrixRow++ {
if len(shards[matrixRow]) != 0 {
subShards = append(subShards, shards[matrixRow])
validIndices = append(validIndices, matrixRow)
}
}
// Pull out the rows of the matrix that correspond to the shards that we
// have and build a square matrix. This matrix could be used to generate
// the shards that we have from the original data.
subMatrix := newMatrix(r.DataShards, r.DataShards)
for subMatrixRow, validIndex := range validIndices {
for c := 0; c < r.DataShards; c++ {
subMatrix[subMatrixRow][c] = r.m[validIndex][c]
}
}
// Invert the matrix, so we can go from the encoded shards back to the
// original data. Then pull out the row that generates the shard that we
// want to decode. Note that since this matrix maps back to the original
// data, it can be used to create a data shard, but not a parity shard.
dataDecodeMatrix, err := subMatrix.Invert()
if err != nil {
return err
}
// Re-create any data shards that were missing.
//
// The input to the coding is all of the shards we actually have, and the
// output is the missing data shards. The computation is done using the
// special decode matrix we just built.
outputs := make([][]byte, 0, r.shards)
matrixRows := make([][]byte, 0, r.shards)
for iShard := 0; iShard < r.DataShards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, dataDecodeMatrix[iShard])
}
}
r.codeSomeShardsP(matrixRows, subShards, outputs, shardSize)
if dataOnly {
return nil
}
// Now that we have all of the data shards intact, we can compute any of the
// parity that is missing.
//
// The input to the coding is ALL of the data shards, including any that we
// just calculated. The output is whichever of the data shards were missing.
outputs, matrixRows = outputs[:0], matrixRows[:0]
for iShard := r.DataShards; iShard < r.shards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = shards[iShard][:shardSize]
outputs = append(outputs, shards[iShard])
matrixRows = append(matrixRows, r.parity[iShard-r.DataShards])
}
}
r.codeSomeShardsP(matrixRows, shards[:r.DataShards], outputs, shardSize)
return nil
}
// SplitMulti splits data into blocks of shards, where each block has subsize
// bytes. The shards must have sufficient capacity to hold the sharded data. The
// length of the shards will be modified to fit their new contents.
func (r *ReedSolomon) SplitMulti(data []byte, shards [][]byte, subsize int) error {
chunkSize := r.DataShards * subsize
numChunks := len(data) / chunkSize
if len(data)%chunkSize != 0 {
numChunks++
}
// extend shards to proper len
shardSize := numChunks * subsize
for i := range shards {
if cap(shards[i]) < shardSize {
return errors.New("each shard must have capacity of at least len(data)/m")
}
shards[i] = shards[i][:shardSize]
}
// copy data into first DataShards shards, subsize bytes at a time
buf := bytes.NewBuffer(data)
for off := 0; buf.Len() > 0; off += subsize {
for i := 0; i < r.DataShards; i++ {
copy(shards[i][off:], buf.Next(subsize))
}
}
return nil
}
// JoinMulti joins the supplied multi-block shards, writing them to dst. The
// first 'skip' bytes of the recovered data are skipped, and 'writeLen' bytes
// are written in total.
func (r *ReedSolomon) JoinMulti(dst io.Writer, shards [][]byte, subsize, skip, writeLen int) error {
// Do we have enough shards?
if len(shards) < r.DataShards {
return ErrTooFewShards
}
shards = shards[:r.DataShards]
// Do we have enough data?
size := 0
for _, shard := range shards {
if len(shard) == 0 {
return ErrReconstructRequired
}
size += len(shard)
if size >= writeLen {
break
}
}
if size < writeLen {
return ErrShortData
}
// Copy data to dst.
for off := 0; writeLen > 0; off += subsize {
for _, shard := range shards {
shard = shard[off:][:subsize]
if skip >= len(shard) {
skip -= len(shard)
continue
} else if skip > 0 { | shard = shard[skip:]
skip = 0
}
if writeLen < len(shard) {
shard = shard[:writeLen]
}
n, err := dst.Write(shard)
if err != nil {
return err
}
writeLen -= n
}
}
return nil
} | random_line_split | |
SIPResponse.go | package message
import (
"bytes"
"container/list"
"errors"
"github.com/sssgun/gosips/core"
"github.com/sssgun/gosips/sip/address"
"github.com/sssgun/gosips/sip/header"
"strings"
)
/**
* SIP Response structure.
*/
type SIPResponse struct {
SIPMessage
statusLine *header.StatusLine
}
/** Constructor.
*/
func NewSIPResponse() *SIPResponse {
this := &SIPResponse{}
this.SIPMessage.super()
return this
}
func (this *SIPResponse) GetReasonPhraseFromInt(rc int) string {
var retval string
switch rc {
case TRYING:
retval = "Trying"
case RINGING:
retval = "Ringing"
case CALL_IS_BEING_FORWARDED:
retval = "Call is being forwarded"
case QUEUED:
retval = "Queued"
case SESSION_PROGRESS:
retval = "Session progress"
case OK:
retval = "OK"
case ACCEPTED:
retval = "Accepted"
case MULTIPLE_CHOICES:
retval = "Multiple choices"
case MOVED_PERMANENTLY:
retval = "Moved permanently"
case MOVED_TEMPORARILY:
retval = "Moved Temporarily"
case USE_PROXY:
retval = "Use proxy"
case ALTERNATIVE_SERVICE:
retval = "Alternative service"
case BAD_REQUEST:
retval = "Bad request"
case UNAUTHORIZED:
retval = "Unauthorized"
case PAYMENT_REQUIRED:
retval = "Payment required"
case FORBIDDEN:
retval = "Forbidden"
case NOT_FOUND:
retval = "Not found"
case METHOD_NOT_ALLOWED:
retval = "Method not allowed"
case NOT_ACCEPTABLE:
retval = "Not acceptable"
case PROXY_AUTHENTICATION_REQUIRED:
retval = "Proxy Authentication required"
case REQUEST_TIMEOUT:
retval = "Request timeout"
case GONE:
retval = "Gone"
case TEMPORARILY_UNAVAILABLE:
retval = "Temporarily Unavailable"
case REQUEST_ENTITY_TOO_LARGE:
retval = "Request entity too large"
case REQUEST_URI_TOO_LONG:
retval = "Request-URI too large"
case UNSUPPORTED_MEDIA_TYPE:
retval = "Unsupported media type"
case UNSUPPORTED_URI_SCHEME:
retval = "Unsupported URI Scheme"
case BAD_EXTENSION:
retval = "Bad extension"
case EXTENSION_REQUIRED:
retval = "Etension Required"
case INTERVAL_TOO_BRIEF:
retval = "Interval too brief"
case CALL_OR_TRANSACTION_DOES_NOT_EXIST:
retval = "Call leg/Transaction does not exist"
case LOOP_DETECTED:
retval = "Loop detected"
case TOO_MANY_HOPS:
retval = "Too many hops"
case ADDRESS_INCOMPLETE:
retval = "Address incomplete"
case AMBIGUOUS:
retval = "Ambiguous"
case BUSY_HERE:
retval = "Busy here"
case REQUEST_TERMINATED:
retval = "Request Terminated"
case NOT_ACCEPTABLE_HERE:
retval = "Not Accpetable here"
case BAD_EVENT:
retval = "Bad Event"
case REQUEST_PENDING:
retval = "Request Pending"
case SERVER_INTERNAL_ERROR:
retval = "Server Internal Error"
case UNDECIPHERABLE:
retval = "Undecipherable"
case NOT_IMPLEMENTED:
retval = "Not implemented"
case BAD_GATEWAY:
retval = "Bad gateway"
case SERVICE_UNAVAILABLE:
retval = "Service unavailable"
case SERVER_TIMEOUT:
retval = "Gateway timeout"
case VERSION_NOT_SUPPORTED:
retval = "SIP version not supported"
case MESSAGE_TOO_LARGE:
retval = "Message Too Large"
case BUSY_EVERYWHERE:
retval = "Busy everywhere"
case DECLINE:
retval = "Decline"
case DOES_NOT_EXIST_ANYWHERE:
retval = "Does not exist anywhere"
case SESSION_NOT_ACCEPTABLE:
retval = "Session Not acceptable"
default:
retval = ""
}
return retval
}
// /** Set the status code.
// *@param statusCode is the status code to Set.
// *@throws IlegalArgumentException if invalid status code.
// */
func (this *SIPResponse) SetStatusCode(statusCode int) { //throws ParseException {
// if (statusCode < 100 || statusCode > 800)
// throw new ParseException("bad status code",0);
if this.statusLine == nil {
this.statusLine = header.NewStatusLine()
}
this.statusLine.SetStatusCode(statusCode)
}
// /**
// * Get the status line of the response.
// *@return StatusLine
// */
func (this *SIPResponse) GetStatusLine() *header.StatusLine {
return this.statusLine
}
// /** Get the staus code (conveniance function).
// *@return the status code of the status line.
// */
func (this *SIPResponse) GetStatusCode() int {
return this.statusLine.GetStatusCode()
}
// /** Set the reason phrase.
// *@param reasonPhrase the reason phrase.
// *@throws IllegalArgumentException if nil string
// */
func (this *SIPResponse) SetReasonPhrase(reasonPhrase string) {
//if this.reasonPhrase == nil)
// throw new IllegalArgumentException("Bad reason phrase");
if this.statusLine == nil {
this.statusLine = header.NewStatusLine()
}
this.statusLine.SetReasonPhrase(reasonPhrase)
}
// /** Get the reason phrase.
// *@return the reason phrase.
// */
func (this *SIPResponse) GetReasonPhrase() string {
if this.statusLine == nil || this.statusLine.GetReasonPhrase() == "" {
return ""
} else {
return this.statusLine.GetReasonPhrase()
}
}
// /** Return true if the response is a final response.
// *@param rc is the return code.
// *@return true if the parameter is between the range 200 and 700.
// */
func (this *SIPResponse) IsFinalResponseFromInt(rc int) bool {
return rc >= 200 && rc < 700
}
// /** Is this a final response?
// *@return true if this is a final response.
// */
func (this *SIPResponse) IsFinalResponse() bool {
return this.IsFinalResponseFromInt(this.statusLine.GetStatusCode())
}
// /**
// * Set the status line field.
// *@param sl Status line to Set.
// */
func (this *SIPResponse) SetStatusLine(sl *header.StatusLine) {
this.statusLine = sl
}
// /**
// * Print formatting function.
// *Indent and parenthesize for pretty printing.
// * Note -- use the encode method for formatting the message.
// * Hack here to XMLize.
// *
// *@return a string for pretty printing.
// */
// public String debugDump() {
// String superstring = super.debugDump();
// stringRepresentation = "";
// sprint(MESSAGE_PACKAGE + ".SIPResponse");
// sprint("{");
// if (statusLine != nil) {
// sprint(statusLine.debugDump());
// }
// sprint(superstring);
// sprint("}");
// return stringRepresentation;
// }
// /**
// * Check the response structure. Must have from, to CSEQ and VIA
// * headers.
// */
func (this *SIPResponse) CheckHeaders() (ParseException error) {
if this.GetCSeq() == nil {
return errors.New("ParseException: CSeq")
}
if this.GetTo() == nil {
return errors.New("ParseException: To")
}
if this.GetFrom() == nil {
return errors.New("ParseException: From")
}
if this.GetViaHeaders() == nil {
return errors.New("ParseException: Via")
}
return nil
}
// /**
// * Encode the SIP Request as a string.
// *@return The string encoded canonical form of the message.
// */
func (this *SIPResponse) String() string {
var retval string
if this.statusLine != nil {
retval = this.statusLine.String() + this.SIPMessage.String()
} else {
retval = this.SIPMessage.String()
}
return retval + core.SIPSeparatorNames_NEWLINE
}
// func (this *SIPResponse) String() string {
// return this.statusLine.String() + this.SIPMessage.String()
// }
// /** Get this message as a list of encoded strings.
// *@return LinkedList containing encoded strings for each header in
// * the message.
// */
func (this *SIPResponse) GetMessageAsEncodedStrings() *list.List |
// /**
// * Make a clone (deep copy) of this object.
// *@return a deep copy of this object.
// */
// public Object clone() {
// SIPResponse retval = (SIPResponse) super.clone();
// retval.statusLine = (StatusLine) this.statusLine.clone();
// return retval;
// }
// /**
// * Replace a portion of this response with a new structure (given by
// * newObj). This method finds a sub-structure that encodes to cText
// * and has the same type as the second arguement and replaces this
// * portion with the second argument.
// * @param cText is the text that we want to replace.
// * @param newObj is the new object that we want to put in place of
// * cText.
// * @param matchSubstring boolean to indicate whether to match on
// * substrings when searching for a replacement.
// */
// func (this *SIPResponse) replace(String cText, GenericObject newObj,
// boolean matchSubstring ) {
// if (cText == nil || newObj == nil)
// throw new
// IllegalArgumentException("nil args!");
// if (newObj instanceof SIPHeader)
// throw new
// IllegalArgumentException("Bad replacement class " +
// newObj.GetClass().GetName());
// if (statusLine != nil)
// statusLine.replace(cText,newObj,matchSubstring);
// super.replace(cText,newObj,matchSubstring);
// }
// /**
// * Compare for equality.
// *@param other other object to compare with.
// */
// public boolean equals(Object other) {
// if ( ! this.GetClass().equals(other.GetClass())) return false;
// SIPResponse that = (SIPResponse) other;
// return statusLine.equals(that.statusLine) &&
// super.equals(other);
// }
// /**
// * Match with a template.
// *@param matchObj template object to match ourselves with (nil
// * in any position in the template object matches wildcard)
// */
// public boolean match(Object matchObj) {
// if (matchObj == nil) return true;
// else if ( ! matchObj.GetClass().equals(this.GetClass())) {
// return false;
// } else if (matchObj == this) return true;
// SIPResponse that = (SIPResponse) matchObj;
// // System.out.println("---------------------------------------");
// // System.out.println("matching " + this.encode());
// // System.out.println("matchObj " + that.encode());
// StatusLine rline = that.statusLine;
// if (this.statusLine == nil && rline != nil) return false;
// else if (this.statusLine == rline) return super.match(matchObj);
// else {
// // System.out.println(statusLine.match(that.statusLine));
// // System.out.println(super.match(matchObj));
// // System.out.println("---------------------------------------");
// return statusLine.match(that.statusLine) &&
// super.match(matchObj);
// }
// }
// /** Encode this into a byte array.
// * This is used when the body has been Set as a binary array
// * and you want to encode the body as a byte array for transmission.
// *
// *@return a byte array containing the SIPRequest encoded as a byte
// * array.
// */
func (this *SIPResponse) EncodeAsBytes() []byte {
var slbytes []byte
if this.statusLine != nil {
//try {
slbytes = []byte(this.statusLine.String()) //.GetBytes("UTF-8");
// } catch (UnsupportedEncodingException ex){
// InternalErrorHandler.handleException(ex);
// }
}
superbytes := this.SIPMessage.EncodeAsBytes()
retval := make([]byte, len(slbytes)+len(superbytes))
i := 0
if slbytes != nil {
for i = 0; i < len(slbytes); i++ {
retval[i] = slbytes[i]
}
}
for j := 0; j < len(superbytes); j++ {
retval[i] = superbytes[j]
i++
}
return retval
}
/** Get the dialog identifier. Assume the incoming response
* corresponds to a client dialog for an outgoing request.
* Acknowledgement -- this was contributed by Lamine Brahimi.
*
*@return a string that can be used to identify the dialog.
public String GetDialogId() {
CallID cid = (CallID)this.GetCallId();
From from = (From) this.GetFrom();
String retval = cid.GetCallId();
retval += COLON + from.GetUserAtHostPort();
retval += COLON;
if (from.GetTag() != nil)
retval += from.GetTag();
return retval.toLowerCase();
}
*/
// /** Get a dialog identifier.
// * Generates a string that can be used as a dialog identifier.
// *
// * @param isServer is Set to true if this is the UAS
// * and Set to false if this is the UAC
// */
func (this *SIPResponse) GetDialogId(isServer bool) string {
cid := this.GetCallId()
from := this.GetFrom().(*header.From)
to := this.GetTo().(*header.To)
var retval bytes.Buffer
retval.WriteString(cid.GetCallId())
if !isServer {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if to.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetTag())
}
} else {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if to.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
}
return strings.ToLower(retval.String())
}
func (this *SIPResponse) GetDialogId2(isServer bool, toTag string) string {
cid := this.GetCallId()
from := this.GetFrom().(*header.From)
to := this.GetTo().(*header.To)
var retval bytes.Buffer
retval.WriteString(cid.GetCallId())
if !isServer {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if toTag != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(toTag)
}
} else {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if toTag != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(toTag)
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
}
return strings.ToLower(retval.String())
}
// /**
// * Create a new SIPRequest from the given response. Note that the
// * RecordRoute Via and CSeq headers are not copied from the response.
// * These have to be added by the caller.
// * This method is useful for generating ACK messages from final
// * responses.
// *
// *@param requestURI is the request URI to use.
// *@param via is the via header to use.
// *@param cseq is the cseq header to use in the generated
// * request.
// */
func (this *SIPResponse) CreateRequest(requestURI address.SipURI, via *header.Via, cseq *header.CSeq) *SIPRequest {
newRequest := NewSIPRequest()
method := cseq.GetMethod()
newRequest.SetMethod(method)
newRequest.SetRequestURI(requestURI)
if (method == "ACK" || method == "CANCEL") && this.GetTopmostVia().GetBranch() != "" {
// Use the branch id from the OK.
//try {
via.SetBranch(this.GetTopmostVia().GetBranch())
//} catch (ParseException ex) {}
}
newRequest.SetHeader(via)
newRequest.SetHeader(cseq)
for headerIterator := this.getHeaders().Front(); headerIterator != nil; headerIterator = headerIterator.Next() {
nextHeader := headerIterator.Value.(header.Header)
// Some headers do not belong in a Request ....
if this.IsResponseHeader(nextHeader) {
continue
}
if _, ok := nextHeader.(*header.ViaList); ok {
continue
}
if _, ok := nextHeader.(*header.CSeq); ok {
continue
}
if _, ok := nextHeader.(*header.ContentType); ok {
continue
}
if _, ok := nextHeader.(*header.RecordRouteList); ok {
continue
}
// if _, ok:=nextHeader.(*header.To); ok{
// nextHeader = nextHeader.clone();
// }
// else if (nextHeader instanceof From)
// nextHeader = (SIPHeader)nextHeader.clone();
// try {
newRequest.AttachHeader2(nextHeader, false)
// } catch(SIPDuplicateHeaderException e){
// e.printStackTrace();
// }
}
return newRequest
}
// /**
// * Get the encoded first line.
// *
// *@return the status line encoded.
// *
// */
func (this *SIPResponse) GetFirstLine() string {
if this.statusLine == nil {
return ""
} else {
return this.statusLine.String()
}
}
func (this *SIPResponse) SetSIPVersion(sipVersion string) {
this.statusLine.SetSipVersion(sipVersion)
}
func (this *SIPResponse) GetSIPVersion() string {
return this.statusLine.GetSipVersion()
}
| {
retval := this.SIPMessage.GetMessageAsEncodedStrings()
if this.statusLine != nil {
retval.PushFront(this.statusLine.String())
}
return retval
} | identifier_body |
SIPResponse.go | package message
import (
"bytes"
"container/list"
"errors"
"github.com/sssgun/gosips/core"
"github.com/sssgun/gosips/sip/address"
"github.com/sssgun/gosips/sip/header"
"strings"
)
/**
* SIP Response structure.
*/
type SIPResponse struct {
SIPMessage
statusLine *header.StatusLine
}
/** Constructor.
*/
func NewSIPResponse() *SIPResponse {
this := &SIPResponse{}
this.SIPMessage.super()
return this
}
func (this *SIPResponse) GetReasonPhraseFromInt(rc int) string {
var retval string
switch rc {
case TRYING:
retval = "Trying"
case RINGING:
retval = "Ringing"
case CALL_IS_BEING_FORWARDED:
retval = "Call is being forwarded"
case QUEUED:
retval = "Queued"
case SESSION_PROGRESS:
retval = "Session progress"
case OK:
retval = "OK"
case ACCEPTED:
retval = "Accepted"
case MULTIPLE_CHOICES:
retval = "Multiple choices"
case MOVED_PERMANENTLY:
retval = "Moved permanently"
case MOVED_TEMPORARILY:
retval = "Moved Temporarily"
case USE_PROXY:
retval = "Use proxy"
case ALTERNATIVE_SERVICE:
retval = "Alternative service"
case BAD_REQUEST:
retval = "Bad request"
case UNAUTHORIZED:
retval = "Unauthorized"
case PAYMENT_REQUIRED:
retval = "Payment required"
case FORBIDDEN:
retval = "Forbidden"
case NOT_FOUND:
retval = "Not found"
case METHOD_NOT_ALLOWED:
retval = "Method not allowed"
case NOT_ACCEPTABLE:
retval = "Not acceptable"
case PROXY_AUTHENTICATION_REQUIRED:
retval = "Proxy Authentication required"
case REQUEST_TIMEOUT:
retval = "Request timeout"
case GONE:
retval = "Gone"
case TEMPORARILY_UNAVAILABLE:
retval = "Temporarily Unavailable"
case REQUEST_ENTITY_TOO_LARGE:
retval = "Request entity too large"
case REQUEST_URI_TOO_LONG:
retval = "Request-URI too large"
case UNSUPPORTED_MEDIA_TYPE:
retval = "Unsupported media type"
case UNSUPPORTED_URI_SCHEME:
retval = "Unsupported URI Scheme"
case BAD_EXTENSION:
retval = "Bad extension"
case EXTENSION_REQUIRED:
retval = "Etension Required"
case INTERVAL_TOO_BRIEF:
retval = "Interval too brief"
case CALL_OR_TRANSACTION_DOES_NOT_EXIST:
retval = "Call leg/Transaction does not exist"
case LOOP_DETECTED:
retval = "Loop detected"
case TOO_MANY_HOPS:
retval = "Too many hops"
case ADDRESS_INCOMPLETE:
retval = "Address incomplete"
case AMBIGUOUS:
retval = "Ambiguous"
case BUSY_HERE:
retval = "Busy here"
case REQUEST_TERMINATED:
retval = "Request Terminated"
case NOT_ACCEPTABLE_HERE:
retval = "Not Accpetable here"
case BAD_EVENT:
retval = "Bad Event"
case REQUEST_PENDING:
retval = "Request Pending"
case SERVER_INTERNAL_ERROR:
retval = "Server Internal Error"
case UNDECIPHERABLE:
retval = "Undecipherable"
case NOT_IMPLEMENTED:
retval = "Not implemented"
case BAD_GATEWAY:
retval = "Bad gateway"
case SERVICE_UNAVAILABLE:
retval = "Service unavailable"
case SERVER_TIMEOUT:
retval = "Gateway timeout"
case VERSION_NOT_SUPPORTED:
retval = "SIP version not supported"
case MESSAGE_TOO_LARGE:
retval = "Message Too Large"
case BUSY_EVERYWHERE:
retval = "Busy everywhere"
case DECLINE:
retval = "Decline"
case DOES_NOT_EXIST_ANYWHERE:
retval = "Does not exist anywhere"
case SESSION_NOT_ACCEPTABLE:
retval = "Session Not acceptable"
default:
retval = ""
}
return retval
}
// /** Set the status code.
// *@param statusCode is the status code to Set.
// *@throws IlegalArgumentException if invalid status code.
// */
func (this *SIPResponse) SetStatusCode(statusCode int) { //throws ParseException {
// if (statusCode < 100 || statusCode > 800)
// throw new ParseException("bad status code",0);
if this.statusLine == nil {
this.statusLine = header.NewStatusLine()
}
this.statusLine.SetStatusCode(statusCode)
}
// /**
// * Get the status line of the response.
// *@return StatusLine
// */
func (this *SIPResponse) GetStatusLine() *header.StatusLine {
return this.statusLine
}
// /** Get the staus code (conveniance function).
// *@return the status code of the status line.
// */
func (this *SIPResponse) GetStatusCode() int {
return this.statusLine.GetStatusCode()
}
// /** Set the reason phrase.
// *@param reasonPhrase the reason phrase.
// *@throws IllegalArgumentException if nil string
// */
func (this *SIPResponse) SetReasonPhrase(reasonPhrase string) {
//if this.reasonPhrase == nil)
// throw new IllegalArgumentException("Bad reason phrase");
if this.statusLine == nil {
this.statusLine = header.NewStatusLine()
}
this.statusLine.SetReasonPhrase(reasonPhrase)
}
// /** Get the reason phrase.
// *@return the reason phrase.
// */
func (this *SIPResponse) GetReasonPhrase() string {
if this.statusLine == nil || this.statusLine.GetReasonPhrase() == "" {
return ""
} else {
return this.statusLine.GetReasonPhrase()
}
}
// /** Return true if the response is a final response.
// *@param rc is the return code.
// *@return true if the parameter is between the range 200 and 700.
// */
func (this *SIPResponse) IsFinalResponseFromInt(rc int) bool {
return rc >= 200 && rc < 700
}
// /** Is this a final response?
// *@return true if this is a final response.
// */
func (this *SIPResponse) IsFinalResponse() bool {
return this.IsFinalResponseFromInt(this.statusLine.GetStatusCode())
}
// /**
// * Set the status line field.
// *@param sl Status line to Set.
// */
func (this *SIPResponse) SetStatusLine(sl *header.StatusLine) {
this.statusLine = sl
}
// /**
// * Print formatting function.
// *Indent and parenthesize for pretty printing.
// * Note -- use the encode method for formatting the message.
// * Hack here to XMLize.
// *
// *@return a string for pretty printing.
// */
// public String debugDump() {
// String superstring = super.debugDump();
// stringRepresentation = "";
// sprint(MESSAGE_PACKAGE + ".SIPResponse");
// sprint("{");
// if (statusLine != nil) {
// sprint(statusLine.debugDump());
// }
// sprint(superstring);
// sprint("}");
// return stringRepresentation;
// }
// /**
// * Check the response structure. Must have from, to CSEQ and VIA
// * headers.
// */
func (this *SIPResponse) CheckHeaders() (ParseException error) {
if this.GetCSeq() == nil {
return errors.New("ParseException: CSeq")
}
if this.GetTo() == nil {
return errors.New("ParseException: To")
}
if this.GetFrom() == nil {
return errors.New("ParseException: From")
}
if this.GetViaHeaders() == nil {
return errors.New("ParseException: Via")
}
return nil
}
// /**
// * Encode the SIP Request as a string.
// *@return The string encoded canonical form of the message.
// */
func (this *SIPResponse) String() string {
var retval string
if this.statusLine != nil {
retval = this.statusLine.String() + this.SIPMessage.String()
} else {
retval = this.SIPMessage.String()
}
return retval + core.SIPSeparatorNames_NEWLINE
}
// func (this *SIPResponse) String() string {
// return this.statusLine.String() + this.SIPMessage.String()
// }
// /** Get this message as a list of encoded strings.
// *@return LinkedList containing encoded strings for each header in
// * the message.
// */
func (this *SIPResponse) GetMessageAsEncodedStrings() *list.List {
retval := this.SIPMessage.GetMessageAsEncodedStrings()
if this.statusLine != nil {
retval.PushFront(this.statusLine.String())
}
return retval
}
// /**
// * Make a clone (deep copy) of this object.
// *@return a deep copy of this object.
// */
// public Object clone() {
// SIPResponse retval = (SIPResponse) super.clone();
// retval.statusLine = (StatusLine) this.statusLine.clone();
// return retval;
// }
// /**
// * Replace a portion of this response with a new structure (given by
// * newObj). This method finds a sub-structure that encodes to cText
// * and has the same type as the second arguement and replaces this
// * portion with the second argument.
// * @param cText is the text that we want to replace.
// * @param newObj is the new object that we want to put in place of
// * cText.
// * @param matchSubstring boolean to indicate whether to match on
// * substrings when searching for a replacement.
// */
// func (this *SIPResponse) replace(String cText, GenericObject newObj,
// boolean matchSubstring ) {
// if (cText == nil || newObj == nil)
// throw new
// IllegalArgumentException("nil args!");
// if (newObj instanceof SIPHeader)
// throw new
// IllegalArgumentException("Bad replacement class " +
// newObj.GetClass().GetName());
// if (statusLine != nil)
// statusLine.replace(cText,newObj,matchSubstring);
// super.replace(cText,newObj,matchSubstring);
// }
// /**
// * Compare for equality.
// *@param other other object to compare with.
// */
// public boolean equals(Object other) {
// if ( ! this.GetClass().equals(other.GetClass())) return false;
// SIPResponse that = (SIPResponse) other;
// return statusLine.equals(that.statusLine) &&
// super.equals(other);
// }
// /**
// * Match with a template.
// *@param matchObj template object to match ourselves with (nil
// * in any position in the template object matches wildcard)
// */
// public boolean match(Object matchObj) {
// if (matchObj == nil) return true;
// else if ( ! matchObj.GetClass().equals(this.GetClass())) {
// return false;
// } else if (matchObj == this) return true;
// SIPResponse that = (SIPResponse) matchObj;
// // System.out.println("---------------------------------------");
// // System.out.println("matching " + this.encode());
// // System.out.println("matchObj " + that.encode());
// StatusLine rline = that.statusLine;
// if (this.statusLine == nil && rline != nil) return false;
// else if (this.statusLine == rline) return super.match(matchObj);
// else {
// // System.out.println(statusLine.match(that.statusLine));
// // System.out.println(super.match(matchObj));
// // System.out.println("---------------------------------------");
// return statusLine.match(that.statusLine) &&
// super.match(matchObj);
// }
// }
// /** Encode this into a byte array.
// * This is used when the body has been Set as a binary array
// * and you want to encode the body as a byte array for transmission.
// *
// *@return a byte array containing the SIPRequest encoded as a byte
// * array.
// */
func (this *SIPResponse) EncodeAsBytes() []byte {
var slbytes []byte
if this.statusLine != nil {
//try {
slbytes = []byte(this.statusLine.String()) //.GetBytes("UTF-8");
// } catch (UnsupportedEncodingException ex){
// InternalErrorHandler.handleException(ex);
// }
}
superbytes := this.SIPMessage.EncodeAsBytes()
retval := make([]byte, len(slbytes)+len(superbytes))
i := 0
if slbytes != nil {
for i = 0; i < len(slbytes); i++ {
retval[i] = slbytes[i]
}
}
for j := 0; j < len(superbytes); j++ {
retval[i] = superbytes[j]
i++
}
return retval
}
/** Get the dialog identifier. Assume the incoming response
* corresponds to a client dialog for an outgoing request.
* Acknowledgement -- this was contributed by Lamine Brahimi.
*
*@return a string that can be used to identify the dialog.
public String GetDialogId() {
CallID cid = (CallID)this.GetCallId();
From from = (From) this.GetFrom();
String retval = cid.GetCallId();
retval += COLON + from.GetUserAtHostPort();
retval += COLON;
if (from.GetTag() != nil)
retval += from.GetTag();
return retval.toLowerCase();
}
*/
// /** Get a dialog identifier.
// * Generates a string that can be used as a dialog identifier.
// *
// * @param isServer is Set to true if this is the UAS
// * and Set to false if this is the UAC
// */
func (this *SIPResponse) GetDialogId(isServer bool) string {
cid := this.GetCallId()
from := this.GetFrom().(*header.From)
to := this.GetTo().(*header.To)
var retval bytes.Buffer
retval.WriteString(cid.GetCallId())
if !isServer {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if to.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetTag())
}
} else {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if to.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
}
return strings.ToLower(retval.String())
}
func (this *SIPResponse) | (isServer bool, toTag string) string {
cid := this.GetCallId()
from := this.GetFrom().(*header.From)
to := this.GetTo().(*header.To)
var retval bytes.Buffer
retval.WriteString(cid.GetCallId())
if !isServer {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if toTag != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(toTag)
}
} else {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if toTag != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(toTag)
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
}
return strings.ToLower(retval.String())
}
// /**
// * Create a new SIPRequest from the given response. Note that the
// * RecordRoute Via and CSeq headers are not copied from the response.
// * These have to be added by the caller.
// * This method is useful for generating ACK messages from final
// * responses.
// *
// *@param requestURI is the request URI to use.
// *@param via is the via header to use.
// *@param cseq is the cseq header to use in the generated
// * request.
// */
func (this *SIPResponse) CreateRequest(requestURI address.SipURI, via *header.Via, cseq *header.CSeq) *SIPRequest {
newRequest := NewSIPRequest()
method := cseq.GetMethod()
newRequest.SetMethod(method)
newRequest.SetRequestURI(requestURI)
if (method == "ACK" || method == "CANCEL") && this.GetTopmostVia().GetBranch() != "" {
// Use the branch id from the OK.
//try {
via.SetBranch(this.GetTopmostVia().GetBranch())
//} catch (ParseException ex) {}
}
newRequest.SetHeader(via)
newRequest.SetHeader(cseq)
for headerIterator := this.getHeaders().Front(); headerIterator != nil; headerIterator = headerIterator.Next() {
nextHeader := headerIterator.Value.(header.Header)
// Some headers do not belong in a Request ....
if this.IsResponseHeader(nextHeader) {
continue
}
if _, ok := nextHeader.(*header.ViaList); ok {
continue
}
if _, ok := nextHeader.(*header.CSeq); ok {
continue
}
if _, ok := nextHeader.(*header.ContentType); ok {
continue
}
if _, ok := nextHeader.(*header.RecordRouteList); ok {
continue
}
// if _, ok:=nextHeader.(*header.To); ok{
// nextHeader = nextHeader.clone();
// }
// else if (nextHeader instanceof From)
// nextHeader = (SIPHeader)nextHeader.clone();
// try {
newRequest.AttachHeader2(nextHeader, false)
// } catch(SIPDuplicateHeaderException e){
// e.printStackTrace();
// }
}
return newRequest
}
// /**
// * Get the encoded first line.
// *
// *@return the status line encoded.
// *
// */
func (this *SIPResponse) GetFirstLine() string {
if this.statusLine == nil {
return ""
} else {
return this.statusLine.String()
}
}
func (this *SIPResponse) SetSIPVersion(sipVersion string) {
this.statusLine.SetSipVersion(sipVersion)
}
func (this *SIPResponse) GetSIPVersion() string {
return this.statusLine.GetSipVersion()
}
| GetDialogId2 | identifier_name |
SIPResponse.go | package message
import (
"bytes"
"container/list"
"errors"
"github.com/sssgun/gosips/core"
"github.com/sssgun/gosips/sip/address"
"github.com/sssgun/gosips/sip/header"
"strings"
)
/**
* SIP Response structure.
*/
type SIPResponse struct {
SIPMessage
statusLine *header.StatusLine
}
/** Constructor.
*/
func NewSIPResponse() *SIPResponse {
this := &SIPResponse{}
this.SIPMessage.super()
return this
}
func (this *SIPResponse) GetReasonPhraseFromInt(rc int) string {
var retval string
switch rc {
case TRYING:
retval = "Trying"
case RINGING:
retval = "Ringing"
case CALL_IS_BEING_FORWARDED:
retval = "Call is being forwarded"
case QUEUED:
retval = "Queued"
case SESSION_PROGRESS:
retval = "Session progress"
case OK:
retval = "OK"
case ACCEPTED:
retval = "Accepted"
case MULTIPLE_CHOICES:
retval = "Multiple choices"
case MOVED_PERMANENTLY:
retval = "Moved permanently"
case MOVED_TEMPORARILY:
retval = "Moved Temporarily"
case USE_PROXY:
retval = "Use proxy"
case ALTERNATIVE_SERVICE:
retval = "Alternative service"
case BAD_REQUEST:
retval = "Bad request"
case UNAUTHORIZED:
retval = "Unauthorized"
case PAYMENT_REQUIRED:
retval = "Payment required"
case FORBIDDEN:
retval = "Forbidden"
case NOT_FOUND:
retval = "Not found"
case METHOD_NOT_ALLOWED:
retval = "Method not allowed"
case NOT_ACCEPTABLE:
retval = "Not acceptable"
case PROXY_AUTHENTICATION_REQUIRED:
retval = "Proxy Authentication required"
case REQUEST_TIMEOUT:
retval = "Request timeout"
case GONE:
retval = "Gone"
case TEMPORARILY_UNAVAILABLE:
retval = "Temporarily Unavailable"
case REQUEST_ENTITY_TOO_LARGE:
retval = "Request entity too large"
case REQUEST_URI_TOO_LONG:
retval = "Request-URI too large"
case UNSUPPORTED_MEDIA_TYPE:
retval = "Unsupported media type"
case UNSUPPORTED_URI_SCHEME:
retval = "Unsupported URI Scheme"
case BAD_EXTENSION:
retval = "Bad extension"
case EXTENSION_REQUIRED:
retval = "Etension Required"
case INTERVAL_TOO_BRIEF:
retval = "Interval too brief"
case CALL_OR_TRANSACTION_DOES_NOT_EXIST:
retval = "Call leg/Transaction does not exist"
case LOOP_DETECTED:
retval = "Loop detected"
case TOO_MANY_HOPS:
retval = "Too many hops"
case ADDRESS_INCOMPLETE:
retval = "Address incomplete"
case AMBIGUOUS:
retval = "Ambiguous"
case BUSY_HERE:
retval = "Busy here"
case REQUEST_TERMINATED:
retval = "Request Terminated"
case NOT_ACCEPTABLE_HERE:
retval = "Not Accpetable here"
case BAD_EVENT:
retval = "Bad Event"
case REQUEST_PENDING:
retval = "Request Pending"
case SERVER_INTERNAL_ERROR:
retval = "Server Internal Error"
case UNDECIPHERABLE:
retval = "Undecipherable"
case NOT_IMPLEMENTED:
retval = "Not implemented"
case BAD_GATEWAY:
retval = "Bad gateway"
case SERVICE_UNAVAILABLE:
retval = "Service unavailable"
case SERVER_TIMEOUT:
retval = "Gateway timeout"
case VERSION_NOT_SUPPORTED:
retval = "SIP version not supported"
case MESSAGE_TOO_LARGE:
retval = "Message Too Large"
case BUSY_EVERYWHERE:
retval = "Busy everywhere"
case DECLINE:
retval = "Decline"
case DOES_NOT_EXIST_ANYWHERE:
retval = "Does not exist anywhere"
case SESSION_NOT_ACCEPTABLE:
retval = "Session Not acceptable"
default:
retval = ""
}
return retval
}
// /** Set the status code.
// *@param statusCode is the status code to Set.
// *@throws IlegalArgumentException if invalid status code.
// */
func (this *SIPResponse) SetStatusCode(statusCode int) { //throws ParseException {
// if (statusCode < 100 || statusCode > 800)
// throw new ParseException("bad status code",0);
if this.statusLine == nil {
this.statusLine = header.NewStatusLine()
}
this.statusLine.SetStatusCode(statusCode)
}
// /**
// * Get the status line of the response.
// *@return StatusLine
// */
func (this *SIPResponse) GetStatusLine() *header.StatusLine {
return this.statusLine
}
// /** Get the staus code (conveniance function).
// *@return the status code of the status line.
// */
func (this *SIPResponse) GetStatusCode() int {
return this.statusLine.GetStatusCode()
}
// /** Set the reason phrase.
// *@param reasonPhrase the reason phrase.
// *@throws IllegalArgumentException if nil string
// */
func (this *SIPResponse) SetReasonPhrase(reasonPhrase string) {
//if this.reasonPhrase == nil)
// throw new IllegalArgumentException("Bad reason phrase");
if this.statusLine == nil {
this.statusLine = header.NewStatusLine()
}
this.statusLine.SetReasonPhrase(reasonPhrase)
}
// /** Get the reason phrase.
// *@return the reason phrase.
// */
func (this *SIPResponse) GetReasonPhrase() string {
if this.statusLine == nil || this.statusLine.GetReasonPhrase() == "" {
return ""
} else {
return this.statusLine.GetReasonPhrase()
}
}
// /** Return true if the response is a final response.
// *@param rc is the return code.
// *@return true if the parameter is between the range 200 and 700.
// */
func (this *SIPResponse) IsFinalResponseFromInt(rc int) bool {
return rc >= 200 && rc < 700
}
// /** Is this a final response?
// *@return true if this is a final response.
// */
func (this *SIPResponse) IsFinalResponse() bool {
return this.IsFinalResponseFromInt(this.statusLine.GetStatusCode())
}
// /**
// * Set the status line field.
// *@param sl Status line to Set.
// */
func (this *SIPResponse) SetStatusLine(sl *header.StatusLine) {
this.statusLine = sl
}
// /**
// * Print formatting function.
// *Indent and parenthesize for pretty printing.
// * Note -- use the encode method for formatting the message.
// * Hack here to XMLize.
// *
// *@return a string for pretty printing.
// */
// public String debugDump() {
// String superstring = super.debugDump();
// stringRepresentation = "";
// sprint(MESSAGE_PACKAGE + ".SIPResponse");
// sprint("{");
// if (statusLine != nil) {
// sprint(statusLine.debugDump());
// }
// sprint(superstring);
// sprint("}");
// return stringRepresentation;
// }
// /**
// * Check the response structure. Must have from, to CSEQ and VIA
// * headers.
// */
// CheckHeaders verifies the structural integrity of the response: it must
// carry CSeq, To, From and Via headers. It returns a ParseException-style
// error naming the first missing header, or nil when all are present.
func (this *SIPResponse) CheckHeaders() (ParseException error) {
	switch {
	case this.GetCSeq() == nil:
		ParseException = errors.New("ParseException: CSeq")
	case this.GetTo() == nil:
		ParseException = errors.New("ParseException: To")
	case this.GetFrom() == nil:
		ParseException = errors.New("ParseException: From")
	case this.GetViaHeaders() == nil:
		ParseException = errors.New("ParseException: Via")
	}
	return ParseException
}
// /**
// * Encode the SIP Request as a string.
// *@return The string encoded canonical form of the message.
// */
// String encodes the SIP response as a string in canonical form: the
// status line (when present) followed by the encoded message, terminated
// by a newline separator.
func (this *SIPResponse) String() string {
	encoded := this.SIPMessage.String()
	if this.statusLine != nil {
		encoded = this.statusLine.String() + encoded
	}
	return encoded + core.SIPSeparatorNames_NEWLINE
}
// func (this *SIPResponse) String() string {
// return this.statusLine.String() + this.SIPMessage.String()
// }
// /** Get this message as a list of encoded strings.
// *@return LinkedList containing encoded strings for each header in
// * the message.
// */
func (this *SIPResponse) GetMessageAsEncodedStrings() *list.List {
retval := this.SIPMessage.GetMessageAsEncodedStrings()
if this.statusLine != nil {
retval.PushFront(this.statusLine.String())
}
return retval
}
// /**
// * Make a clone (deep copy) of this object.
// *@return a deep copy of this object.
// */
// public Object clone() {
// SIPResponse retval = (SIPResponse) super.clone();
// retval.statusLine = (StatusLine) this.statusLine.clone();
// return retval;
// }
// /**
// * Replace a portion of this response with a new structure (given by
// * newObj). This method finds a sub-structure that encodes to cText
// * and has the same type as the second arguement and replaces this
// * portion with the second argument.
// * @param cText is the text that we want to replace.
// * @param newObj is the new object that we want to put in place of
// * cText.
// * @param matchSubstring boolean to indicate whether to match on
// * substrings when searching for a replacement.
// */
// func (this *SIPResponse) replace(String cText, GenericObject newObj,
// boolean matchSubstring ) {
// if (cText == nil || newObj == nil)
// throw new
// IllegalArgumentException("nil args!");
// if (newObj instanceof SIPHeader)
// throw new
// IllegalArgumentException("Bad replacement class " +
// newObj.GetClass().GetName());
// if (statusLine != nil)
// statusLine.replace(cText,newObj,matchSubstring);
// super.replace(cText,newObj,matchSubstring);
// }
// /**
// * Compare for equality.
// *@param other other object to compare with.
// */
// public boolean equals(Object other) {
// if ( ! this.GetClass().equals(other.GetClass())) return false;
// SIPResponse that = (SIPResponse) other;
// return statusLine.equals(that.statusLine) &&
// super.equals(other);
// }
// /**
// * Match with a template.
// *@param matchObj template object to match ourselves with (nil
// * in any position in the template object matches wildcard)
// */
// public boolean match(Object matchObj) {
// if (matchObj == nil) return true;
// else if ( ! matchObj.GetClass().equals(this.GetClass())) {
// return false;
// } else if (matchObj == this) return true;
// SIPResponse that = (SIPResponse) matchObj;
// // System.out.println("---------------------------------------");
// // System.out.println("matching " + this.encode());
// // System.out.println("matchObj " + that.encode());
// StatusLine rline = that.statusLine;
// if (this.statusLine == nil && rline != nil) return false;
// else if (this.statusLine == rline) return super.match(matchObj);
// else {
// // System.out.println(statusLine.match(that.statusLine));
// // System.out.println(super.match(matchObj));
// // System.out.println("---------------------------------------");
// return statusLine.match(that.statusLine) &&
// super.match(matchObj);
// }
// }
// /** Encode this into a byte array.
// * This is used when the body has been Set as a binary array
// * and you want to encode the body as a byte array for transmission.
// *
// *@return a byte array containing the SIPRequest encoded as a byte
// * array.
// */
// EncodeAsBytes encodes this response into a byte array. This is used when
// the body has been set as a binary array and the whole message must be
// serialized for transmission.
//
// Returns the status line bytes (when a status line is present) followed
// by the encoded message bytes.
func (this *SIPResponse) EncodeAsBytes() []byte {
	var slbytes []byte
	if this.statusLine != nil {
		slbytes = []byte(this.statusLine.String())
	}
	superbytes := this.SIPMessage.EncodeAsBytes()
	// Pre-size the result once and append both parts; this replaces the
	// previous element-by-element copy loops with memmove-backed appends.
	retval := make([]byte, 0, len(slbytes)+len(superbytes))
	retval = append(retval, slbytes...)
	retval = append(retval, superbytes...)
	return retval
}
/** Get the dialog identifier. Assume the incoming response
* corresponds to a client dialog for an outgoing request.
* Acknowledgement -- this was contributed by Lamine Brahimi.
*
*@return a string that can be used to identify the dialog.
public String GetDialogId() {
CallID cid = (CallID)this.GetCallId();
From from = (From) this.GetFrom();
String retval = cid.GetCallId();
retval += COLON + from.GetUserAtHostPort();
retval += COLON;
if (from.GetTag() != nil)
retval += from.GetTag();
return retval.toLowerCase();
}
*/
// /** Get a dialog identifier.
// * Generates a string that can be used as a dialog identifier.
// *
// * @param isServer is Set to true if this is the UAS
// * and Set to false if this is the UAC
// */
func (this *SIPResponse) GetDialogId(isServer bool) string {
cid := this.GetCallId()
from := this.GetFrom().(*header.From)
to := this.GetTo().(*header.To)
var retval bytes.Buffer
retval.WriteString(cid.GetCallId())
if !isServer {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" |
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if to.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetTag())
}
} else {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if to.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
}
return strings.ToLower(retval.String())
}
// GetDialogId2 generates a dialog identifier like GetDialogId, but uses the
// caller-supplied toTag instead of the tag carried by the To header.
//
// isServer is set to true if this is the UAS and false if this is the UAC.
func (this *SIPResponse) GetDialogId2(isServer bool, toTag string) string {
	cid := this.GetCallId()
	from := this.GetFrom().(*header.From)
	to := this.GetTo().(*header.To)

	var id bytes.Buffer
	// writePart appends a COLON separator followed by the given value.
	writePart := func(value string) {
		id.WriteString(core.SIPSeparatorNames_COLON)
		id.WriteString(value)
	}

	id.WriteString(cid.GetCallId())
	if !isServer {
		writePart(from.GetUserAtHostPort())
		if from.GetTag() != "" {
			writePart(from.GetTag())
		}
		writePart(to.GetUserAtHostPort())
		if toTag != "" {
			writePart(toTag)
		}
	} else {
		writePart(to.GetUserAtHostPort())
		if toTag != "" {
			writePart(toTag)
		}
		writePart(from.GetUserAtHostPort())
		if from.GetTag() != "" {
			writePart(from.GetTag())
		}
	}
	return strings.ToLower(id.String())
}
// /**
// * Create a new SIPRequest from the given response. Note that the
// * RecordRoute Via and CSeq headers are not copied from the response.
// * These have to be added by the caller.
// * This method is useful for generating ACK messages from final
// * responses.
// *
// *@param requestURI is the request URI to use.
// *@param via is the via header to use.
// *@param cseq is the cseq header to use in the generated
// * request.
// */
func (this *SIPResponse) CreateRequest(requestURI address.SipURI, via *header.Via, cseq *header.CSeq) *SIPRequest {
newRequest := NewSIPRequest()
method := cseq.GetMethod()
newRequest.SetMethod(method)
newRequest.SetRequestURI(requestURI)
if (method == "ACK" || method == "CANCEL") && this.GetTopmostVia().GetBranch() != "" {
// Use the branch id from the OK.
//try {
via.SetBranch(this.GetTopmostVia().GetBranch())
//} catch (ParseException ex) {}
}
newRequest.SetHeader(via)
newRequest.SetHeader(cseq)
for headerIterator := this.getHeaders().Front(); headerIterator != nil; headerIterator = headerIterator.Next() {
nextHeader := headerIterator.Value.(header.Header)
// Some headers do not belong in a Request ....
if this.IsResponseHeader(nextHeader) {
continue
}
if _, ok := nextHeader.(*header.ViaList); ok {
continue
}
if _, ok := nextHeader.(*header.CSeq); ok {
continue
}
if _, ok := nextHeader.(*header.ContentType); ok {
continue
}
if _, ok := nextHeader.(*header.RecordRouteList); ok {
continue
}
// if _, ok:=nextHeader.(*header.To); ok{
// nextHeader = nextHeader.clone();
// }
// else if (nextHeader instanceof From)
// nextHeader = (SIPHeader)nextHeader.clone();
// try {
newRequest.AttachHeader2(nextHeader, false)
// } catch(SIPDuplicateHeaderException e){
// e.printStackTrace();
// }
}
return newRequest
}
// /**
// * Get the encoded first line.
// *
// *@return the status line encoded.
// *
// */
func (this *SIPResponse) GetFirstLine() string {
if this.statusLine == nil {
return ""
} else {
return this.statusLine.String()
}
}
func (this *SIPResponse) SetSIPVersion(sipVersion string) {
this.statusLine.SetSipVersion(sipVersion)
}
func (this *SIPResponse) GetSIPVersion() string {
return this.statusLine.GetSipVersion()
}
| {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
} | conditional_block |
SIPResponse.go | package message
import (
"bytes"
"container/list"
"errors"
"github.com/sssgun/gosips/core"
"github.com/sssgun/gosips/sip/address"
"github.com/sssgun/gosips/sip/header"
"strings"
)
/**
* SIP Response structure.
*/
type SIPResponse struct {
SIPMessage
statusLine *header.StatusLine
}
/** Constructor.
*/
func NewSIPResponse() *SIPResponse {
this := &SIPResponse{}
this.SIPMessage.super()
return this
}
func (this *SIPResponse) GetReasonPhraseFromInt(rc int) string {
var retval string
switch rc {
case TRYING:
retval = "Trying"
case RINGING:
retval = "Ringing"
case CALL_IS_BEING_FORWARDED:
retval = "Call is being forwarded"
case QUEUED:
retval = "Queued"
case SESSION_PROGRESS:
retval = "Session progress"
case OK:
retval = "OK"
case ACCEPTED:
retval = "Accepted"
case MULTIPLE_CHOICES:
retval = "Multiple choices"
case MOVED_PERMANENTLY:
retval = "Moved permanently"
case MOVED_TEMPORARILY:
retval = "Moved Temporarily"
case USE_PROXY:
retval = "Use proxy"
case ALTERNATIVE_SERVICE:
retval = "Alternative service"
case BAD_REQUEST:
retval = "Bad request"
case UNAUTHORIZED:
retval = "Unauthorized"
case PAYMENT_REQUIRED:
retval = "Payment required"
case FORBIDDEN:
retval = "Forbidden"
case NOT_FOUND:
retval = "Not found"
case METHOD_NOT_ALLOWED:
retval = "Method not allowed"
case NOT_ACCEPTABLE:
retval = "Not acceptable"
case PROXY_AUTHENTICATION_REQUIRED:
retval = "Proxy Authentication required"
case REQUEST_TIMEOUT:
retval = "Request timeout"
case GONE:
retval = "Gone"
case TEMPORARILY_UNAVAILABLE:
retval = "Temporarily Unavailable"
case REQUEST_ENTITY_TOO_LARGE:
retval = "Request entity too large"
case REQUEST_URI_TOO_LONG:
retval = "Request-URI too large"
case UNSUPPORTED_MEDIA_TYPE:
retval = "Unsupported media type"
case UNSUPPORTED_URI_SCHEME:
retval = "Unsupported URI Scheme"
case BAD_EXTENSION:
retval = "Bad extension"
case EXTENSION_REQUIRED:
retval = "Etension Required"
case INTERVAL_TOO_BRIEF:
retval = "Interval too brief"
case CALL_OR_TRANSACTION_DOES_NOT_EXIST:
retval = "Call leg/Transaction does not exist"
case LOOP_DETECTED:
retval = "Loop detected"
case TOO_MANY_HOPS:
retval = "Too many hops"
case ADDRESS_INCOMPLETE:
retval = "Address incomplete"
case AMBIGUOUS:
retval = "Ambiguous"
case BUSY_HERE:
retval = "Busy here"
case REQUEST_TERMINATED:
retval = "Request Terminated"
case NOT_ACCEPTABLE_HERE:
retval = "Not Accpetable here"
case BAD_EVENT:
retval = "Bad Event"
case REQUEST_PENDING:
retval = "Request Pending"
case SERVER_INTERNAL_ERROR:
retval = "Server Internal Error"
case UNDECIPHERABLE:
retval = "Undecipherable"
case NOT_IMPLEMENTED:
retval = "Not implemented"
case BAD_GATEWAY:
retval = "Bad gateway"
case SERVICE_UNAVAILABLE:
retval = "Service unavailable"
case SERVER_TIMEOUT:
retval = "Gateway timeout"
case VERSION_NOT_SUPPORTED:
retval = "SIP version not supported"
case MESSAGE_TOO_LARGE:
retval = "Message Too Large"
case BUSY_EVERYWHERE:
retval = "Busy everywhere"
case DECLINE:
retval = "Decline"
case DOES_NOT_EXIST_ANYWHERE:
retval = "Does not exist anywhere"
case SESSION_NOT_ACCEPTABLE:
retval = "Session Not acceptable"
default:
retval = ""
}
return retval
}
// /** Set the status code.
// *@param statusCode is the status code to Set.
// *@throws IlegalArgumentException if invalid status code.
// */
// SetStatusCode sets the status code on the status line, lazily creating
// the status line when one does not exist yet. Unlike the original Java
// implementation, no range validation is performed here.
func (this *SIPResponse) SetStatusCode(statusCode int) { //throws ParseException {
	// Range check retained (commented out) from the Java source:
	// if (statusCode < 100 || statusCode > 800)
	//	throw new ParseException("bad status code",0);
	if this.statusLine == nil {
		// Lazily allocate so a zero-value SIPResponse is usable.
		this.statusLine = header.NewStatusLine()
	}
	this.statusLine.SetStatusCode(statusCode)
}
// /**
// * Get the status line of the response.
// *@return StatusLine
// */
func (this *SIPResponse) GetStatusLine() *header.StatusLine {
return this.statusLine
}
// /** Get the staus code (conveniance function).
// *@return the status code of the status line.
// */
func (this *SIPResponse) GetStatusCode() int {
return this.statusLine.GetStatusCode()
}
// /** Set the reason phrase.
// *@param reasonPhrase the reason phrase.
// *@throws IllegalArgumentException if nil string
// */
func (this *SIPResponse) SetReasonPhrase(reasonPhrase string) {
//if this.reasonPhrase == nil)
// throw new IllegalArgumentException("Bad reason phrase");
if this.statusLine == nil {
this.statusLine = header.NewStatusLine()
}
this.statusLine.SetReasonPhrase(reasonPhrase)
}
// /** Get the reason phrase.
// *@return the reason phrase.
// */
func (this *SIPResponse) GetReasonPhrase() string {
if this.statusLine == nil || this.statusLine.GetReasonPhrase() == "" {
return ""
} else {
return this.statusLine.GetReasonPhrase()
}
}
// /** Return true if the response is a final response.
// *@param rc is the return code.
// *@return true if the parameter is between the range 200 and 700.
// */
func (this *SIPResponse) IsFinalResponseFromInt(rc int) bool {
return rc >= 200 && rc < 700
}
// /** Is this a final response?
// *@return true if this is a final response.
// */
func (this *SIPResponse) IsFinalResponse() bool {
return this.IsFinalResponseFromInt(this.statusLine.GetStatusCode())
}
// /**
// * Set the status line field.
// *@param sl Status line to Set.
// */
func (this *SIPResponse) SetStatusLine(sl *header.StatusLine) {
this.statusLine = sl
}
// /**
// * Print formatting function.
// *Indent and parenthesize for pretty printing.
// * Note -- use the encode method for formatting the message.
// * Hack here to XMLize.
// *
// *@return a string for pretty printing.
// */
// public String debugDump() {
// String superstring = super.debugDump();
// stringRepresentation = "";
// sprint(MESSAGE_PACKAGE + ".SIPResponse");
// sprint("{");
// if (statusLine != nil) {
// sprint(statusLine.debugDump());
// }
// sprint(superstring);
// sprint("}");
// return stringRepresentation;
// }
// /**
// * Check the response structure. Must have from, to CSEQ and VIA
// * headers.
// */
func (this *SIPResponse) CheckHeaders() (ParseException error) {
if this.GetCSeq() == nil {
return errors.New("ParseException: CSeq")
}
if this.GetTo() == nil {
return errors.New("ParseException: To")
}
if this.GetFrom() == nil {
return errors.New("ParseException: From")
}
if this.GetViaHeaders() == nil {
return errors.New("ParseException: Via")
}
return nil
}
// /**
// * Encode the SIP Request as a string.
// *@return The string encoded canonical form of the message.
// */
func (this *SIPResponse) String() string {
var retval string
if this.statusLine != nil {
retval = this.statusLine.String() + this.SIPMessage.String()
} else {
retval = this.SIPMessage.String()
}
return retval + core.SIPSeparatorNames_NEWLINE
}
// func (this *SIPResponse) String() string {
// return this.statusLine.String() + this.SIPMessage.String()
// }
// /** Get this message as a list of encoded strings.
// *@return LinkedList containing encoded strings for each header in
// * the message.
// */
func (this *SIPResponse) GetMessageAsEncodedStrings() *list.List {
retval := this.SIPMessage.GetMessageAsEncodedStrings()
if this.statusLine != nil {
retval.PushFront(this.statusLine.String())
}
return retval
}
// /**
// * Make a clone (deep copy) of this object.
// *@return a deep copy of this object.
// */
// public Object clone() {
// SIPResponse retval = (SIPResponse) super.clone();
// retval.statusLine = (StatusLine) this.statusLine.clone();
// return retval;
// }
// /**
// * Replace a portion of this response with a new structure (given by
// * newObj). This method finds a sub-structure that encodes to cText
// * and has the same type as the second arguement and replaces this
// * portion with the second argument.
// * @param cText is the text that we want to replace.
// * @param newObj is the new object that we want to put in place of
// * cText.
// * @param matchSubstring boolean to indicate whether to match on
// * substrings when searching for a replacement.
// */
// func (this *SIPResponse) replace(String cText, GenericObject newObj,
// boolean matchSubstring ) {
// if (cText == nil || newObj == nil)
// throw new
// IllegalArgumentException("nil args!");
// if (newObj instanceof SIPHeader)
// throw new
// IllegalArgumentException("Bad replacement class " +
// newObj.GetClass().GetName());
// if (statusLine != nil)
// statusLine.replace(cText,newObj,matchSubstring);
// super.replace(cText,newObj,matchSubstring);
// }
// /**
// * Compare for equality.
// *@param other other object to compare with.
// */
// public boolean equals(Object other) {
// if ( ! this.GetClass().equals(other.GetClass())) return false;
// SIPResponse that = (SIPResponse) other;
// return statusLine.equals(that.statusLine) && | // /**
// * Match with a template.
// *@param matchObj template object to match ourselves with (nil
// * in any position in the template object matches wildcard)
// */
// public boolean match(Object matchObj) {
// if (matchObj == nil) return true;
// else if ( ! matchObj.GetClass().equals(this.GetClass())) {
// return false;
// } else if (matchObj == this) return true;
// SIPResponse that = (SIPResponse) matchObj;
// // System.out.println("---------------------------------------");
// // System.out.println("matching " + this.encode());
// // System.out.println("matchObj " + that.encode());
// StatusLine rline = that.statusLine;
// if (this.statusLine == nil && rline != nil) return false;
// else if (this.statusLine == rline) return super.match(matchObj);
// else {
// // System.out.println(statusLine.match(that.statusLine));
// // System.out.println(super.match(matchObj));
// // System.out.println("---------------------------------------");
// return statusLine.match(that.statusLine) &&
// super.match(matchObj);
// }
// }
// /** Encode this into a byte array.
// * This is used when the body has been Set as a binary array
// * and you want to encode the body as a byte array for transmission.
// *
// *@return a byte array containing the SIPRequest encoded as a byte
// * array.
// */
// EncodeAsBytes encodes this response into a byte array. This is used when
// the body has been set as a binary array and the whole message must be
// serialized for transmission.
//
// Returns the status line bytes (when a status line is present) followed
// by the encoded message bytes.
func (this *SIPResponse) EncodeAsBytes() []byte {
	var slbytes []byte
	if this.statusLine != nil {
		slbytes = []byte(this.statusLine.String())
	}
	superbytes := this.SIPMessage.EncodeAsBytes()
	// Pre-size the result once and append both parts; this replaces the
	// previous element-by-element copy loops with memmove-backed appends.
	retval := make([]byte, 0, len(slbytes)+len(superbytes))
	retval = append(retval, slbytes...)
	retval = append(retval, superbytes...)
	return retval
}
/** Get the dialog identifier. Assume the incoming response
* corresponds to a client dialog for an outgoing request.
* Acknowledgement -- this was contributed by Lamine Brahimi.
*
*@return a string that can be used to identify the dialog.
public String GetDialogId() {
CallID cid = (CallID)this.GetCallId();
From from = (From) this.GetFrom();
String retval = cid.GetCallId();
retval += COLON + from.GetUserAtHostPort();
retval += COLON;
if (from.GetTag() != nil)
retval += from.GetTag();
return retval.toLowerCase();
}
*/
// /** Get a dialog identifier.
// * Generates a string that can be used as a dialog identifier.
// *
// * @param isServer is Set to true if this is the UAS
// * and Set to false if this is the UAC
// */
// GetDialogId generates a string that can be used as a dialog identifier:
// the call-id followed by COLON-separated user@host:port and tag parts of
// the From and To headers, lower-cased. The order of the From/To parts
// depends on whether this side is the UAS or the UAC.
//
// isServer is set to true if this is the UAS and false if this is the UAC.
func (this *SIPResponse) GetDialogId(isServer bool) string {
	cid := this.GetCallId()
	from := this.GetFrom().(*header.From)
	to := this.GetTo().(*header.To)
	var retval bytes.Buffer
	retval.WriteString(cid.GetCallId())
	if !isServer {
		// UAC view: local (From) part first, then remote (To).
		retval.WriteString(core.SIPSeparatorNames_COLON)
		retval.WriteString(from.GetUserAtHostPort())
		if from.GetTag() != "" {
			retval.WriteString(core.SIPSeparatorNames_COLON)
			retval.WriteString(from.GetTag())
		}
		retval.WriteString(core.SIPSeparatorNames_COLON)
		retval.WriteString(to.GetUserAtHostPort())
		if to.GetTag() != "" {
			retval.WriteString(core.SIPSeparatorNames_COLON)
			retval.WriteString(to.GetTag())
		}
	} else {
		// UAS view: To part first, then From.
		retval.WriteString(core.SIPSeparatorNames_COLON)
		retval.WriteString(to.GetUserAtHostPort())
		if to.GetTag() != "" {
			retval.WriteString(core.SIPSeparatorNames_COLON)
			retval.WriteString(to.GetTag())
		}
		retval.WriteString(core.SIPSeparatorNames_COLON)
		retval.WriteString(from.GetUserAtHostPort())
		if from.GetTag() != "" {
			retval.WriteString(core.SIPSeparatorNames_COLON)
			retval.WriteString(from.GetTag())
		}
	}
	return strings.ToLower(retval.String())
}
func (this *SIPResponse) GetDialogId2(isServer bool, toTag string) string {
cid := this.GetCallId()
from := this.GetFrom().(*header.From)
to := this.GetTo().(*header.To)
var retval bytes.Buffer
retval.WriteString(cid.GetCallId())
if !isServer {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if toTag != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(toTag)
}
} else {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(to.GetUserAtHostPort())
if toTag != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(toTag)
}
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetUserAtHostPort())
if from.GetTag() != "" {
retval.WriteString(core.SIPSeparatorNames_COLON)
retval.WriteString(from.GetTag())
}
}
return strings.ToLower(retval.String())
}
// /**
// * Create a new SIPRequest from the given response. Note that the
// * RecordRoute Via and CSeq headers are not copied from the response.
// * These have to be added by the caller.
// * This method is useful for generating ACK messages from final
// * responses.
// *
// *@param requestURI is the request URI to use.
// *@param via is the via header to use.
// *@param cseq is the cseq header to use in the generated
// * request.
// */
func (this *SIPResponse) CreateRequest(requestURI address.SipURI, via *header.Via, cseq *header.CSeq) *SIPRequest {
newRequest := NewSIPRequest()
method := cseq.GetMethod()
newRequest.SetMethod(method)
newRequest.SetRequestURI(requestURI)
if (method == "ACK" || method == "CANCEL") && this.GetTopmostVia().GetBranch() != "" {
// Use the branch id from the OK.
//try {
via.SetBranch(this.GetTopmostVia().GetBranch())
//} catch (ParseException ex) {}
}
newRequest.SetHeader(via)
newRequest.SetHeader(cseq)
for headerIterator := this.getHeaders().Front(); headerIterator != nil; headerIterator = headerIterator.Next() {
nextHeader := headerIterator.Value.(header.Header)
// Some headers do not belong in a Request ....
if this.IsResponseHeader(nextHeader) {
continue
}
if _, ok := nextHeader.(*header.ViaList); ok {
continue
}
if _, ok := nextHeader.(*header.CSeq); ok {
continue
}
if _, ok := nextHeader.(*header.ContentType); ok {
continue
}
if _, ok := nextHeader.(*header.RecordRouteList); ok {
continue
}
// if _, ok:=nextHeader.(*header.To); ok{
// nextHeader = nextHeader.clone();
// }
// else if (nextHeader instanceof From)
// nextHeader = (SIPHeader)nextHeader.clone();
// try {
newRequest.AttachHeader2(nextHeader, false)
// } catch(SIPDuplicateHeaderException e){
// e.printStackTrace();
// }
}
return newRequest
}
// /**
// * Get the encoded first line.
// *
// *@return the status line encoded.
// *
// */
func (this *SIPResponse) GetFirstLine() string {
if this.statusLine == nil {
return ""
} else {
return this.statusLine.String()
}
}
func (this *SIPResponse) SetSIPVersion(sipVersion string) {
this.statusLine.SetSipVersion(sipVersion)
}
func (this *SIPResponse) GetSIPVersion() string {
return this.statusLine.GetSipVersion()
} | // super.equals(other);
// }
| random_line_split |
clone.go | package clone
// ////////////////////////////////////////////////////////////////////////////////// //
// //
// Copyright (c) 2023 ESSENTIAL KAOS //
// Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> //
// //
// ////////////////////////////////////////////////////////////////////////////////// //
import (
"bufio"
"fmt"
"io"
"os"
"runtime"
"strings"
"time"
"github.com/essentialkaos/ek/v12/fmtc"
"github.com/essentialkaos/ek/v12/fmtutil"
"github.com/essentialkaos/ek/v12/fsutil"
"github.com/essentialkaos/ek/v12/httputil"
"github.com/essentialkaos/ek/v12/jsonutil"
"github.com/essentialkaos/ek/v12/options"
"github.com/essentialkaos/ek/v12/path"
"github.com/essentialkaos/ek/v12/pluralize"
"github.com/essentialkaos/ek/v12/progress"
"github.com/essentialkaos/ek/v12/req"
"github.com/essentialkaos/ek/v12/terminal"
"github.com/essentialkaos/ek/v12/timeutil"
"github.com/essentialkaos/ek/v12/usage"
"github.com/essentialkaos/ek/v12/usage/completion/bash"
"github.com/essentialkaos/ek/v12/usage/completion/fish"
"github.com/essentialkaos/ek/v12/usage/completion/zsh"
"github.com/essentialkaos/ek/v12/usage/man"
"github.com/essentialkaos/rbinstall/index"
"github.com/essentialkaos/rbinstall/support"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// App info
const (
APP = "RBInstall Clone"
VER = "3.0.2"
DESC = "Utility for cloning RBInstall repository"
)
// Options
const (
OPT_YES = "y:yes"
OPT_NO_COLOR = "nc:no-color"
OPT_HELP = "h:help"
OPT_VER = "v:version"
OPT_VERB_VER = "vv:verbose-version"
OPT_COMPLETION = "completion"
OPT_GENERATE_MAN = "generate-man"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// INDEX_NAME is name of index file
const INDEX_NAME = "index3.json"
// ////////////////////////////////////////////////////////////////////////////////// //
// FileInfo contains info about file with Ruby data
type FileInfo struct {
File string
URL string
OS string
Arch string
Size int64
}
// ////////////////////////////////////////////////////////////////////////////////// //
var optMap = options.Map{
OPT_YES: {Type: options.BOOL},
OPT_NO_COLOR: {Type: options.BOOL},
OPT_HELP: {Type: options.BOOL, Alias: "u:usage"},
OPT_VER: {Type: options.BOOL, Alias: "ver"},
OPT_VERB_VER: {Type: options.BOOL},
OPT_COMPLETION: {},
OPT_GENERATE_MAN: {Type: options.BOOL},
}
var colorTagApp string
var colorTagVer string
// ////////////////////////////////////////////////////////////////////////////////// //
func Run(gitRev string, gomod []byte) {
runtime.GOMAXPROCS(1)
preConfigureUI()
args, errs := options.Parse(optMap)
if len(errs) != 0 {
printError(errs[0].Error())
os.Exit(1)
}
configureUI()
switch {
case options.Has(OPT_COMPLETION):
os.Exit(printCompletion())
case options.Has(OPT_GENERATE_MAN):
printMan()
os.Exit(0)
case options.GetB(OPT_VER):
genAbout(gitRev).Print()
os.Exit(0)
case options.GetB(OPT_VERB_VER):
support.Print(APP, VER, gitRev, gomod)
os.Exit(0)
case options.GetB(OPT_HELP) || len(args) != 2:
genUsage().Print()
os.Exit(0)
}
req.SetUserAgent("RBInstall-Clone", VER)
url := args.Get(0).String()
dir := args.Get(1).String()
fmtc.NewLine()
checkArguments(url, dir)
cloneRepository(url, dir)
fmtc.NewLine()
}
// preConfigureUI preconfigures UI based on information about user terminal
// preConfigureUI preconfigures UI based on information about user terminal
func preConfigureUI() {
	// Start pessimistic: colors stay off unless the terminal looks capable.
	fmtc.DisableColors = true

	if term := os.Getenv("TERM"); term != "" {
		if strings.Contains(term, "xterm") || strings.Contains(term, "color") || term == "screen" {
			fmtc.DisableColors = false
		}
	}

	// Force colors off when stdout is not a character device, unless a
	// fake TTY is explicitly requested via FAKETTY.
	if !fsutil.IsCharacterDevice("/dev/stdout") && os.Getenv("FAKETTY") == "" {
		fmtc.DisableColors = true
	}

	// Honor the NO_COLOR convention: any non-empty value disables colors.
	if os.Getenv("NO_COLOR") != "" {
		fmtc.DisableColors = true
	}
}
// configureUI configures user interface
func configureUI() {
terminal.Prompt = "› "
terminal.TitleColorTag = "{s}"
if options.GetB(OPT_NO_COLOR) {
fmtc.DisableColors = true
}
switch {
case fmtc.IsTrueColorSupported():
colorTagApp, colorTagVer = "{#CC1E2C}", "{#CC1E2C}"
case fmtc.Is256ColorsSupported():
colorTagApp, colorTagVer = "{#160}", "{#160}"
default:
colorTagApp, colorTagVer = "{r}", "{r}"
}
}
// checkArguments checks command line arguments
// checkArguments validates the repository URL and the target directory,
// reporting the first failed check via printErrorAndExit.
func checkArguments(url, dir string) {
	if !httputil.IsURL(url) {
		printErrorAndExit("Url %s doesn't look like valid url", url)
	}

	// printErrorAndExit terminates the process, so only the first failing
	// check is ever reported; lazy case evaluation preserves that.
	switch {
	case !fsutil.IsExist(dir):
		printErrorAndExit("Directory %s does not exist", dir)
	case !fsutil.IsDir(dir):
		printErrorAndExit("Target %s is not a directory", dir)
	case !fsutil.IsReadable(dir):
		printErrorAndExit("Directory %s is not readable", dir)
	case !fsutil.IsExecutable(dir):
		printErrorAndExit("Directory %s is not executable", dir)
	}
}
// cloneRepository start repository clone process
// cloneRepository start repository clone process
func cloneRepository(url, dir string) {
	fmtc.Printf("Fetching index from {*}%s{!}…\n", url)

	i, err := fetchIndex(url)

	if err != nil {
		printErrorAndExit(err.Error())
	}

	if i.Meta.Items == 0 {
		printErrorAndExit("Repository is empty")
	}

	printRepositoryInfo(i)

	// Compare the remote index UUID with the locally stored one; a match
	// means the local mirror is already up to date.
	uuid := getCurrentIndexUUID(dir)

	if uuid == i.UUID {
		fmtc.Println("{g}Looks like you already have the same set of data{!}")
		return
	}

	// Interactive confirmation, skipped when -y/--yes was given.
	// Any read error or a negative answer aborts without cloning.
	if !options.GetB(OPT_YES) {
		ok, err := terminal.ReadAnswer("Clone this repository?", "N")
		fmtc.NewLine()

		if !ok || err != nil {
			os.Exit(0)
		}
	}

	downloadRepositoryData(i, url, dir)
	saveIndex(i, dir)

	fmtc.NewLine()
	fmtc.Printf("{g}Repository successfully cloned to {g*}%s{!}\n", dir)
}
// printRepositoryInfo prints basic info about repository data
// printRepositoryInfo prints basic info about repository data
func printRepositoryInfo(i *index.Index) {
	fmtutil.Separator(false, "REPOSITORY INFO")

	updated := timeutil.Format(time.Unix(i.Meta.Created, 0), "%Y/%m/%d %H:%M:%S")

	fmtc.Printf("  {*}UUID{!}: %s\n", i.UUID)
	fmtc.Printf("  {*}Updated{!}: %s\n\n", updated)

	for _, distName := range i.Data.Keys() {
		// NOTE(review): size/items are reset per distribution but printed
		// inside the per-arch loop below, so each arch line shows totals
		// accumulated across all archs seen so far for this dist — confirm
		// whether per-arch counters were intended instead.
		size, items := int64(0), 0

		for archName, arch := range i.Data[distName] {
			for _, category := range arch {
				for _, version := range category {
					size += version.Size
					items++

					// Variations (e.g. alternative builds) count toward
					// both the item count and the total size.
					if len(version.Variations) != 0 {
						for _, variation := range version.Variations {
							items++
							size += variation.Size
						}
					}
				}
			}

			fmtc.Printf(
				"  {c*}%s{!}{c}/%s:{!} %3s {s-}|{!} %s\n", distName, archName,
				fmtutil.PrettyNum(items), fmtutil.PrettySize(size, " "),
			)
		}
	}

	fmtc.NewLine()
	fmtc.Printf(
		"  {*}Total:{!} %s {s-}|{!} %s\n",
		fmtutil.PrettyNum(i.Meta.Items),
		fmtutil.PrettySize(i.Meta.Size, " "),
	)

	fmtutil.Separator(false)
}
// fetchIndex downloads remote repository index
func fetchIndex(url string) (*index.Index, error) {
resp, err := req.Request{URL: url + "/" + INDEX_NAME}.Get()
if err != nil {
return nil, fmtc.Errorf("Can't fetch repository index: %v", err)
}
if resp.StatusCode != 200 {
return nil, fmtc.Errorf("Can't fetch repository index: server return status code %d", resp.StatusCode)
}
repoIndex := &index.Index{}
err = resp.JSON(repoIndex)
if err != nil {
return nil, fmtc.Errorf("Can't decode repository index: %v", err)
}
return repoIndex, nil
}
// downloadRepositoryData downloads all files from repository
func downloadRepositoryData(i *index.Index, url, dir string) {
items := getItems(i, url)
pb := progress.New(int64(len(items)), "Starting…")
pbs := progress.DefaultSettings
pbs.IsSize = false
pbs.ShowSpeed = false
pbs.ShowRemaining = false
pbs.ShowName = false
pbs.NameColorTag = "{*}"
pbs.BarFgColorTag = colorTagApp
pbs.PercentColorTag = ""
pbs.RemainingColorTag = "{s}"
pb.UpdateSettings(pbs)
pb.Start()
fmtc.Printf(
"Downloading %s %s from remote repository…\n",
fmtutil.PrettyNum(len(items)),
pluralize.Pluralize(len(items), "file", "files"),
)
for _, item := range items {
fileDir := path.Join(dir, item.OS, item.Arch)
filePath := path.Join(dir, item.OS, item.Arch, item.File)
if !fsutil.IsExist(fileDir) {
err := os.MkdirAll(fileDir, 0755)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("Can't create directory %s: %v", fileDir, err)
}
}
if fsutil.IsExist(filePath) {
fileSize := fsutil.GetSize(filePath)
if fileSize == item.Size {
pb.Add(1)
continue
}
}
err := downloadFile(item.URL, filePath)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("%v", err)
}
pb.Add(1)
}
pb.Finish()
fmtc.Printf("\n{g}Repository successfully cloned into %s{!}\n")
}
// getItems returns slice with info about items in repository
func getItems(repoIndex *index.Index, url string) []FileInfo {
var items []FileInfo
for _, os := range repoIndex.Data.Keys() {
for _, arch := range repoIndex.Data[os].Keys() {
for _, category := range repoIndex.Data[os][arch].Keys() {
for _, version := range repoIndex.Data[os][arch][category] {
items = append(items, FileInfo{
File: version.File,
URL: url + "/" + version.Path + "/" + version.File,
OS: os,
Arch: arch,
Size: version.Size,
})
if len(version.Variations) != 0 {
for _, subVersion := range version.Variations {
items = append(items, FileInfo{
File: subVersion.File,
URL: url + "/" + subVersion.Path + "/" + subVersion.File,
OS: os,
Arch: arch,
Size: subVersion.Size,
})
}
}
}
}
}
}
return items
}
// downloadFile downloads and saves remote file
func downloadFile(url, output string) error {
if fsutil.IsExist(output) {
os.Remove(output)
}
fd, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmtc.Errorf("Can't create file: %v", err)
}
defer fd.Close()
resp, err := req.Request{URL: url}.Get()
if err != nil {
return fmtc.Errorf("Can't download file: %v", err)
}
if resp.StatusCode != 200 {
return fmtc.Errorf("Can't download file: server return status code %d", resp.StatusCode)
}
w := bufio.NewWriter(fd)
_, err = io.Copy(w, resp.Body)
w.Flush()
if err != nil {
return fmtc.Errorf("Can't write file: %v", err)
}
return nil
}
// saveIndex encodes index to JSON format and saves it into the file
func saveIndex(repoIndex *index.Index, dir string) {
indexPath := path.Join(dir, INDEX_NAME)
fmtc.Printf("Saving index… ")
err := jsonutil.Write(indexPath, repoIndex)
if err != nil {
fmtc.Println("{r}ERROR{!}")
printErrorAndExit("Can't save index as %s: %v", indexPath, err)
}
fmtc.Println("{g}DONE{!}")
}
// getCurrentIndexUUID returns current index UUID (if exist)
func getCurrentIndexUUID(dir string) string {
indexFile := path.Join(dir, INDEX_NAME)
if !fsutil.IsExist(indexFile) {
return ""
}
i := &index.Index{}
if jsonutil.Read(indexFile, i) != nil {
return ""
}
return i.UUID
}
// printError prints error message to console
func printError(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
}
// printError prints warning message to console
func printWarn(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{y}▲ "+f+"{!}\n", a...)
}
// printErrorAndExit print error message and exit with non-zero exit code
func printErrorAndExit(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
fmtc.NewLine()
os.Exit(1)
}
// ////////////////////////////////////////////////////////////////////////////////// //
// printCompletion prints completion for given shell
func printCompletion() int {
info := genUsage()
switch options.GetS(OPT_COMPLETION) {
case "bash":
fmt.Printf(bash.Generate(info, "rbinstall-clone"))
case "fish":
fmt.Printf(fish.Generate(info, "rbinstall-clone"))
case "zsh":
fmt.Printf(zsh.Generate(info, optMap, "rbinstall-clone"))
default:
return 1
}
return 0
}
// printMan prints man page
func printMan() {
fmt.Println(
man.Generate(
genUsage(),
genAbout(""),
),
)
}
// genUsage generates usage info
func genUsage() *usage.Info {
info := usage.NewInfo("", "url", "path")
info.AppNameColorTag = "{*}" + colorTagApp
info.AddOption(OPT_YES, `Answer "yes" to all questions`)
info.AddOption(OPT_NO_COLOR, "Disable colors in output")
info.AddOption(OPT_HELP, "Show this help message") | "Clone EK repository to /path/to/clone",
)
return info
}
// genAbout generates info about version
func genAbout(gitRev string) *usage.About {
about := &usage.About{
App: APP,
Version: VER,
Desc: DESC,
Year: 2006,
Owner: "ESSENTIAL KAOS",
License: "Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0>",
AppNameColorTag: "{*}" + colorTagApp,
VersionColorTag: colorTagVer,
}
if gitRev != "" {
about.Build = "git:" + gitRev
}
return about
} | info.AddOption(OPT_VER, "Show version")
info.AddExample(
"https://rbinstall.kaos.st /path/to/clone", | random_line_split |
clone.go | package clone
// ////////////////////////////////////////////////////////////////////////////////// //
// //
// Copyright (c) 2023 ESSENTIAL KAOS //
// Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> //
// //
// ////////////////////////////////////////////////////////////////////////////////// //
import (
"bufio"
"fmt"
"io"
"os"
"runtime"
"strings"
"time"
"github.com/essentialkaos/ek/v12/fmtc"
"github.com/essentialkaos/ek/v12/fmtutil"
"github.com/essentialkaos/ek/v12/fsutil"
"github.com/essentialkaos/ek/v12/httputil"
"github.com/essentialkaos/ek/v12/jsonutil"
"github.com/essentialkaos/ek/v12/options"
"github.com/essentialkaos/ek/v12/path"
"github.com/essentialkaos/ek/v12/pluralize"
"github.com/essentialkaos/ek/v12/progress"
"github.com/essentialkaos/ek/v12/req"
"github.com/essentialkaos/ek/v12/terminal"
"github.com/essentialkaos/ek/v12/timeutil"
"github.com/essentialkaos/ek/v12/usage"
"github.com/essentialkaos/ek/v12/usage/completion/bash"
"github.com/essentialkaos/ek/v12/usage/completion/fish"
"github.com/essentialkaos/ek/v12/usage/completion/zsh"
"github.com/essentialkaos/ek/v12/usage/man"
"github.com/essentialkaos/rbinstall/index"
"github.com/essentialkaos/rbinstall/support"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// App info
const (
APP = "RBInstall Clone"
VER = "3.0.2"
DESC = "Utility for cloning RBInstall repository"
)
// Options
const (
OPT_YES = "y:yes"
OPT_NO_COLOR = "nc:no-color"
OPT_HELP = "h:help"
OPT_VER = "v:version"
OPT_VERB_VER = "vv:verbose-version"
OPT_COMPLETION = "completion"
OPT_GENERATE_MAN = "generate-man"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// INDEX_NAME is name of index file
const INDEX_NAME = "index3.json"
// ////////////////////////////////////////////////////////////////////////////////// //
// FileInfo contains info about file with Ruby data
type FileInfo struct {
File string
URL string
OS string
Arch string
Size int64
}
// ////////////////////////////////////////////////////////////////////////////////// //
var optMap = options.Map{
OPT_YES: {Type: options.BOOL},
OPT_NO_COLOR: {Type: options.BOOL},
OPT_HELP: {Type: options.BOOL, Alias: "u:usage"},
OPT_VER: {Type: options.BOOL, Alias: "ver"},
OPT_VERB_VER: {Type: options.BOOL},
OPT_COMPLETION: {},
OPT_GENERATE_MAN: {Type: options.BOOL},
}
var colorTagApp string
var colorTagVer string
// ////////////////////////////////////////////////////////////////////////////////// //
func Run(gitRev string, gomod []byte) {
runtime.GOMAXPROCS(1)
preConfigureUI()
args, errs := options.Parse(optMap)
if len(errs) != 0 {
printError(errs[0].Error())
os.Exit(1)
}
configureUI()
switch {
case options.Has(OPT_COMPLETION):
os.Exit(printCompletion())
case options.Has(OPT_GENERATE_MAN):
printMan()
os.Exit(0)
case options.GetB(OPT_VER):
genAbout(gitRev).Print()
os.Exit(0)
case options.GetB(OPT_VERB_VER):
support.Print(APP, VER, gitRev, gomod)
os.Exit(0)
case options.GetB(OPT_HELP) || len(args) != 2:
genUsage().Print()
os.Exit(0)
}
req.SetUserAgent("RBInstall-Clone", VER)
url := args.Get(0).String()
dir := args.Get(1).String()
fmtc.NewLine()
checkArguments(url, dir)
cloneRepository(url, dir)
fmtc.NewLine()
}
// preConfigureUI preconfigures UI based on information about user terminal
func preConfigureUI() {
term := os.Getenv("TERM")
fmtc.DisableColors = true
if term != "" {
switch {
case strings.Contains(term, "xterm"),
strings.Contains(term, "color"),
term == "screen":
fmtc.DisableColors = false
}
}
if !fsutil.IsCharacterDevice("/dev/stdout") && os.Getenv("FAKETTY") == "" {
fmtc.DisableColors = true
}
if os.Getenv("NO_COLOR") != "" {
fmtc.DisableColors = true
}
}
// configureUI configures user interface
func configureUI() {
terminal.Prompt = "› "
terminal.TitleColorTag = "{s}"
if options.GetB(OPT_NO_COLOR) {
fmtc.DisableColors = true
}
switch {
case fmtc.IsTrueColorSupported():
colorTagApp, colorTagVer = "{#CC1E2C}", "{#CC1E2C}"
case fmtc.Is256ColorsSupported():
colorTagApp, colorTagVer = "{#160}", "{#160}"
default:
colorTagApp, colorTagVer = "{r}", "{r}"
}
}
// checkArguments checks command line arguments
func checkArguments(url, dir string) {
if !httputil.IsURL(url) {
printErrorAndExit("Url %s doesn't look like valid url", url)
}
if !fsutil.IsExist(dir) {
printErrorAndExit("Directory %s does not exist", dir)
}
if !fsutil.IsDir(dir) {
printErrorAndExit("Target %s is not a directory", dir)
}
if !fsutil.IsReadable(dir) {
printErrorAndExit("Directory %s is not readable", dir)
}
if !fsutil.IsExecutable(dir) {
printErrorAndExit("Directory %s is not executable", dir)
}
}
// cloneRepository start repository clone process
func cloneRepository(url, dir string) {
fmtc.Printf("Fetching index from {*}%s{!}…\n", url)
i, err := fetchIndex(url)
if err != nil {
printErrorAndExit(err.Error())
}
if i.Meta.Items == 0 {
printErrorAndExit("Repository is empty")
}
printRepositoryInfo(i)
uuid := getCurrentIndexUUID(dir)
if uuid == i.UUID {
fmtc.Println("{g}Looks like you already have the same set of data{!}")
return
}
if !options.GetB(OPT_YES) {
ok, err := terminal.ReadAnswer("Clone this repository?", "N")
fmtc.NewLine()
if !ok || err != nil {
os.Exit(0)
}
}
downloadRepositoryData(i, url, dir)
saveIndex(i, dir)
fmtc.NewLine()
fmtc.Printf("{g}Repository successfully cloned to {g*}%s{!}\n", dir)
}
// printRepositoryInfo prints basic info about repository data
func printRepositoryInfo(i *index.Index) {
fmtutil.Separator(false, "REPOSITORY INFO")
updated := timeutil.Format(time.Unix(i.Meta.Created, 0), "%Y/%m/%d %H:%M:%S")
fmtc.Printf(" {*}UUID{!}: %s\n", i.UUID)
fmtc.Printf(" {*}Updated{!}: %s\n\n", updated)
for _, distName := range i.Data.Keys() {
size, items := int64(0), 0
for archName, arch := range i.Data[distName] {
for _, category := range arch {
for _, version := range category {
size += version.Size
items++
if len(version.Variations) != 0 {
for _, variation := range version.Variations {
items++
size += variation.Size
}
}
}
}
fmtc.Printf(
" {c*}%s{!}{c}/%s:{!} %3s {s-}|{!} %s\n", distName, archName,
fmtutil.PrettyNum(items), fmtutil.PrettySize(size, " "),
)
}
}
fmtc.NewLine()
fmtc.Printf(
" {*}Total:{!} %s {s-}|{!} %s\n",
fmtutil.PrettyNum(i.Meta.Items),
fmtutil.PrettySize(i.Meta.Size, " "),
)
fmtutil.Separator(false)
}
// fetchIndex downloads remote repository index
func fetchIndex(url string) (*index.Index, error) {
resp, err := req.Request{URL: url + "/" + INDEX_NAME}.Get()
if err != nil {
return nil, fmtc.Errorf("Can't fetch repository index: %v", err)
}
if resp.StatusCode != 200 {
return nil, fmtc.Errorf("Can't fetch repository index: server return status code %d", resp.StatusCode)
}
repoIndex := &index.Index{}
err = resp.JSON(repoIndex)
if err != nil {
return nil, fmtc.Errorf("Can't decode repository index: %v", err)
}
return repoIndex, nil
}
// downloadRepositoryData downloads all files from repository
func downloadRepositoryData(i *index.Index, url, dir string) {
items := getItems(i, url)
pb := progress.New(int64(len(items)), "Starting…")
pbs := progress.DefaultSettings
pbs.IsSize = false
pbs.ShowSpeed = false
pbs.ShowRemaining = false
pbs.ShowName = false
pbs.NameColorTag = "{*}"
pbs.BarFgColorTag = colorTagApp
pbs.PercentColorTag = ""
pbs.RemainingColorTag = "{s}"
pb.UpdateSettings(pbs)
pb.Start()
fmtc.Printf(
"Downloading %s %s from remote repository…\n",
fmtutil.PrettyNum(len(items)),
pluralize.Pluralize(len(items), "file", "files"),
)
for _, item := range items {
fileDir := path.Join(dir, item.OS, item.Arch)
filePath := path.Join(dir, item.OS, item.Arch, item.File)
if !fsutil.IsExist(fileDir) {
err := os.MkdirAll(fileDir, 0755)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("Can't create directory %s: %v", fileDir, err)
}
}
if fsutil.IsExist(filePath) {
fileSize := fsutil.GetSize(filePath)
if fileSize == item.Size {
pb.Add(1)
continue
}
}
err := downloadFile(item.URL, filePath)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("%v", err)
}
pb.Add(1)
}
pb.Finish()
fmtc.Printf("\n{g}Repository successfully cloned into %s{!}\n")
}
// getItems returns slice with info about items in repository
func getItems(repoIndex *index.Index, url string) []FileInfo {
var items []FileInfo
for _, os := range repoIndex.Data.Keys() {
for _, arch := range repoIndex.Data[os].Keys() {
for _, category := range repoIndex.Data[os][arch].Keys() {
for _, version := range repoIndex.Data[os][arch][category] {
items = append(items, FileInfo{
File: version.File,
URL: url + "/" + version.Path + "/" + version.File,
OS: os,
Arch: arch,
Size: version.Size,
})
if len(version.Variations) != 0 {
for _, subVersion := range version.Variations {
items = append(items, FileInfo{
File: subVersion.File,
URL: url + "/" + subVersion.Path + "/" + subVersion.File,
OS: os,
Arch: arch,
Size: subVersion.Size,
})
}
}
}
}
}
}
return items
}
// downloadFile downloads and saves remote file
func downloadFile(url, output string) error {
if fsutil.IsExist(output) {
os.Remove(output)
}
fd, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmtc.Errorf("Can't create file: %v", err)
}
defer fd.Close()
resp, err := req.Request{URL: url}.Get()
if err != nil {
return fmtc.Errorf("Can't download file: %v", err)
}
if resp.StatusCode != 200 {
return fmtc.Errorf("Can't download file: server return status code %d", resp.StatusCode)
}
w := bufio.NewWriter(fd)
_, err = io.Copy(w, resp.Body)
w.Flush()
if err != nil {
return fmtc.Errorf("Can't write file: %v", err)
}
return nil
}
// saveIndex encodes index to JSON format and saves it into the file
func saveIndex(repoIndex *index.Index, dir string) {
indexPath := path.Join(dir, INDEX_NAME)
fmtc.Printf("Saving index… ")
err := jsonutil.Write(indexPath, repoIndex)
if err != nil {
fmtc.Println("{r}ERROR{!}")
printErrorAndExit("Can't save index as %s: %v", indexPath, err)
}
fmtc.Println("{g}DONE{!}")
}
// getCurrentIndexUUID returns current index UUID (if exist)
func getCurrentIndexUUID(dir string) string {
indexFile := path.Join(dir, INDEX_NAME)
if !fsutil.IsExist(indexFile) {
return ""
}
i := &index.Index{}
if jsonutil.Read(indexFile, i) != nil {
return ""
}
return i.UUID
}
// printError prints error message to console
func printError(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
}
// printError prints warning message to console
func printWarn(f | ...interface{}) {
fmtc.Fprintf(os.Stderr, "{y}▲ "+f+"{!}\n", a...)
}
// printErrorAndExit print error message and exit with non-zero exit code
func printErrorAndExit(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
fmtc.NewLine()
os.Exit(1)
}
// ////////////////////////////////////////////////////////////////////////////////// //
// printCompletion prints completion for given shell
func printCompletion() int {
info := genUsage()
switch options.GetS(OPT_COMPLETION) {
case "bash":
fmt.Printf(bash.Generate(info, "rbinstall-clone"))
case "fish":
fmt.Printf(fish.Generate(info, "rbinstall-clone"))
case "zsh":
fmt.Printf(zsh.Generate(info, optMap, "rbinstall-clone"))
default:
return 1
}
return 0
}
// printMan prints man page
func printMan() {
fmt.Println(
man.Generate(
genUsage(),
genAbout(""),
),
)
}
// genUsage generates usage info
func genUsage() *usage.Info {
info := usage.NewInfo("", "url", "path")
info.AppNameColorTag = "{*}" + colorTagApp
info.AddOption(OPT_YES, `Answer "yes" to all questions`)
info.AddOption(OPT_NO_COLOR, "Disable colors in output")
info.AddOption(OPT_HELP, "Show this help message")
info.AddOption(OPT_VER, "Show version")
info.AddExample(
"https://rbinstall.kaos.st /path/to/clone",
"Clone EK repository to /path/to/clone",
)
return info
}
// genAbout generates info about version
func genAbout(gitRev string) *usage.About {
about := &usage.About{
App: APP,
Version: VER,
Desc: DESC,
Year: 2006,
Owner: "ESSENTIAL KAOS",
License: "Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0>",
AppNameColorTag: "{*}" + colorTagApp,
VersionColorTag: colorTagVer,
}
if gitRev != "" {
about.Build = "git:" + gitRev
}
return about
}
| string, a | identifier_name |
clone.go | package clone
// ////////////////////////////////////////////////////////////////////////////////// //
// //
// Copyright (c) 2023 ESSENTIAL KAOS //
// Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> //
// //
// ////////////////////////////////////////////////////////////////////////////////// //
import (
"bufio"
"fmt"
"io"
"os"
"runtime"
"strings"
"time"
"github.com/essentialkaos/ek/v12/fmtc"
"github.com/essentialkaos/ek/v12/fmtutil"
"github.com/essentialkaos/ek/v12/fsutil"
"github.com/essentialkaos/ek/v12/httputil"
"github.com/essentialkaos/ek/v12/jsonutil"
"github.com/essentialkaos/ek/v12/options"
"github.com/essentialkaos/ek/v12/path"
"github.com/essentialkaos/ek/v12/pluralize"
"github.com/essentialkaos/ek/v12/progress"
"github.com/essentialkaos/ek/v12/req"
"github.com/essentialkaos/ek/v12/terminal"
"github.com/essentialkaos/ek/v12/timeutil"
"github.com/essentialkaos/ek/v12/usage"
"github.com/essentialkaos/ek/v12/usage/completion/bash"
"github.com/essentialkaos/ek/v12/usage/completion/fish"
"github.com/essentialkaos/ek/v12/usage/completion/zsh"
"github.com/essentialkaos/ek/v12/usage/man"
"github.com/essentialkaos/rbinstall/index"
"github.com/essentialkaos/rbinstall/support"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// App info
const (
APP = "RBInstall Clone"
VER = "3.0.2"
DESC = "Utility for cloning RBInstall repository"
)
// Options
const (
OPT_YES = "y:yes"
OPT_NO_COLOR = "nc:no-color"
OPT_HELP = "h:help"
OPT_VER = "v:version"
OPT_VERB_VER = "vv:verbose-version"
OPT_COMPLETION = "completion"
OPT_GENERATE_MAN = "generate-man"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// INDEX_NAME is name of index file
const INDEX_NAME = "index3.json"
// ////////////////////////////////////////////////////////////////////////////////// //
// FileInfo contains info about file with Ruby data
type FileInfo struct {
File string
URL string
OS string
Arch string
Size int64
}
// ////////////////////////////////////////////////////////////////////////////////// //
var optMap = options.Map{
OPT_YES: {Type: options.BOOL},
OPT_NO_COLOR: {Type: options.BOOL},
OPT_HELP: {Type: options.BOOL, Alias: "u:usage"},
OPT_VER: {Type: options.BOOL, Alias: "ver"},
OPT_VERB_VER: {Type: options.BOOL},
OPT_COMPLETION: {},
OPT_GENERATE_MAN: {Type: options.BOOL},
}
var colorTagApp string
var colorTagVer string
// ////////////////////////////////////////////////////////////////////////////////// //
func Run(gitRev string, gomod []byte) {
runtime.GOMAXPROCS(1)
preConfigureUI()
args, errs := options.Parse(optMap)
if len(errs) != 0 {
printError(errs[0].Error())
os.Exit(1)
}
configureUI()
switch {
case options.Has(OPT_COMPLETION):
os.Exit(printCompletion())
case options.Has(OPT_GENERATE_MAN):
printMan()
os.Exit(0)
case options.GetB(OPT_VER):
genAbout(gitRev).Print()
os.Exit(0)
case options.GetB(OPT_VERB_VER):
support.Print(APP, VER, gitRev, gomod)
os.Exit(0)
case options.GetB(OPT_HELP) || len(args) != 2:
genUsage().Print()
os.Exit(0)
}
req.SetUserAgent("RBInstall-Clone", VER)
url := args.Get(0).String()
dir := args.Get(1).String()
fmtc.NewLine()
checkArguments(url, dir)
cloneRepository(url, dir)
fmtc.NewLine()
}
// preConfigureUI preconfigures UI based on information about user terminal
func preConfigureUI() {
term := os.Getenv("TERM")
fmtc.DisableColors = true
if term != "" {
switch {
case strings.Contains(term, "xterm"),
strings.Contains(term, "color"),
term == "screen":
fmtc.DisableColors = false
}
}
if !fsutil.IsCharacterDevice("/dev/stdout") && os.Getenv("FAKETTY") == "" {
fmtc.DisableColors = true
}
if os.Getenv("NO_COLOR") != "" {
fmtc.DisableColors = true
}
}
// configureUI configures user interface
func configureUI() {
terminal.Prompt = "› "
terminal.TitleColorTag = "{s}"
if options.GetB(OPT_NO_COLOR) {
fmtc.DisableColors = true
}
switch {
case fmtc.IsTrueColorSupported():
colorTagApp, colorTagVer = "{#CC1E2C}", "{#CC1E2C}"
case fmtc.Is256ColorsSupported():
colorTagApp, colorTagVer = "{#160}", "{#160}"
default:
colorTagApp, colorTagVer = "{r}", "{r}"
}
}
// checkArguments checks command line arguments
func checkArguments(url, dir string) {
if !httputil.IsURL(url) {
printErrorAndExit("Url %s doesn't look like valid url", url)
}
if !fsutil.IsExist(dir) {
printErrorAndExit("Directory %s does not exist", dir)
}
if !fsutil.IsDir(dir) {
printErrorAndExit("Target %s is not a directory", dir)
}
if !fsutil.IsReadable(dir) {
| if !fsutil.IsExecutable(dir) {
printErrorAndExit("Directory %s is not executable", dir)
}
}
// cloneRepository start repository clone process
func cloneRepository(url, dir string) {
fmtc.Printf("Fetching index from {*}%s{!}…\n", url)
i, err := fetchIndex(url)
if err != nil {
printErrorAndExit(err.Error())
}
if i.Meta.Items == 0 {
printErrorAndExit("Repository is empty")
}
printRepositoryInfo(i)
uuid := getCurrentIndexUUID(dir)
if uuid == i.UUID {
fmtc.Println("{g}Looks like you already have the same set of data{!}")
return
}
if !options.GetB(OPT_YES) {
ok, err := terminal.ReadAnswer("Clone this repository?", "N")
fmtc.NewLine()
if !ok || err != nil {
os.Exit(0)
}
}
downloadRepositoryData(i, url, dir)
saveIndex(i, dir)
fmtc.NewLine()
fmtc.Printf("{g}Repository successfully cloned to {g*}%s{!}\n", dir)
}
// printRepositoryInfo prints basic info about repository data
func printRepositoryInfo(i *index.Index) {
fmtutil.Separator(false, "REPOSITORY INFO")
updated := timeutil.Format(time.Unix(i.Meta.Created, 0), "%Y/%m/%d %H:%M:%S")
fmtc.Printf(" {*}UUID{!}: %s\n", i.UUID)
fmtc.Printf(" {*}Updated{!}: %s\n\n", updated)
for _, distName := range i.Data.Keys() {
size, items := int64(0), 0
for archName, arch := range i.Data[distName] {
for _, category := range arch {
for _, version := range category {
size += version.Size
items++
if len(version.Variations) != 0 {
for _, variation := range version.Variations {
items++
size += variation.Size
}
}
}
}
fmtc.Printf(
" {c*}%s{!}{c}/%s:{!} %3s {s-}|{!} %s\n", distName, archName,
fmtutil.PrettyNum(items), fmtutil.PrettySize(size, " "),
)
}
}
fmtc.NewLine()
fmtc.Printf(
" {*}Total:{!} %s {s-}|{!} %s\n",
fmtutil.PrettyNum(i.Meta.Items),
fmtutil.PrettySize(i.Meta.Size, " "),
)
fmtutil.Separator(false)
}
// fetchIndex downloads remote repository index
func fetchIndex(url string) (*index.Index, error) {
resp, err := req.Request{URL: url + "/" + INDEX_NAME}.Get()
if err != nil {
return nil, fmtc.Errorf("Can't fetch repository index: %v", err)
}
if resp.StatusCode != 200 {
return nil, fmtc.Errorf("Can't fetch repository index: server return status code %d", resp.StatusCode)
}
repoIndex := &index.Index{}
err = resp.JSON(repoIndex)
if err != nil {
return nil, fmtc.Errorf("Can't decode repository index: %v", err)
}
return repoIndex, nil
}
// downloadRepositoryData downloads all files from repository
func downloadRepositoryData(i *index.Index, url, dir string) {
items := getItems(i, url)
pb := progress.New(int64(len(items)), "Starting…")
pbs := progress.DefaultSettings
pbs.IsSize = false
pbs.ShowSpeed = false
pbs.ShowRemaining = false
pbs.ShowName = false
pbs.NameColorTag = "{*}"
pbs.BarFgColorTag = colorTagApp
pbs.PercentColorTag = ""
pbs.RemainingColorTag = "{s}"
pb.UpdateSettings(pbs)
pb.Start()
fmtc.Printf(
"Downloading %s %s from remote repository…\n",
fmtutil.PrettyNum(len(items)),
pluralize.Pluralize(len(items), "file", "files"),
)
for _, item := range items {
fileDir := path.Join(dir, item.OS, item.Arch)
filePath := path.Join(dir, item.OS, item.Arch, item.File)
if !fsutil.IsExist(fileDir) {
err := os.MkdirAll(fileDir, 0755)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("Can't create directory %s: %v", fileDir, err)
}
}
if fsutil.IsExist(filePath) {
fileSize := fsutil.GetSize(filePath)
if fileSize == item.Size {
pb.Add(1)
continue
}
}
err := downloadFile(item.URL, filePath)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("%v", err)
}
pb.Add(1)
}
pb.Finish()
fmtc.Printf("\n{g}Repository successfully cloned into %s{!}\n")
}
// getItems returns slice with info about items in repository
func getItems(repoIndex *index.Index, url string) []FileInfo {
var items []FileInfo
for _, os := range repoIndex.Data.Keys() {
for _, arch := range repoIndex.Data[os].Keys() {
for _, category := range repoIndex.Data[os][arch].Keys() {
for _, version := range repoIndex.Data[os][arch][category] {
items = append(items, FileInfo{
File: version.File,
URL: url + "/" + version.Path + "/" + version.File,
OS: os,
Arch: arch,
Size: version.Size,
})
if len(version.Variations) != 0 {
for _, subVersion := range version.Variations {
items = append(items, FileInfo{
File: subVersion.File,
URL: url + "/" + subVersion.Path + "/" + subVersion.File,
OS: os,
Arch: arch,
Size: subVersion.Size,
})
}
}
}
}
}
}
return items
}
// downloadFile downloads and saves remote file
func downloadFile(url, output string) error {
if fsutil.IsExist(output) {
os.Remove(output)
}
fd, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmtc.Errorf("Can't create file: %v", err)
}
defer fd.Close()
resp, err := req.Request{URL: url}.Get()
if err != nil {
return fmtc.Errorf("Can't download file: %v", err)
}
if resp.StatusCode != 200 {
return fmtc.Errorf("Can't download file: server return status code %d", resp.StatusCode)
}
w := bufio.NewWriter(fd)
_, err = io.Copy(w, resp.Body)
w.Flush()
if err != nil {
return fmtc.Errorf("Can't write file: %v", err)
}
return nil
}
// saveIndex encodes index to JSON format and saves it into the file
func saveIndex(repoIndex *index.Index, dir string) {
indexPath := path.Join(dir, INDEX_NAME)
fmtc.Printf("Saving index… ")
err := jsonutil.Write(indexPath, repoIndex)
if err != nil {
fmtc.Println("{r}ERROR{!}")
printErrorAndExit("Can't save index as %s: %v", indexPath, err)
}
fmtc.Println("{g}DONE{!}")
}
// getCurrentIndexUUID returns current index UUID (if exist)
func getCurrentIndexUUID(dir string) string {
indexFile := path.Join(dir, INDEX_NAME)
if !fsutil.IsExist(indexFile) {
return ""
}
i := &index.Index{}
if jsonutil.Read(indexFile, i) != nil {
return ""
}
return i.UUID
}
// printError prints error message to console
func printError(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
}
// printError prints warning message to console
func printWarn(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{y}▲ "+f+"{!}\n", a...)
}
// printErrorAndExit print error message and exit with non-zero exit code
func printErrorAndExit(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
fmtc.NewLine()
os.Exit(1)
}
// ////////////////////////////////////////////////////////////////////////////////// //
// printCompletion prints completion for given shell
func printCompletion() int {
info := genUsage()
switch options.GetS(OPT_COMPLETION) {
case "bash":
fmt.Printf(bash.Generate(info, "rbinstall-clone"))
case "fish":
fmt.Printf(fish.Generate(info, "rbinstall-clone"))
case "zsh":
fmt.Printf(zsh.Generate(info, optMap, "rbinstall-clone"))
default:
return 1
}
return 0
}
// printMan prints man page
func printMan() {
fmt.Println(
man.Generate(
genUsage(),
genAbout(""),
),
)
}
// genUsage generates usage info
func genUsage() *usage.Info {
info := usage.NewInfo("", "url", "path")
info.AppNameColorTag = "{*}" + colorTagApp
info.AddOption(OPT_YES, `Answer "yes" to all questions`)
info.AddOption(OPT_NO_COLOR, "Disable colors in output")
info.AddOption(OPT_HELP, "Show this help message")
info.AddOption(OPT_VER, "Show version")
info.AddExample(
"https://rbinstall.kaos.st /path/to/clone",
"Clone EK repository to /path/to/clone",
)
return info
}
// genAbout generates info about version
func genAbout(gitRev string) *usage.About {
about := &usage.About{
App: APP,
Version: VER,
Desc: DESC,
Year: 2006,
Owner: "ESSENTIAL KAOS",
License: "Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0>",
AppNameColorTag: "{*}" + colorTagApp,
VersionColorTag: colorTagVer,
}
if gitRev != "" {
about.Build = "git:" + gitRev
}
return about
}
| printErrorAndExit("Directory %s is not readable", dir)
}
| conditional_block |
clone.go | package clone
// ////////////////////////////////////////////////////////////////////////////////// //
// //
// Copyright (c) 2023 ESSENTIAL KAOS //
// Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> //
// //
// ////////////////////////////////////////////////////////////////////////////////// //
import (
"bufio"
"fmt"
"io"
"os"
"runtime"
"strings"
"time"
"github.com/essentialkaos/ek/v12/fmtc"
"github.com/essentialkaos/ek/v12/fmtutil"
"github.com/essentialkaos/ek/v12/fsutil"
"github.com/essentialkaos/ek/v12/httputil"
"github.com/essentialkaos/ek/v12/jsonutil"
"github.com/essentialkaos/ek/v12/options"
"github.com/essentialkaos/ek/v12/path"
"github.com/essentialkaos/ek/v12/pluralize"
"github.com/essentialkaos/ek/v12/progress"
"github.com/essentialkaos/ek/v12/req"
"github.com/essentialkaos/ek/v12/terminal"
"github.com/essentialkaos/ek/v12/timeutil"
"github.com/essentialkaos/ek/v12/usage"
"github.com/essentialkaos/ek/v12/usage/completion/bash"
"github.com/essentialkaos/ek/v12/usage/completion/fish"
"github.com/essentialkaos/ek/v12/usage/completion/zsh"
"github.com/essentialkaos/ek/v12/usage/man"
"github.com/essentialkaos/rbinstall/index"
"github.com/essentialkaos/rbinstall/support"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// App info
const (
APP = "RBInstall Clone"
VER = "3.0.2"
DESC = "Utility for cloning RBInstall repository"
)
// Options
const (
OPT_YES = "y:yes"
OPT_NO_COLOR = "nc:no-color"
OPT_HELP = "h:help"
OPT_VER = "v:version"
OPT_VERB_VER = "vv:verbose-version"
OPT_COMPLETION = "completion"
OPT_GENERATE_MAN = "generate-man"
)
// ////////////////////////////////////////////////////////////////////////////////// //
// INDEX_NAME is name of index file
const INDEX_NAME = "index3.json"
// ////////////////////////////////////////////////////////////////////////////////// //
// FileInfo contains info about file with Ruby data
type FileInfo struct {
File string
URL string
OS string
Arch string
Size int64
}
// ////////////////////////////////////////////////////////////////////////////////// //
var optMap = options.Map{
OPT_YES: {Type: options.BOOL},
OPT_NO_COLOR: {Type: options.BOOL},
OPT_HELP: {Type: options.BOOL, Alias: "u:usage"},
OPT_VER: {Type: options.BOOL, Alias: "ver"},
OPT_VERB_VER: {Type: options.BOOL},
OPT_COMPLETION: {},
OPT_GENERATE_MAN: {Type: options.BOOL},
}
var colorTagApp string
var colorTagVer string
// ////////////////////////////////////////////////////////////////////////////////// //
func Run(gitRev string, gomod []byte) {
runtime.GOMAXPROCS(1)
preConfigureUI()
args, errs := options.Parse(optMap)
if len(errs) != 0 {
printError(errs[0].Error())
os.Exit(1)
}
configureUI()
switch {
case options.Has(OPT_COMPLETION):
os.Exit(printCompletion())
case options.Has(OPT_GENERATE_MAN):
printMan()
os.Exit(0)
case options.GetB(OPT_VER):
genAbout(gitRev).Print()
os.Exit(0)
case options.GetB(OPT_VERB_VER):
support.Print(APP, VER, gitRev, gomod)
os.Exit(0)
case options.GetB(OPT_HELP) || len(args) != 2:
genUsage().Print()
os.Exit(0)
}
req.SetUserAgent("RBInstall-Clone", VER)
url := args.Get(0).String()
dir := args.Get(1).String()
fmtc.NewLine()
checkArguments(url, dir)
cloneRepository(url, dir)
fmtc.NewLine()
}
// preConfigureUI preconfigures UI based on information about user terminal
func preConfigureUI() {
term := os.Getenv("TERM")
fmtc.DisableColors = true
if term != "" {
switch {
case strings.Contains(term, "xterm"),
strings.Contains(term, "color"),
term == "screen":
fmtc.DisableColors = false
}
}
if !fsutil.IsCharacterDevice("/dev/stdout") && os.Getenv("FAKETTY") == "" {
fmtc.DisableColors = true
}
if os.Getenv("NO_COLOR") != "" {
fmtc.DisableColors = true
}
}
// configureUI configures user interface
func configureUI() {
terminal.Prompt = "› "
terminal.TitleColorTag = "{s}"
if options.GetB(OPT_NO_COLOR) {
fmtc.DisableColors = true
}
switch {
case fmtc.IsTrueColorSupported():
colorTagApp, colorTagVer = "{#CC1E2C}", "{#CC1E2C}"
case fmtc.Is256ColorsSupported():
colorTagApp, colorTagVer = "{#160}", "{#160}"
default:
colorTagApp, colorTagVer = "{r}", "{r}"
}
}
// checkArguments checks command line arguments
func checkArguments(url, dir string) {
if !httputil.IsURL(url) {
printErrorAndExit("Url %s doesn't look like valid url", url)
}
if !fsutil.IsExist(dir) {
printErrorAndExit("Directory %s does not exist", dir)
}
if !fsutil.IsDir(dir) {
printErrorAndExit("Target %s is not a directory", dir)
}
if !fsutil.IsReadable(dir) {
printErrorAndExit("Directory %s is not readable", dir)
}
if !fsutil.IsExecutable(dir) {
printErrorAndExit("Directory %s is not executable", dir)
}
}
// cloneRepository start repository clone process
func cloneRepository(url, dir string) {
fmtc.Printf("Fetching index from {*}%s{!}…\n", url)
i, err := fetchIndex(url)
if err != nil {
printErrorAndExit(err.Error())
}
if i.Meta.Items == 0 {
printErrorAndExit("Repository is empty")
}
printRepositoryInfo(i)
uuid := getCurrentIndexUUID(dir)
if uuid == i.UUID {
fmtc.Println("{g}Looks like you already have the same set of data{!}")
return
}
if !options.GetB(OPT_YES) {
ok, err := terminal.ReadAnswer("Clone this repository?", "N")
fmtc.NewLine()
if !ok || err != nil {
os.Exit(0)
}
}
downloadRepositoryData(i, url, dir)
saveIndex(i, dir)
fmtc.NewLine()
fmtc.Printf("{g}Repository successfully cloned to {g*}%s{!}\n", dir)
}
// printRepositoryInfo prints basic info about repository data
func printRepositoryInfo(i *index.Index) {
fmtutil.Separator(false, "REPOSITORY INFO")
updated := timeutil.Format(time.Unix(i.Meta.Created, 0), "%Y/%m/%d %H:%M:%S")
fmtc.Printf(" {*}UUID{!}: %s\n", i.UUID)
fmtc.Printf(" {*}Updated{!}: %s\n\n", updated)
for _, distName := range i.Data.Keys() {
size, items := int64(0), 0
for archName, arch := range i.Data[distName] {
for _, category := range arch {
for _, version := range category {
size += version.Size
items++
if len(version.Variations) != 0 {
for _, variation := range version.Variations {
items++
size += variation.Size
}
}
}
}
fmtc.Printf(
" {c*}%s{!}{c}/%s:{!} %3s {s-}|{!} %s\n", distName, archName,
fmtutil.PrettyNum(items), fmtutil.PrettySize(size, " "),
)
}
}
fmtc.NewLine()
fmtc.Printf(
" {*}Total:{!} %s {s-}|{!} %s\n",
fmtutil.PrettyNum(i.Meta.Items),
fmtutil.PrettySize(i.Meta.Size, " "),
)
fmtutil.Separator(false)
}
// fetchIndex downloads remote repository index
func fetchIndex(url string) (*index.Index, error) {
resp, err := req.Request{URL: url + "/" + INDEX_NAME}.Get()
if err != nil {
return nil, fmtc.Errorf("Can't fetch repository index: %v", err)
}
if resp.StatusCode != 200 {
return nil, fmtc.Errorf("Can't fetch repository index: server return status code %d", resp.StatusCode)
}
repoIndex := &index.Index{}
err = resp.JSON(repoIndex)
if err != nil {
return nil, fmtc.Errorf("Can't decode repository index: %v", err)
}
return repoIndex, nil
}
// downloadRepositoryData downloads all files from repository
func downloadRepositoryData(i *index.Index, url, dir string) {
items := getItems(i, url)
pb := progress.New(int64(len(items)), "Starting…")
pbs := progress.DefaultSettings
pbs.IsSize = false
pbs.ShowSpeed = false
pbs.ShowRemaining = false
pbs.ShowName = false
pbs.NameColorTag = "{*}"
pbs.BarFgColorTag = colorTagApp
pbs.PercentColorTag = ""
pbs.RemainingColorTag = "{s}"
pb.UpdateSettings(pbs)
pb.Start()
fmtc.Printf(
"Downloading %s %s from remote repository…\n",
fmtutil.PrettyNum(len(items)),
pluralize.Pluralize(len(items), "file", "files"),
)
for _, item := range items {
fileDir := path.Join(dir, item.OS, item.Arch)
filePath := path.Join(dir, item.OS, item.Arch, item.File)
if !fsutil.IsExist(fileDir) {
err := os.MkdirAll(fileDir, 0755)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("Can't create directory %s: %v", fileDir, err)
}
}
if fsutil.IsExist(filePath) {
fileSize := fsutil.GetSize(filePath)
if fileSize == item.Size {
pb.Add(1)
continue
}
}
err := downloadFile(item.URL, filePath)
if err != nil {
pb.Finish()
fmtc.NewLine()
printErrorAndExit("%v", err)
}
pb.Add(1)
}
pb.Finish()
fmtc.Printf("\n{g}Repository successfully cloned into %s{!}\n")
}
// getItems returns slice with info about items in repository
func getItems(repoIndex *index.Index, url string) []FileInfo {
var items []FileInfo
for _, os := range repoIndex.Data.Keys() {
for _, arch := range repoIndex.Data[os].Keys() {
for _, category := range repoIndex.Data[os][arch].Keys() {
for _, version := range repoIndex.Data[os][arch][category] {
items = append(items, FileInfo{
File: version.File,
URL: url + "/" + version.Path + "/" + version.File,
OS: os,
Arch: arch,
Size: version.Size,
})
if len(version.Variations) != 0 {
for _, subVersion := range version.Variations {
items = append(items, FileInfo{
File: subVersion.File,
URL: url + "/" + subVersion.Path + "/" + subVersion.File,
OS: os,
Arch: arch,
Size: subVersion.Size,
})
}
}
}
}
}
}
return items
}
// downloadFile downloads and saves remote file
func downloadFile(url, output string) error {
if fsutil.IsExist(output) {
os.Remove(output)
}
fd, err := os.OpenFile(output, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmtc.Errorf("Can't create file: %v", err)
}
defer fd.Close()
resp, err := req.Request{URL: url}.Get()
if err != nil {
return fmtc.Errorf("Can't download file: %v", err)
}
if resp.StatusCode != 200 {
return fmtc.Errorf("Can't download file: server return status code %d", resp.StatusCode)
}
w := bufio.NewWriter(fd)
_, err = io.Copy(w, resp.Body)
w.Flush()
if err != nil {
return fmtc.Errorf("Can't write file: %v", err)
}
return nil
}
// saveIndex encodes index to JSON format and saves it into the file
func saveIndex(repoIndex *index.Index, dir string) {
indexPath := path.Join(dir, INDEX_NAME)
fmtc.Printf("Saving index… ")
err := jsonutil.Write(indexPath, repoIndex)
if err != nil {
fmtc.Println("{r}ERROR{!}")
printErrorAndExit("Can't save index as %s: %v", indexPath, err)
}
fmtc.Println("{g}DONE{!}")
}
// getCurrentIndexUUID returns current index UUID (if exist)
func getCurrentIndexUUID(dir string) string {
indexFile := path.Join(dir, INDEX_NAME)
if !fsutil.IsExist(indexFile) {
return ""
}
i := &index.Index{}
if jsonutil.Read(indexFile, i) != nil {
return ""
}
return i.UUID
}
// printError prints error message to console
func printError(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
}
// printError prints warning message to console
func printWarn(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{y}▲ "+f+"{!}\n", a...)
}
// printErrorAndExit print error message and exit with non-zero exit code
func printErrorAndExit(f string, a ...interface{}) {
fmtc.Fprintf(os.Stderr, "{r}▲ "+f+"{!}\n", a...)
fmtc.NewLine()
os.Exit(1)
}
// ////////////////////////////////////////////////////////////////////////////////// //
// printCompletion prints completion for given shell
func printCompletion() int {
info := genUs | ints man page
func printMan() {
fmt.Println(
man.Generate(
genUsage(),
genAbout(""),
),
)
}
// genUsage generates usage info
func genUsage() *usage.Info {
info := usage.NewInfo("", "url", "path")
info.AppNameColorTag = "{*}" + colorTagApp
info.AddOption(OPT_YES, `Answer "yes" to all questions`)
info.AddOption(OPT_NO_COLOR, "Disable colors in output")
info.AddOption(OPT_HELP, "Show this help message")
info.AddOption(OPT_VER, "Show version")
info.AddExample(
"https://rbinstall.kaos.st /path/to/clone",
"Clone EK repository to /path/to/clone",
)
return info
}
// genAbout generates info about version
func genAbout(gitRev string) *usage.About {
about := &usage.About{
App: APP,
Version: VER,
Desc: DESC,
Year: 2006,
Owner: "ESSENTIAL KAOS",
License: "Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0>",
AppNameColorTag: "{*}" + colorTagApp,
VersionColorTag: colorTagVer,
}
if gitRev != "" {
about.Build = "git:" + gitRev
}
return about
}
| age()
switch options.GetS(OPT_COMPLETION) {
case "bash":
fmt.Printf(bash.Generate(info, "rbinstall-clone"))
case "fish":
fmt.Printf(fish.Generate(info, "rbinstall-clone"))
case "zsh":
fmt.Printf(zsh.Generate(info, optMap, "rbinstall-clone"))
default:
return 1
}
return 0
}
// printMan pr | identifier_body |
lib.rs | //! This crate contains structures and generators for specifying how to generate
//! historical and real-time test data for Delorean. The rules for how to
//! generate data and what shape it should take can be specified in a TOML file.
//!
//! Generators can output in line protocol, Parquet, or can be used to generate
//! real-time load on a server that implements the [InfluxDB 2.0 write
//! path][write-api].
//!
//! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write
//!
//! While this generator could be compared to [the Go based one that creates TSM
//! data][go-gen], its purpose is meant to be more far reaching. In addition to
//! generating historical data, it should be useful for generating data in a
//! sequence as you would expect it to arrive in a production environment. That
//! means many agents sending data with their different tags and timestamps.
//!
//! [go-gen]: https://github.com/influxdata/influxdb/pull/12710
#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
#![warn(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
clippy::explicit_iter_loop,
clippy::future_not_send,
clippy::use_self,
clippy::clone_on_ref_ptr
)]
use crate::substitution::Substitute;
use rand::Rng;
use rand_seeder::Seeder;
use snafu::{ResultExt, Snafu};
use std::{
convert::TryFrom,
time::{SystemTime, UNIX_EPOCH},
};
pub mod agent;
pub mod field;
pub mod measurement;
pub mod specification;
pub mod substitution;
pub mod tag;
mod tag_set;
pub mod write;
/// Errors that may happen while generating points.
#[derive(Snafu, Debug)]
pub enum Error {
/// Error that may happen when waiting on a tokio task
#[snafu(display("Could not join tokio task: {}", source))]
TokioError {
/// Underlying tokio error that caused this problem
source: tokio::task::JoinError,
},
/// Error that may happen when constructing an agent name
#[snafu(display("Could not create agent name, caused by:\n{}", source))]
CouldNotCreateAgentName {
/// Underlying `substitution` module error that caused this problem
source: substitution::Error,
},
/// Error that may happen when an agent generates points
#[snafu(display("Agent could not generate points, caused by:\n{}", source))]
AgentCouldNotGeneratePoints {
/// Underlying `agent` module error that caused this problem
source: agent::Error,
},
/// Error that may happen when creating agents
#[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))]
CouldNotCreateAgent {
/// The name of the relevant agent
name: String,
/// Underlying `agent` module error that caused this problem
source: agent::Error,
},
/// Error that may happen when constructing an agent's writer
#[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))]
CouldNotCreateAgentWriter {
/// The name of the relevant agent
name: String,
/// Underlying `write` module error that caused this problem
source: write::Error,
},
}
type Result<T, E = Error> = std::result::Result<T, E>;
/// Generate data from the configuration in the spec.
///
/// Provide a writer that the line protocol should be written to.
///
/// If `start_datetime` or `end_datetime` are `None`, the current datetime will
/// be used.
pub async fn generate<T: DataGenRng>(
spec: &specification::DataSpec,
points_writer_builder: &mut write::PointsWriterBuilder,
start_datetime: Option<i64>,
end_datetime: Option<i64>,
execution_start_time: i64,
continue_on: bool,
batch_size: usize,
) -> Result<usize> {
let seed = spec.base_seed.to_owned().unwrap_or_else(|| {
let mut rng = rand::thread_rng();
format!("{:04}", rng.gen_range(0..10000))
});
let mut handles = vec![];
// for each agent specification
for agent_spec in &spec.agents {
// create iterators to `cycle` through for `agent_spec.tags`
let tag_set_iterator = tag::AgentTagIterator::new(&agent_spec.tags);
// create `count` number of agent instances, or 1 agent if no count is specified
let n_agents = agent_spec.count.unwrap_or(1);
for (agent_id, mut agent_tags) in tag_set_iterator.take(n_agents).enumerate() {
let agent_name =
Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())])
.context(CouldNotCreateAgentName)?;
agent_tags.push(tag::Tag::new("data_spec", &spec.name));
if let Some(name_tag_key) = &agent_spec.name_tag_key {
agent_tags.push(tag::Tag::new(name_tag_key, &agent_name));
}
let mut agent = agent::Agent::<T>::new(
agent_spec,
&agent_name,
agent_id,
&seed,
agent_tags,
start_datetime,
end_datetime,
execution_start_time,
continue_on,
)
.context(CouldNotCreateAgent { name: &agent_name })?;
let agent_points_writer = points_writer_builder
.build_for_agent(&agent_name)
.context(CouldNotCreateAgentWriter { name: &agent_name })?;
handles.push(tokio::task::spawn(async move {
agent.generate_all(agent_points_writer, batch_size).await
}));
}
}
let mut total_points = 0;
for handle in handles {
total_points += handle
.await
.context(TokioError)?
.context(AgentCouldNotGeneratePoints)?;
}
Ok(total_points)
}
/// Shorthand trait for the functionality this crate needs a random number generator to have
pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send + 'static {}
impl<T: rand::Rng + rand::SeedableRng + Send + 'static> DataGenRng for T {}
/// Encapsulating the creation of an optionally-seedable random number generator
/// to make this easy to change. Uses a 4-digit number expressed as a `String`
/// as the seed type to enable easy creation of another instance using the same
/// seed.
#[derive(Debug)]
pub struct RandomNumberGenerator<T: DataGenRng> {
rng: T,
/// The seed used for this instance.
pub seed: String,
}
impl<T: DataGenRng> Default for RandomNumberGenerator<T> {
fn default() -> Self {
let mut rng = rand::thread_rng();
let seed = format!("{:04}", rng.gen_range(0..10000));
Self::new(seed)
}
}
impl<T: DataGenRng> RandomNumberGenerator<T> {
/// Create a new instance using the specified seed.
pub fn new(seed: impl Into<String>) -> Self {
let seed = seed.into();
Self {
rng: Seeder::from(&seed).make_rng(),
seed,
}
}
/// Generate a random GUID
pub fn guid(&mut self) -> uuid::Uuid {
let mut bytes = [0u8; 16];
self.rng.fill_bytes(&mut bytes);
uuid::Builder::from_bytes(bytes)
.set_variant(uuid::Variant::RFC4122)
.set_version(uuid::Version::Random)
.build()
}
}
impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> {
fn next_u32(&mut self) -> u32 {
self.rng.next_u32()
}
fn next_u64(&mut self) -> u64 {
self.rng.next_u64()
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.rng.fill_bytes(dest);
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> {
self.rng.try_fill_bytes(dest)
}
}
/// Gets the current time in nanoseconds since the epoch
pub fn now_ns() -> i64 {
let since_the_epoch = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
i64::try_from(since_the_epoch.as_nanos()).expect("Time does not fit")
}
// Always returns 0.
#[cfg(test)]
#[derive(Default)]
struct ZeroRng;
#[cfg(test)]
impl rand::RngCore for ZeroRng {
fn next_u32(&mut self) -> u32 {
self.next_u64() as u32
}
fn next_u64(&mut self) -> u64 {
0
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
rand_core::impls::fill_bytes_via_next(self, dest)
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
impl rand::SeedableRng for ZeroRng {
type Seed = Vec<u8>;
// Ignore the seed value
fn from_seed(_seed: Self::Seed) -> Self {
Self
}
}
// The test rng ignores the seed anyway, so the seed specified doesn't matter.
#[cfg(test)]
const TEST_SEED: &str = "";
#[cfg(test)]
fn test_rng() -> RandomNumberGenerator<ZeroRng> {
RandomNumberGenerator::<ZeroRng>::new(TEST_SEED)
}
// A random number type that does *not* have a predictable sequence of values for use in tests
// that assert on properties rather than exact values. Aliased for convenience in changing to
// a different Rng type.
#[cfg(test)]
type DynamicRng = rand::rngs::SmallRng;
#[cfg(test)]
mod test {
use super::*;
use crate::specification::*;
use influxdb2_client::models::WriteDataPoint;
use std::str::FromStr;
type Error = Box<dyn std::error::Error>;
type Result<T = (), E = Error> = std::result::Result<T, E>;
#[tokio::test]
async fn historical_data_sampling_interval() -> Result<()> {
let toml = r#"
name = "demo_schema"
[[agents]]
name = "basic"
sampling_interval = "10s" # seconds
[[agents.measurements]]
name = "cpu"
[[agents.measurements.fields]]
name = "up"
bool = true"#;
let data_spec = DataSpec::from_str(toml).unwrap();
let agent_id = 0;
let agent_spec = &data_spec.agents[0];
// Take agent_tags out of the equation for the purposes of this test
let agent_tags = vec![];
let execution_start_time = now_ns();
// imagine we've specified at the command line that we want to generate metrics
// for 1970
let start_datetime = Some(0);
// for the first 15 seconds of the year
let end_datetime = Some(15 * 1_000_000_000);
let mut agent = agent::Agent::<ZeroRng>::new(
agent_spec,
&agent_spec.name,
agent_id,
TEST_SEED,
agent_tags,
start_datetime,
end_datetime,
execution_start_time,
false,
)?;
let data_points = agent.generate().await?;
let mut v = Vec::new();
for data_point in data_points {
data_point.write_data_point_to(&mut v).unwrap();
}
let line_protocol = String::from_utf8(v).unwrap();
// Get a point for time 0
let expected_line_protocol = "cpu up=f 0\n";
assert_eq!(line_protocol, expected_line_protocol);
let data_points = agent.generate().await?;
let mut v = Vec::new();
for data_point in data_points {
data_point.write_data_point_to(&mut v).unwrap();
}
let line_protocol = String::from_utf8(v).unwrap();
// Get a point for time 10s | let data_points = agent.generate().await?;
assert!(
data_points.is_empty(),
"expected no data points, got {:?}",
data_points
);
Ok(())
}
} | let expected_line_protocol = "cpu up=f 10000000000\n";
assert_eq!(line_protocol, expected_line_protocol);
// Don't get any points anymore because we're past the ending datetime | random_line_split |
lib.rs | //! This crate contains structures and generators for specifying how to generate
//! historical and real-time test data for Delorean. The rules for how to
//! generate data and what shape it should take can be specified in a TOML file.
//!
//! Generators can output in line protocol, Parquet, or can be used to generate
//! real-time load on a server that implements the [InfluxDB 2.0 write
//! path][write-api].
//!
//! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write
//!
//! While this generator could be compared to [the Go based one that creates TSM
//! data][go-gen], its purpose is meant to be more far reaching. In addition to
//! generating historical data, it should be useful for generating data in a
//! sequence as you would expect it to arrive in a production environment. That
//! means many agents sending data with their different tags and timestamps.
//!
//! [go-gen]: https://github.com/influxdata/influxdb/pull/12710
#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
#![warn(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
clippy::explicit_iter_loop,
clippy::future_not_send,
clippy::use_self,
clippy::clone_on_ref_ptr
)]
use crate::substitution::Substitute;
use rand::Rng;
use rand_seeder::Seeder;
use snafu::{ResultExt, Snafu};
use std::{
convert::TryFrom,
time::{SystemTime, UNIX_EPOCH},
};
pub mod agent;
pub mod field;
pub mod measurement;
pub mod specification;
pub mod substitution;
pub mod tag;
mod tag_set;
pub mod write;
/// Errors that may happen while generating points.
#[derive(Snafu, Debug)]
pub enum Error {
/// Error that may happen when waiting on a tokio task
#[snafu(display("Could not join tokio task: {}", source))]
TokioError {
/// Underlying tokio error that caused this problem
source: tokio::task::JoinError,
},
/// Error that may happen when constructing an agent name
#[snafu(display("Could not create agent name, caused by:\n{}", source))]
CouldNotCreateAgentName {
/// Underlying `substitution` module error that caused this problem
source: substitution::Error,
},
/// Error that may happen when an agent generates points
#[snafu(display("Agent could not generate points, caused by:\n{}", source))]
AgentCouldNotGeneratePoints {
/// Underlying `agent` module error that caused this problem
source: agent::Error,
},
/// Error that may happen when creating agents
#[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))]
CouldNotCreateAgent {
/// The name of the relevant agent
name: String,
/// Underlying `agent` module error that caused this problem
source: agent::Error,
},
/// Error that may happen when constructing an agent's writer
#[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))]
CouldNotCreateAgentWriter {
/// The name of the relevant agent
name: String,
/// Underlying `write` module error that caused this problem
source: write::Error,
},
}
type Result<T, E = Error> = std::result::Result<T, E>;
/// Generate data from the configuration in the spec.
///
/// Provide a writer that the line protocol should be written to.
///
/// If `start_datetime` or `end_datetime` are `None`, the current datetime will
/// be used.
pub async fn generate<T: DataGenRng>(
spec: &specification::DataSpec,
points_writer_builder: &mut write::PointsWriterBuilder,
start_datetime: Option<i64>,
end_datetime: Option<i64>,
execution_start_time: i64,
continue_on: bool,
batch_size: usize,
) -> Result<usize> {
let seed = spec.base_seed.to_owned().unwrap_or_else(|| {
let mut rng = rand::thread_rng();
format!("{:04}", rng.gen_range(0..10000))
});
let mut handles = vec![];
// for each agent specification
for agent_spec in &spec.agents {
// create iterators to `cycle` through for `agent_spec.tags`
let tag_set_iterator = tag::AgentTagIterator::new(&agent_spec.tags);
// create `count` number of agent instances, or 1 agent if no count is specified
let n_agents = agent_spec.count.unwrap_or(1);
for (agent_id, mut agent_tags) in tag_set_iterator.take(n_agents).enumerate() {
let agent_name =
Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())])
.context(CouldNotCreateAgentName)?;
agent_tags.push(tag::Tag::new("data_spec", &spec.name));
if let Some(name_tag_key) = &agent_spec.name_tag_key {
agent_tags.push(tag::Tag::new(name_tag_key, &agent_name));
}
let mut agent = agent::Agent::<T>::new(
agent_spec,
&agent_name,
agent_id,
&seed,
agent_tags,
start_datetime,
end_datetime,
execution_start_time,
continue_on,
)
.context(CouldNotCreateAgent { name: &agent_name })?;
let agent_points_writer = points_writer_builder
.build_for_agent(&agent_name)
.context(CouldNotCreateAgentWriter { name: &agent_name })?;
handles.push(tokio::task::spawn(async move {
agent.generate_all(agent_points_writer, batch_size).await
}));
}
}
let mut total_points = 0;
for handle in handles {
total_points += handle
.await
.context(TokioError)?
.context(AgentCouldNotGeneratePoints)?;
}
Ok(total_points)
}
/// Shorthand trait for the functionality this crate needs a random number generator to have
pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send + 'static {}
impl<T: rand::Rng + rand::SeedableRng + Send + 'static> DataGenRng for T {}
/// Encapsulating the creation of an optionally-seedable random number generator
/// to make this easy to change. Uses a 4-digit number expressed as a `String`
/// as the seed type to enable easy creation of another instance using the same
/// seed.
#[derive(Debug)]
pub struct RandomNumberGenerator<T: DataGenRng> {
rng: T,
/// The seed used for this instance.
pub seed: String,
}
impl<T: DataGenRng> Default for RandomNumberGenerator<T> {
fn default() -> Self {
let mut rng = rand::thread_rng();
let seed = format!("{:04}", rng.gen_range(0..10000));
Self::new(seed)
}
}
impl<T: DataGenRng> RandomNumberGenerator<T> {
/// Create a new instance using the specified seed.
pub fn new(seed: impl Into<String>) -> Self {
let seed = seed.into();
Self {
rng: Seeder::from(&seed).make_rng(),
seed,
}
}
/// Generate a random GUID
pub fn guid(&mut self) -> uuid::Uuid {
let mut bytes = [0u8; 16];
self.rng.fill_bytes(&mut bytes);
uuid::Builder::from_bytes(bytes)
.set_variant(uuid::Variant::RFC4122)
.set_version(uuid::Version::Random)
.build()
}
}
impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> {
fn next_u32(&mut self) -> u32 {
self.rng.next_u32()
}
fn next_u64(&mut self) -> u64 {
self.rng.next_u64()
}
fn | (&mut self, dest: &mut [u8]) {
self.rng.fill_bytes(dest);
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> {
self.rng.try_fill_bytes(dest)
}
}
/// Gets the current time in nanoseconds since the epoch
pub fn now_ns() -> i64 {
let since_the_epoch = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Time went backwards");
i64::try_from(since_the_epoch.as_nanos()).expect("Time does not fit")
}
// Always returns 0.
#[cfg(test)]
#[derive(Default)]
struct ZeroRng;
#[cfg(test)]
impl rand::RngCore for ZeroRng {
fn next_u32(&mut self) -> u32 {
self.next_u64() as u32
}
fn next_u64(&mut self) -> u64 {
0
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
rand_core::impls::fill_bytes_via_next(self, dest)
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
impl rand::SeedableRng for ZeroRng {
type Seed = Vec<u8>;
// Ignore the seed value
fn from_seed(_seed: Self::Seed) -> Self {
Self
}
}
// The test rng ignores the seed anyway, so the seed specified doesn't matter.
#[cfg(test)]
const TEST_SEED: &str = "";
#[cfg(test)]
fn test_rng() -> RandomNumberGenerator<ZeroRng> {
RandomNumberGenerator::<ZeroRng>::new(TEST_SEED)
}
// A random number type that does *not* have a predictable sequence of values for use in tests
// that assert on properties rather than exact values. Aliased for convenience in changing to
// a different Rng type.
#[cfg(test)]
type DynamicRng = rand::rngs::SmallRng;
#[cfg(test)]
mod test {
use super::*;
use crate::specification::*;
use influxdb2_client::models::WriteDataPoint;
use std::str::FromStr;
type Error = Box<dyn std::error::Error>;
type Result<T = (), E = Error> = std::result::Result<T, E>;
#[tokio::test]
async fn historical_data_sampling_interval() -> Result<()> {
let toml = r#"
name = "demo_schema"
[[agents]]
name = "basic"
sampling_interval = "10s" # seconds
[[agents.measurements]]
name = "cpu"
[[agents.measurements.fields]]
name = "up"
bool = true"#;
let data_spec = DataSpec::from_str(toml).unwrap();
let agent_id = 0;
let agent_spec = &data_spec.agents[0];
// Take agent_tags out of the equation for the purposes of this test
let agent_tags = vec![];
let execution_start_time = now_ns();
// imagine we've specified at the command line that we want to generate metrics
// for 1970
let start_datetime = Some(0);
// for the first 15 seconds of the year
let end_datetime = Some(15 * 1_000_000_000);
let mut agent = agent::Agent::<ZeroRng>::new(
agent_spec,
&agent_spec.name,
agent_id,
TEST_SEED,
agent_tags,
start_datetime,
end_datetime,
execution_start_time,
false,
)?;
let data_points = agent.generate().await?;
let mut v = Vec::new();
for data_point in data_points {
data_point.write_data_point_to(&mut v).unwrap();
}
let line_protocol = String::from_utf8(v).unwrap();
// Get a point for time 0
let expected_line_protocol = "cpu up=f 0\n";
assert_eq!(line_protocol, expected_line_protocol);
let data_points = agent.generate().await?;
let mut v = Vec::new();
for data_point in data_points {
data_point.write_data_point_to(&mut v).unwrap();
}
let line_protocol = String::from_utf8(v).unwrap();
// Get a point for time 10s
let expected_line_protocol = "cpu up=f 10000000000\n";
assert_eq!(line_protocol, expected_line_protocol);
// Don't get any points anymore because we're past the ending datetime
let data_points = agent.generate().await?;
assert!(
data_points.is_empty(),
"expected no data points, got {:?}",
data_points
);
Ok(())
}
}
| fill_bytes | identifier_name |
lib.rs | //! This crate contains structures and generators for specifying how to generate
//! historical and real-time test data for Delorean. The rules for how to
//! generate data and what shape it should take can be specified in a TOML file.
//!
//! Generators can output in line protocol, Parquet, or can be used to generate
//! real-time load on a server that implements the [InfluxDB 2.0 write
//! path][write-api].
//!
//! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write
//!
//! While this generator could be compared to [the Go based one that creates TSM
//! data][go-gen], its purpose is meant to be more far reaching. In addition to
//! generating historical data, it should be useful for generating data in a
//! sequence as you would expect it to arrive in a production environment. That
//! means many agents sending data with their different tags and timestamps.
//!
//! [go-gen]: https://github.com/influxdata/influxdb/pull/12710
#![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
#![warn(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
clippy::explicit_iter_loop,
clippy::future_not_send,
clippy::use_self,
clippy::clone_on_ref_ptr
)]
use crate::substitution::Substitute;
use rand::Rng;
use rand_seeder::Seeder;
use snafu::{ResultExt, Snafu};
use std::{
convert::TryFrom,
time::{SystemTime, UNIX_EPOCH},
};
pub mod agent;
pub mod field;
pub mod measurement;
pub mod specification;
pub mod substitution;
pub mod tag;
mod tag_set;
pub mod write;
/// Errors that may happen while generating points.
///
/// `derive(Snafu)` generates a context selector for each variant
/// (e.g. `TokioError`, `CouldNotCreateAgent`) used with `.context(...)`
/// at the error sites in this module.
#[derive(Snafu, Debug)]
pub enum Error {
/// Error that may happen when waiting on a tokio task
#[snafu(display("Could not join tokio task: {}", source))]
TokioError {
/// Underlying tokio error that caused this problem
source: tokio::task::JoinError,
},
/// Error that may happen when constructing an agent name
#[snafu(display("Could not create agent name, caused by:\n{}", source))]
CouldNotCreateAgentName {
/// Underlying `substitution` module error that caused this problem
source: substitution::Error,
},
/// Error that may happen when an agent generates points
#[snafu(display("Agent could not generate points, caused by:\n{}", source))]
AgentCouldNotGeneratePoints {
/// Underlying `agent` module error that caused this problem
source: agent::Error,
},
/// Error that may happen when creating agents
#[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))]
CouldNotCreateAgent {
/// The name of the relevant agent
name: String,
/// Underlying `agent` module error that caused this problem
source: agent::Error,
},
/// Error that may happen when constructing an agent's writer
#[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))]
CouldNotCreateAgentWriter {
/// The name of the relevant agent
name: String,
/// Underlying `write` module error that caused this problem
source: write::Error,
},
}
type Result<T, E = Error> = std::result::Result<T, E>;
/// Generate data from the configuration in the spec.
///
/// Each agent instance runs as its own tokio task; the line protocol it
/// produces is sent to a writer obtained from `points_writer_builder`.
///
/// If `start_datetime` or `end_datetime` are `None`, the current datetime
/// will be used.
///
/// Returns the total number of points written across all agents.
pub async fn generate<T: DataGenRng>(
    spec: &specification::DataSpec,
    points_writer_builder: &mut write::PointsWriterBuilder,
    start_datetime: Option<i64>,
    end_datetime: Option<i64>,
    execution_start_time: i64,
    continue_on: bool,
    batch_size: usize,
) -> Result<usize> {
    // Use the seed from the spec when one is present; otherwise pick a
    // random 4-digit seed so the run can be reproduced later.
    let seed = match spec.base_seed.to_owned() {
        Some(s) => s,
        None => format!("{:04}", rand::thread_rng().gen_range(0..10000)),
    };
    let mut tasks = vec![];
    // Walk every agent specification in the spec.
    for agent_spec in &spec.agents {
        // Cycling iterator over the tag sets declared for this agent spec.
        let tag_iter = tag::AgentTagIterator::new(&agent_spec.tags);
        // One agent instance per `count`, defaulting to a single instance.
        let instances = agent_spec.count.unwrap_or(1);
        for (agent_id, mut tags) in tag_iter.take(instances).enumerate() {
            // Expand `{{agent_id}}`-style placeholders in the agent name.
            let agent_name =
                Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())])
                    .context(CouldNotCreateAgentName)?;
            // Every point is tagged with the spec name; optionally also
            // with the agent's own name under `name_tag_key`.
            tags.push(tag::Tag::new("data_spec", &spec.name));
            if let Some(key) = &agent_spec.name_tag_key {
                tags.push(tag::Tag::new(key, &agent_name));
            }
            let mut agent = agent::Agent::<T>::new(
                agent_spec,
                &agent_name,
                agent_id,
                &seed,
                tags,
                start_datetime,
                end_datetime,
                execution_start_time,
                continue_on,
            )
            .context(CouldNotCreateAgent { name: &agent_name })?;
            let writer = points_writer_builder
                .build_for_agent(&agent_name)
                .context(CouldNotCreateAgentWriter { name: &agent_name })?;
            // Run each agent concurrently on its own task.
            tasks.push(tokio::task::spawn(async move {
                agent.generate_all(writer, batch_size).await
            }));
        }
    }
    // Wait for every agent task and add up the points each one wrote.
    let mut total_points = 0;
    for task in tasks {
        total_points += task
            .await
            .context(TokioError)?
            .context(AgentCouldNotGeneratePoints)?;
    }
    Ok(total_points)
}
/// Shorthand trait for the functionality this crate needs a random number generator to have
pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send + 'static {}
// Blanket impl: any seedable, sendable `Rng` automatically satisfies `DataGenRng`.
impl<T: rand::Rng + rand::SeedableRng + Send + 'static> DataGenRng for T {}
/// Encapsulating the creation of an optionally-seedable random number generator
/// to make this easy to change. Uses a 4-digit number expressed as a `String`
/// as the seed type to enable easy creation of another instance using the same
/// seed.
#[derive(Debug)]
pub struct RandomNumberGenerator<T: DataGenRng> {
// The underlying RNG, derived from `seed` at construction time.
rng: T,
/// The seed used for this instance.
pub seed: String,
}
impl<T: DataGenRng> Default for RandomNumberGenerator<T> {
    /// Creates a generator seeded with a freshly chosen random 4-digit seed.
    fn default() -> Self {
        Self::new(format!("{:04}", rand::thread_rng().gen_range(0..10000)))
    }
}
impl<T: DataGenRng> RandomNumberGenerator<T> {
/// Create a new instance using the specified seed.
pub fn new(seed: impl Into<String>) -> Self |
/// Generate a random GUID
///
/// Fills 16 bytes from this instance's RNG, then stamps the variant and
/// version bits so the bytes form a valid RFC 4122 version-4 UUID.
pub fn guid(&mut self) -> uuid::Uuid {
let mut bytes = [0u8; 16];
self.rng.fill_bytes(&mut bytes);
// The builder overwrites only the variant/version bits; the remaining
// 122 bits come from the RNG (so a seeded RNG yields reproducible GUIDs).
uuid::Builder::from_bytes(bytes)
.set_variant(uuid::Variant::RFC4122)
.set_version(uuid::Version::Random)
.build()
}
}
// Forward the `RngCore` interface to the wrapped generator so a
// `RandomNumberGenerator` can be used anywhere a `rand::Rng` is expected.
impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> {
fn next_u32(&mut self) -> u32 {
self.rng.next_u32()
}
fn next_u64(&mut self) -> u64 {
self.rng.next_u64()
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.rng.fill_bytes(dest);
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> {
self.rng.try_fill_bytes(dest)
}
}
/// Gets the current wall-clock time as nanoseconds since the Unix epoch.
///
/// Panics if the system clock reads earlier than the epoch, or if the
/// nanosecond count no longer fits in an `i64` (roughly the year 2262).
pub fn now_ns() -> i64 {
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("Time went backwards")
        .as_nanos();
    i64::try_from(nanos).expect("Time does not fit")
}
// Test-only RNG whose every output is zero, giving fully deterministic
// "random" values to unit tests.
#[cfg(test)]
#[derive(Default)]
struct ZeroRng;
#[cfg(test)]
impl rand::RngCore for ZeroRng {
    fn next_u32(&mut self) -> u32 {
        0
    }
    fn next_u64(&mut self) -> u64 {
        0
    }
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        // Equivalent to filling from next_u64(): every byte is zero.
        for byte in dest.iter_mut() {
            *byte = 0;
        }
    }
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}
#[cfg(test)]
impl rand::SeedableRng for ZeroRng {
// A seed type is required by the trait, but `ZeroRng` always produces
// zero regardless of how it is seeded.
type Seed = Vec<u8>;
// Ignore the seed value
fn from_seed(_seed: Self::Seed) -> Self {
Self
}
}
// The test rng ignores the seed anyway, so the seed specified doesn't matter.
#[cfg(test)]
const TEST_SEED: &str = "";
// Convenience constructor for a fully deterministic (all-zero) test RNG.
#[cfg(test)]
fn test_rng() -> RandomNumberGenerator<ZeroRng> {
RandomNumberGenerator::<ZeroRng>::new(TEST_SEED)
}
// A random number type that does *not* have a predictable sequence of values for use in tests
// that assert on properties rather than exact values. Aliased for convenience in changing to
// a different Rng type.
#[cfg(test)]
type DynamicRng = rand::rngs::SmallRng;
#[cfg(test)]
mod test {
use super::*;
use crate::specification::*;
use influxdb2_client::models::WriteDataPoint;
use std::str::FromStr;
type Error = Box<dyn std::error::Error>;
type Result<T = (), E = Error> = std::result::Result<T, E>;
// Verifies that historical generation emits one point per sampling
// interval and stops once the configured end datetime is passed.
#[tokio::test]
async fn historical_data_sampling_interval() -> Result<()> {
let toml = r#"
name = "demo_schema"
[[agents]]
name = "basic"
sampling_interval = "10s" # seconds
[[agents.measurements]]
name = "cpu"
[[agents.measurements.fields]]
name = "up"
bool = true"#;
let data_spec = DataSpec::from_str(toml).unwrap();
let agent_id = 0;
let agent_spec = &data_spec.agents[0];
// Take agent_tags out of the equation for the purposes of this test
let agent_tags = vec![];
let execution_start_time = now_ns();
// imagine we've specified at the command line that we want to generate metrics
// for 1970
let start_datetime = Some(0);
// for the first 15 seconds of the year
let end_datetime = Some(15 * 1_000_000_000);
// ZeroRng makes every "random" field value deterministic (always false
// for the bool field), so exact line protocol can be asserted below.
let mut agent = agent::Agent::<ZeroRng>::new(
agent_spec,
&agent_spec.name,
agent_id,
TEST_SEED,
agent_tags,
start_datetime,
end_datetime,
execution_start_time,
false,
)?;
let data_points = agent.generate().await?;
let mut v = Vec::new();
for data_point in data_points {
data_point.write_data_point_to(&mut v).unwrap();
}
let line_protocol = String::from_utf8(v).unwrap();
// Get a point for time 0
let expected_line_protocol = "cpu up=f 0\n";
assert_eq!(line_protocol, expected_line_protocol);
let data_points = agent.generate().await?;
let mut v = Vec::new();
for data_point in data_points {
data_point.write_data_point_to(&mut v).unwrap();
}
let line_protocol = String::from_utf8(v).unwrap();
// Get a point for time 10s
let expected_line_protocol = "cpu up=f 10000000000\n";
assert_eq!(line_protocol, expected_line_protocol);
// Don't get any points anymore because we're past the ending datetime
let data_points = agent.generate().await?;
assert!(
data_points.is_empty(),
"expected no data points, got {:?}",
data_points
);
Ok(())
}
}
| {
let seed = seed.into();
Self {
rng: Seeder::from(&seed).make_rng(),
seed,
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.