file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ipProcess.py | # -*- coding: utf-8 -*-
'''
Created on Mon May 14 16:31:21 2018
@author: TimL
'''
# Copyright (c) 2019. Induced Polarization Associates, LLC, Seattle, WA
import os
import scipy as sp
import commonSense as cs
from scipy import fftpack as spfftpack
from datetime import datetime
import pickle
def ipProcess():
'''
Reads text files in a data folder and saves frequency domain results to a
pickled file to be opened later for plotting.
'''
# Processed result choice.
# 'raw': raw voltage waveforms from one packet in each file.
# 'zAnyF': impedance phase and mag at non-zero fft frequencies,
# not skipping.
saveThis = 'raw'
# Number nonzero frequencies saved to the .pkl file with the zero frequency
freqCount = 200
# Whether to save absolute phase results.
savePhase = False
# Whether to select a specific file for processing, as opposed to all of
# them.
selectedFile = True
selectedFileNum = 5
pklFolder = r'C:\Users\timl\Documents\IP_data_plots\190506_eagle'
rawFolder = os.path.join(pklFolder, 'rawData')
fileList = ([f for f in os.listdir(rawFolder)
if os.path.isfile(os.path.join(rawFolder, f))])
# Catalog the file numbers stored in each file title. Remove from the list
# files that don't have a number.
fileNumList = []
keepFileList = sp.zeros_like(fileList, dtype=bool)
for t in range(len(fileList)):
uscoreIdx = fileList[t].find('_')
dotIdx = fileList[t].find('.')
if uscoreIdx >= 0 and dotIdx >= 0:
try:
fileNum = int(fileList[t][uscoreIdx+1: dotIdx])
except ValueError:
pass
else:
if not selectedFile or (selectedFile and
fileNum == selectedFileNum):
fileNumList.append(fileNum)
# Keep this file in the list.
keepFileList[t] = True
# Drop all files from the list that didn't have a number.
# Walk through in reverse order so as to not disturb index numbers as
# elements are removed.
for t in range(len(fileList))[::-1]:
if ~keepFileList[t]:
del fileList[t]
# Convert to arrays.
fileArr = sp.array(fileList)
fileNumArr = sp.array(fileNumList, dtype=int)
del fileList
del keepFileList
# Sort by file numbers.
sortKey = fileNumArr.argsort()
fileArr = fileArr[sortKey]
fileNumArr = fileNumArr[sortKey]
# List of class instances containing recorded data.
a = []
for t in range(len(fileArr)):
a.append(fileClass())
# Read the data in from the files.
for t in range(len(a)):
# Packet choice if saving raw voltages.
rawPkt = 102 # 1-indexed.
filePath = os.path.join(rawFolder, fileArr[t])
a[t].introduce(fileArr[t])
a[t].readTxt(filePath, saveThis)
# Remove unwanted fields to cut down on the saved file size.
if saveThis == 'zAnyF' or saveThis == 'upsideDown':
del a[t].raw
del a[t].fft
del a[t].phaseUnCorr
del a[t].mag16Bit
if not savePhase:
del a[t].phase
# Array mask for saving.
mask = sp.zeros(a[t].n, dtype=bool)
# Save non-zero frequencies and the DC frequency.
mask[:freqCount + 1] = True
a[t].freq = a[t].freq[mask]
a[t].phaseDiff = a[t].phaseDiff[..., mask]
a[t].magPhys = a[t].magPhys[..., mask]
a[t].zMag = a[t].zMag[..., mask]
if savePhase:
a[t].phase = a[t].phase[..., mask]
elif saveThis == 'raw':
del a[t].fft
del a[t].phaseUnCorr
del a[t].mag16Bit
del a[t].phase
del a[t].freq
del a[t].phaseDiff
del a[t].magPhys
del a[t].zMag
p = cs.find(a[t].pkt, rawPkt)
if p == -1:
# Save the last packet if the requested packet number isn't in
# the file.
rawPkt = a[t].pkt[p]
a[t].raw = a[t].raw[:, p, :]
# Overwrite the list of packet numbers with the one packet number
# that was saved.
a[t].pkt = rawPkt
# Save the object to a file named after the folder name.
lastSlash = pklFolder.rfind('\\')
saveFile = pklFolder[lastSlash+1:] + '_' + saveThis + '.pkl'
savePath = os.path.join(pklFolder, saveFile)
# Saving the list object:
with open(savePath, 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump(a, f)
class fileClass:
def introduce(self, fileName):
print('Creating %s from %s.' % (self, fileName))
def readTxt(self, filePath, saveThis):
# Read IP measurements from a text file.
with open(filePath, 'r') as fh:
# Number of lines in the file.
lineCount = self.countLines(fh)
# Rewind the pointer in the file back to the beginning.
fh.seek(0)
# Initialize the packet counter.
p = -1
# Initialize the sample index.
s = -1
for lidx, line in enumerate(fh, 1):
# Strip off trailing newline characters.
line = line.rstrip('\n')
if s >= 0:
# Read in raw voltage values.
self.raw[:, p, s] = (
sp.fromstring(line, dtype=float, sep=','))
if s == self.n - 1:
# Reset the counter to below zero.
s = -1
else:
# Increment the sample counter for the next read.
s += 1
elif lidx > 10:
if line[0] == '$':
# Increment the packet index.
p += 1
# Reset the time domain quality parameter index.
qp = 0
# Packet number
self.pkt[p] = int(line[1:])
elif line[0] == '\'':
# CPU UTC Date and Time Strings.
(self.cpuDTStr[p].d,
self.cpuDTStr[p].t) = line[1:].split(',')
# Translate to datetime object.
self.cpuDT[p] = self.str2DateTime(self.cpuDTStr[p])
elif line[0] == '@':
# GPS UTC Date and Time Strings,
# and latitude and longitude fixes.
(self.gpsDTStr[p].d,
self.gpsDTStr[p].t,
self.lat[p],
self.longi[p]) = line[1:].split(',')
# Translate to datetime object.
self.gpsDT[p] = self.str2DateTime(self.gpsDTStr[p])
# Type casting.
self.lat[p] = float(self.lat[p]) | if qp == 3 or qp == 4 or qp == 5:
typ = float # Means are saved as floats.
else:
typ = int # Counts are saved as integers.
assignArr = sp.fromstring(line, dtype=typ, sep=',')
if qp == 1:
# Count of measurements clipped on the high end of
# the MccDaq board's input range.
self.clipHi[:, p] = assignArr
elif qp == 2:
# Count of measurements clipped on the low end of
# the MccDaq board's input range.
self.clipLo[:, p] = assignArr
elif qp == 3:
# Mean measurement value over the packet as a
# percentage of the AIn() half range.
self.meanPct[:, p] = assignArr
elif qp == 4:
# (pct) Mean value of sample measurements above
# or equal to the mean.
self.meanUpPct[:, p] = assignArr
elif qp == 5:
# (pct) Mean value of sample measurements below
# the mean.
self.meanDnPct[:, p] = assignArr
elif qp == 6:
# Count of measurements above or equal to the mean.
self.countUp[:, p] = assignArr
elif qp == 7:
# Count of measurements below the mean.
self.countDn[:, p] = assignArr
# Set the sample index to 0 to start.
s = 0
elif lidx == 1:
(self.fileDateStr, # UTC date file was created.
self.fileNum) = line.split(',') # File number in set.
# Type casting.
self.fileNum = int(self.fileNum)
elif lidx == 2:
self.descript = line # Description of the test.
elif lidx == 3:
self.minor = line # Minor note.
elif lidx == 4:
self.major = line # Major note.
elif lidx == 5:
(self.scanChCount, # number of channels in each A/D scan.
self.chCount, # number of channels written to the file.
self.n, # Number of samples in the FFT time series.
self.fs, # (Hz) FFT sampling frequency.
self.xmitFund) = line.split(',') # (Hz) Transmit Square
# wave fundamental frequency.
# Type casting.
self.scanChCount = int(self.scanChCount)
self.chCount = int(self.chCount)
self.n = int(self.n)
self.fs = int(self.fs)
self.xmitFund = float(self.xmitFund)
# Each file contains a file header of length 10 lines,
# followed by packets. Packets contain (11 + n) lines each.
self.pktCount = int((lineCount - 10)/(11 + self.n))
# Dimension arrays indexed by packet.
self.dimArrays()
elif lidx == 6:
(self.rCurrentMeas, # (Ohm) resistance.
self.rExtraSeries) = line.split(',') # (Ohm).
# Type casting.
self.rCurrentMeas = float(self.rCurrentMeas)
self.rExtraSeries = float(self.rCurrentMeas)
elif lidx == 7:
# Voltage measurement names.
# 0-indexed by channel number.
self.measStr = line.split(',')
elif lidx == 8:
# Construct arrays using the scipy package.
# 5B amplifier maximum of the input range (V).
# 0-indexed by channel number.
self.In5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 9:
# 5B amplifier maximum of the output range (V).
# 0-indexed by channel number.
self.Out5BHi = sp.fromstring(line, dtype=float, sep=',')
elif lidx == 10:
# MccDaq board AIn() maximum of the input range (V).
# 0-indexed by channel number.
self.ALoadQHi = sp.fromstring(line, dtype=float, sep=',')
# After the file has been read, perform some calculations.
self.postRead(saveThis)
def dimArrays(self):
# Initialize numpy arrays and python lists as zeros.
shape2D = (self.chCount, self.pktCount)
# 0-indexed by packet number.
self.pkt = sp.zeros(self.pktCount, dtype=int)
self.cpuDTStr = [cs.emptyClass()]*self.pktCount
self.cpuDT = [0]*self.pktCount
self.gpsDTStr = [cs.emptyClass()]*self.pktCount
self.gpsDT = [0]*self.pktCount
self.lat = sp.zeros(self.pktCount, dtype=float)
self.longi = sp.zeros(self.pktCount, dtype=float)
# 0-indexed by channel number.
# 0-indexed by packet number.
self.clipHi = sp.zeros(shape2D, dtype=int)
self.clipLo = sp.zeros(shape2D, dtype=int)
self.meanPct = sp.zeros(shape2D, dtype=float)
self.meanUpPct = sp.zeros(shape2D, dtype=float)
self.meanDnPct = sp.zeros(shape2D, dtype=float)
self.meanPhys = sp.zeros(shape2D, dtype=float)
self.meanUpPhys = sp.zeros(shape2D, dtype=float)
self.meanDnPhys = sp.zeros(shape2D, dtype=float)
self.countUp = sp.zeros(shape2D, dtype=int)
self.countDn = sp.zeros(shape2D, dtype=int)
# 0-indexed by channel number.
# 0-indexed by packet number.
# 0-indexed by sample number.
self.raw = sp.zeros((self.chCount, self.pktCount, self.n), dtype=float)
def str2DateTime(self, dTStr):
YY = 2000 + int(dTStr.d[0: 0+2])
MO = int(dTStr.d[2: 2+2])
DD = int(dTStr.d[4: 4+2])
HH = int(dTStr.t[0: 0+2])
MM = int(dTStr.t[2: 2+2])
SS = int(dTStr.t[4: 4+2])
micro = 1000 * int(dTStr.t[7: 7+3])
if YY == 2000:
return datetime.min
else:
return datetime(YY, MO, DD, HH, MM, SS, micro)
def computePhys(self, currentCh):
self.meanPhys = self.pct2Phys(self.meanPct, currentCh)
self.meanUpPhys = self.pct2Phys(self.meanUpPct, currentCh)
self.meanDnPhys = self.pct2Phys(self.meanDnPct, currentCh)
def pct2Phys(self, pct, currentCh):
phys = sp.zeros_like(pct, dtype=float)
for ch in range(self.chCount):
phys[ch, :] = (pct[ch, :] / 100 *
self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on the current measurement channel to a current.
phys[currentCh, :] /= self.rCurrentMeas # (A)
return phys
def countLines(self, fh):
# Counter lidx starts counting at 1 for the first line.
for lidx, line in enumerate(fh, 1):
pass
return lidx
def postRead(self, saveThis):
# Whether to correct for channel skew.
corrChSkewBool = True
# Channel on which the current is measured. This channel's phase is
# subtracted from the other channels in phase difference calculation.
# This channel's voltage is divided by the current measurement
# resistance to obtain a physical magnitude in Ampere units.
# Other channels voltages are divided by this channel's current to find
# impedance magnitude.
currentCh = 0
# Flip voltage channels upside-down if requested.
if saveThis == 'upsideDown':
for ch in range(self.chCount):
if ch != currentCh:
self.raw[ch, ...] *= -1
self.raw[ch, ...] += 2**16 - 1
self.computePhys(currentCh)
# Compute FFTs.
self.freq = spfftpack.fftfreq(self.n, 1 / self.fs, ) # (Hz)
self.fft = spfftpack.fft(self.raw) / self.n
# Magnitude and uncorrected phase.
self.phaseUnCorr = sp.angle(self.fft) # (rad)
self.mag16Bit = sp.absolute(self.fft)
# Convert magnitude to physical units.
f215 = float(2**15)
self.magPhys = self.mag16Bit / f215
for ch in range(self.chCount):
self.magPhys[ch, :, :] *= (self.ALoadQHi[ch] * self.In5BHi[ch] /
self.Out5BHi[ch]) # (V)
# Convert the voltage on ch0 to a current.
self.magPhys[0, :, :] /= self.rCurrentMeas # (A)
# Correct phase for channel skew.
self.phase = self.phaseUnCorr
if corrChSkewBool:
for ch in range(self.chCount):
deltaT = ch / (self.fs * self.scanChCount) # (s)
corrSlope = 2*sp.pi*deltaT # (rad/Hz)
for p in range(self.pktCount):
self.phase[ch, p, :] = sp.subtract(self.phase[ch, p, :],
self.freq * corrSlope)
# Compute phase differences.
# Be careful about angles looping through +/- pi.
# A phase difference absolute value is less than pi radian.
self.phaseDiff = sp.zeros_like(self.phase, dtype=float)
for ch in range(self.chCount):
self.phaseDiff[ch, :, :] = sp.subtract(self.phase[ch, :, :],
self.phase[currentCh, :, :])
self.phaseDiff[self.phaseDiff < -sp.pi] += 2*sp.pi
self.phaseDiff[self.phaseDiff > sp.pi] -= 2*sp.pi
# Convert phase differences from radian to milliradian.
self.phaseDiff *= 1000 # (mrad)
# Calculate apparent impedance magnitude.
self.zMag = sp.zeros_like(self.magPhys)
for ch in range(self.chCount):
# (Ohm)
self.zMag[ch, :, :] = sp.divide(self.magPhys[ch, :, :],
self.magPhys[currentCh, :, :])
# Convert to milliOhm.
self.zMag *= 1000
# Invoke the main function here.
if __name__ == "__main__":
ipProcess() | self.longi[p] = float(self.longi[p])
elif qp < 7:
qp += 1 | random_line_split |
tracer.go | package tortoise
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/spacemeshos/go-spacemesh/common/types"
"github.com/spacemeshos/go-spacemesh/common/types/result"
)
type output struct {
Type eventType `json:"t"`
Event json.RawMessage `json:"o"`
}
type tracer struct {
logger *zap.Logger
}
func (t *tracer) On(event traceEvent) {
buf, err := json.Marshal(event)
if err != nil {
panic(err.Error())
}
raw := json.RawMessage(buf)
t.logger.Info("",
zap.Uint16("t", event.Type()),
zap.Any("o", &raw),
)
}
type TraceOpt func(*zap.Config)
func WithOutput(path string) TraceOpt {
return func(cfg *zap.Config) {
cfg.OutputPaths = []string{path}
}
}
func newTracer(opts ...TraceOpt) *tracer {
cfg := zap.NewProductionConfig()
cfg.Sampling = nil
cfg.EncoderConfig.CallerKey = zapcore.OmitKey
cfg.EncoderConfig.MessageKey = zapcore.OmitKey
cfg.EncoderConfig.LevelKey = zapcore.OmitKey
cfg.DisableCaller = true
for _, opt := range opts {
opt(&cfg)
}
logger, err := cfg.Build()
if err != nil {
panic(err.Error())
}
return &tracer{
logger: logger.Named("tracer"),
}
}
type traceRunner struct {
opts []Opt
trt *Tortoise
pending map[types.BallotID]*DecodedBallot
assertOutputs bool
assertErrors bool
}
func RunTrace(path string, breakpoint func(), opts ...Opt) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
dec := json.NewDecoder(bufio.NewReaderSize(f, 1<<20))
enum := newEventEnum()
runner := &traceRunner{
opts: opts,
pending: map[types.BallotID]*DecodedBallot{},
assertOutputs: true,
assertErrors: true,
}
for {
ev, err := enum.Decode(dec)
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err
}
if err := ev.Run(runner); err != nil {
return err
}
if breakpoint != nil {
breakpoint()
}
}
}
type eventType = uint16
const (
traceStart eventType = 1 + iota
traceWeakCoin
traceBeacon
traceAtx
traceBallot
traceDecode
traceStore
traceEncode
traceTally
traceBlock
traceHare
traceActiveset
traceResults
traceUpdates
traceMalfeasence
)
type traceEvent interface {
Type() eventType
New() traceEvent
Run(*traceRunner) error
}
type ConfigTrace struct {
Hdist uint32 `json:"hdist"`
Zdist uint32 `json:"zdist"`
WindowSize uint32 `json:"window"`
MaxExceptions uint32 `json:"exceptions"`
BadBeaconVoteDelayLayers uint32 `json:"delay"`
LayerSize uint32 `json:"layer-size"`
EpochSize uint32 `json:"epoch-size"` // this field is not set in the original config
EffectiveGenesis uint32 `json:"effective-genesis"`
}
func (c *ConfigTrace) Type() eventType {
return traceStart
}
func (c *ConfigTrace) New() traceEvent {
return &ConfigTrace{}
}
func (c *ConfigTrace) Run(r *traceRunner) error {
types.SetLayersPerEpoch(c.EpochSize)
types.SetEffectiveGenesis(c.EffectiveGenesis)
trt, err := New(append(r.opts, WithConfig(Config{
Hdist: c.Hdist,
Zdist: c.Zdist,
WindowSize: c.WindowSize,
MaxExceptions: int(c.MaxExceptions),
BadBeaconVoteDelayLayers: c.BadBeaconVoteDelayLayers,
LayerSize: c.LayerSize,
}))...)
if err != nil {
return err
}
r.trt = trt
return nil
}
type AtxTrace struct {
Header *types.AtxTortoiseData `json:",inline"`
}
func (a *AtxTrace) Type() eventType {
return traceAtx
}
func (a *AtxTrace) New() traceEvent {
return &AtxTrace{}
}
func (a *AtxTrace) Run(r *traceRunner) error {
r.trt.OnAtx(a.Header)
return nil
}
type WeakCoinTrace struct {
Layer types.LayerID `json:"lid"`
Coin bool `json:"coin"`
}
func (w *WeakCoinTrace) Type() eventType {
return traceWeakCoin
}
func (w *WeakCoinTrace) New() traceEvent {
return &WeakCoinTrace{}
}
func (w *WeakCoinTrace) Run(r *traceRunner) error {
r.trt.OnWeakCoin(w.Layer, w.Coin)
return nil
}
type BeaconTrace struct {
Epoch types.EpochID `json:"epoch"`
Beacon types.Beacon `json:"beacon"`
}
func (b *BeaconTrace) Type() eventType {
return traceBeacon
}
func (b *BeaconTrace) New() traceEvent {
return &BeaconTrace{}
}
func (b *BeaconTrace) Run(r *traceRunner) error {
r.trt.OnBeacon(b.Epoch, b.Beacon)
return nil
}
type BallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
}
func (b *BallotTrace) Type() eventType {
return traceBallot
}
func (b *BallotTrace) New() traceEvent {
return &BallotTrace{}
}
func (b *BallotTrace) Run(r *traceRunner) error {
r.trt.OnBallot(b.Ballot)
return nil
}
type DecodeBallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
Error string `json:"e"`
// TODO(dshulyak) want to assert decoding results somehow
}
func (d *DecodeBallotTrace) Type() eventType {
return traceDecode
}
func (d *DecodeBallotTrace) New() traceEvent {
return &DecodeBallotTrace{}
}
func (b *DecodeBallotTrace) Run(r *traceRunner) error {
decoded, err := r.trt.DecodeBallot(b.Ballot)
if r.assertErrors {
if err := assertErrors(err, b.Error); err != nil {
return err
}
}
if err == nil {
r.pending[decoded.ID] = decoded
}
return nil
}
type StoreBallotTrace struct {
ID types.BallotID `json:"id"`
Malicious bool `json:"mal"`
Error string `json:"e,omitempty"`
}
func (s *StoreBallotTrace) Type() eventType {
return traceStore
}
func (s *StoreBallotTrace) New() traceEvent {
return &StoreBallotTrace{}
}
func (s *StoreBallotTrace) Run(r *traceRunner) error {
pending, exist := r.pending[s.ID]
if !exist {
return fmt.Errorf("id %v should be pending", s.ID)
}
if s.Malicious {
pending.SetMalicious()
}
delete(r.pending, s.ID)
err := r.trt.StoreBallot(pending)
if r.assertErrors {
if err := assertErrors(err, s.Error); err != nil {
return err
}
}
return nil
}
type EncodeVotesTrace struct {
Layer types.LayerID `json:"lid"`
Opinion *types.Opinion `json:"opinion"`
Error string `json:"e"`
}
func (e *EncodeVotesTrace) Type() eventType {
return traceEncode
}
func (e *EncodeVotesTrace) New() traceEvent {
return &EncodeVotesTrace{}
}
func (e *EncodeVotesTrace) Run(r *traceRunner) error {
opinion, err := r.trt.EncodeVotes(context.Background(), EncodeVotesWithCurrent(e.Layer))
if r.assertErrors {
if err := assertErrors(err, e.Error); err != nil {
return err
}
}
if err == nil {
if diff := cmp.Diff(opinion, e.Opinion); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type TallyTrace struct {
Layer types.LayerID `json:"lid"`
}
func (t *TallyTrace) Type() eventType {
return traceTally
}
func (t *TallyTrace) New() traceEvent {
return &TallyTrace{}
}
func (t *TallyTrace) Run(r *traceRunner) error {
r.trt.TallyVotes(context.Background(), t.Layer)
return nil
}
type HareTrace struct {
Layer types.LayerID `json:"lid"`
Vote types.BlockID `json:"vote"`
}
func (h *HareTrace) Type() eventType {
return traceHare
}
func (h *HareTrace) New() traceEvent {
return &HareTrace{}
}
func (h *HareTrace) Run(r *traceRunner) error {
r.trt.OnHareOutput(h.Layer, h.Vote)
return nil
}
type ResultsTrace struct {
From types.LayerID `json:"from"`
To types.LayerID `json:"to"`
Error string `json:"e"`
Results []result.Layer `json:"results"`
}
func (r *ResultsTrace) Type() eventType {
return traceResults
}
func (r *ResultsTrace) New() traceEvent {
return &ResultsTrace{}
}
func (r *ResultsTrace) Run(rt *traceRunner) error {
rst, err := rt.trt.Results(r.From, r.To)
if rt.assertErrors {
if err := assertErrors(err, r.Error); err != nil |
}
if err == nil {
if diff := cmp.Diff(rst, r.Results, cmpopts.EquateEmpty()); len(diff) > 0 && rt.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type UpdatesTrace struct {
ResultsTrace `json:",inline"`
}
func (u *UpdatesTrace) Type() eventType {
return traceUpdates
}
func (u *UpdatesTrace) New() traceEvent {
return &UpdatesTrace{}
}
func (u *UpdatesTrace) Run(r *traceRunner) error {
rst := r.trt.Updates()
if diff := cmp.Diff(rst, u.Results, cmpopts.EquateEmpty()); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
return nil
}
type BlockTrace struct {
Header types.BlockHeader `json:",inline"`
Valid bool `json:"v"`
}
func (b *BlockTrace) Type() eventType {
return traceBlock
}
func (b *BlockTrace) New() traceEvent {
return &BlockTrace{}
}
func (b *BlockTrace) Run(r *traceRunner) error {
if b.Valid {
r.trt.OnValidBlock(b.Header)
} else {
r.trt.OnBlock(b.Header)
}
return nil
}
type MalfeasanceTrace struct {
ID types.NodeID `json:"id"`
}
func (m *MalfeasanceTrace) Type() eventType {
return traceMalfeasence
}
func (m *MalfeasanceTrace) New() traceEvent {
return &MalfeasanceTrace{}
}
func (m *MalfeasanceTrace) Run(r *traceRunner) error {
r.trt.OnMalfeasance(m.ID)
return nil
}
func assertErrors(err error, expect string) error {
msg := ""
if err != nil {
msg = err.Error()
}
if expect != msg {
return fmt.Errorf("%s != %s", expect, msg)
}
return nil
}
func newEventEnum() eventEnum {
enum := eventEnum{types: map[uint16]traceEvent{}}
enum.Register(&ConfigTrace{})
enum.Register(&WeakCoinTrace{})
enum.Register(&BeaconTrace{})
enum.Register(&AtxTrace{})
enum.Register(&BallotTrace{})
enum.Register(&DecodeBallotTrace{})
enum.Register(&StoreBallotTrace{})
enum.Register(&EncodeVotesTrace{})
enum.Register(&TallyTrace{})
enum.Register(&BlockTrace{})
enum.Register(&HareTrace{})
enum.Register(&ResultsTrace{})
enum.Register(&UpdatesTrace{})
enum.Register(&MalfeasanceTrace{})
return enum
}
type eventEnum struct {
types map[eventType]traceEvent
}
func (e *eventEnum) Register(ev traceEvent) {
e.types[ev.Type()] = ev
}
func (e *eventEnum) Decode(dec *json.Decoder) (traceEvent, error) {
var event output
if err := dec.Decode(&event); err != nil {
return nil, err
}
ev := e.types[event.Type]
if ev == nil {
return nil, fmt.Errorf("type %d is not registered", event.Type)
}
obj := ev.New()
if err := json.Unmarshal(event.Event, obj); err != nil {
return nil, err
}
return obj, nil
}
| {
return err
} | conditional_block |
tracer.go | package tortoise
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/spacemeshos/go-spacemesh/common/types"
"github.com/spacemeshos/go-spacemesh/common/types/result"
)
type output struct {
Type eventType `json:"t"`
Event json.RawMessage `json:"o"`
}
type tracer struct {
logger *zap.Logger
}
func (t *tracer) On(event traceEvent) {
buf, err := json.Marshal(event)
if err != nil {
panic(err.Error())
}
raw := json.RawMessage(buf)
t.logger.Info("",
zap.Uint16("t", event.Type()),
zap.Any("o", &raw),
)
}
type TraceOpt func(*zap.Config)
func WithOutput(path string) TraceOpt {
return func(cfg *zap.Config) {
cfg.OutputPaths = []string{path}
}
}
func newTracer(opts ...TraceOpt) *tracer {
cfg := zap.NewProductionConfig()
cfg.Sampling = nil
cfg.EncoderConfig.CallerKey = zapcore.OmitKey
cfg.EncoderConfig.MessageKey = zapcore.OmitKey
cfg.EncoderConfig.LevelKey = zapcore.OmitKey
cfg.DisableCaller = true
for _, opt := range opts {
opt(&cfg)
}
logger, err := cfg.Build()
if err != nil {
panic(err.Error())
}
return &tracer{
logger: logger.Named("tracer"),
}
}
type traceRunner struct {
opts []Opt
trt *Tortoise
pending map[types.BallotID]*DecodedBallot
assertOutputs bool
assertErrors bool
}
func RunTrace(path string, breakpoint func(), opts ...Opt) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
dec := json.NewDecoder(bufio.NewReaderSize(f, 1<<20))
enum := newEventEnum()
runner := &traceRunner{
opts: opts,
pending: map[types.BallotID]*DecodedBallot{},
assertOutputs: true,
assertErrors: true,
}
for {
ev, err := enum.Decode(dec)
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err
}
if err := ev.Run(runner); err != nil {
return err
}
if breakpoint != nil {
breakpoint()
}
}
}
type eventType = uint16
const (
traceStart eventType = 1 + iota
traceWeakCoin
traceBeacon
traceAtx
traceBallot
traceDecode
traceStore
traceEncode
traceTally
traceBlock
traceHare
traceActiveset
traceResults
traceUpdates
traceMalfeasence
)
type traceEvent interface {
Type() eventType
New() traceEvent
Run(*traceRunner) error
}
type ConfigTrace struct {
Hdist uint32 `json:"hdist"`
Zdist uint32 `json:"zdist"`
WindowSize uint32 `json:"window"`
MaxExceptions uint32 `json:"exceptions"`
BadBeaconVoteDelayLayers uint32 `json:"delay"`
LayerSize uint32 `json:"layer-size"`
EpochSize uint32 `json:"epoch-size"` // this field is not set in the original config
EffectiveGenesis uint32 `json:"effective-genesis"`
}
func (c *ConfigTrace) Type() eventType {
return traceStart
}
func (c *ConfigTrace) New() traceEvent {
return &ConfigTrace{}
}
func (c *ConfigTrace) Run(r *traceRunner) error {
types.SetLayersPerEpoch(c.EpochSize)
types.SetEffectiveGenesis(c.EffectiveGenesis)
trt, err := New(append(r.opts, WithConfig(Config{
Hdist: c.Hdist,
Zdist: c.Zdist,
WindowSize: c.WindowSize,
MaxExceptions: int(c.MaxExceptions),
BadBeaconVoteDelayLayers: c.BadBeaconVoteDelayLayers,
LayerSize: c.LayerSize,
}))...)
if err != nil {
return err
}
r.trt = trt
return nil
}
type AtxTrace struct {
Header *types.AtxTortoiseData `json:",inline"`
}
func (a *AtxTrace) Type() eventType {
return traceAtx
}
func (a *AtxTrace) New() traceEvent {
return &AtxTrace{}
}
func (a *AtxTrace) Run(r *traceRunner) error {
r.trt.OnAtx(a.Header)
return nil
}
type WeakCoinTrace struct {
Layer types.LayerID `json:"lid"`
Coin bool `json:"coin"`
}
func (w *WeakCoinTrace) Type() eventType {
return traceWeakCoin
}
func (w *WeakCoinTrace) New() traceEvent {
return &WeakCoinTrace{}
}
func (w *WeakCoinTrace) Run(r *traceRunner) error {
r.trt.OnWeakCoin(w.Layer, w.Coin)
return nil
}
type BeaconTrace struct {
Epoch types.EpochID `json:"epoch"`
Beacon types.Beacon `json:"beacon"`
}
func (b *BeaconTrace) Type() eventType {
return traceBeacon
}
func (b *BeaconTrace) New() traceEvent {
return &BeaconTrace{}
}
func (b *BeaconTrace) Run(r *traceRunner) error {
r.trt.OnBeacon(b.Epoch, b.Beacon)
return nil
}
type BallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
}
func (b *BallotTrace) Type() eventType {
return traceBallot
}
func (b *BallotTrace) New() traceEvent {
return &BallotTrace{}
}
func (b *BallotTrace) Run(r *traceRunner) error {
r.trt.OnBallot(b.Ballot)
return nil
}
type DecodeBallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
Error string `json:"e"`
// TODO(dshulyak) want to assert decoding results somehow
}
func (d *DecodeBallotTrace) Type() eventType {
return traceDecode
}
func (d *DecodeBallotTrace) New() traceEvent {
return &DecodeBallotTrace{}
}
func (b *DecodeBallotTrace) Run(r *traceRunner) error {
decoded, err := r.trt.DecodeBallot(b.Ballot)
if r.assertErrors {
if err := assertErrors(err, b.Error); err != nil {
return err
}
}
if err == nil {
r.pending[decoded.ID] = decoded
}
return nil
}
type StoreBallotTrace struct {
ID types.BallotID `json:"id"`
Malicious bool `json:"mal"`
Error string `json:"e,omitempty"`
}
func (s *StoreBallotTrace) Type() eventType |
func (s *StoreBallotTrace) New() traceEvent {
return &StoreBallotTrace{}
}
func (s *StoreBallotTrace) Run(r *traceRunner) error {
pending, exist := r.pending[s.ID]
if !exist {
return fmt.Errorf("id %v should be pending", s.ID)
}
if s.Malicious {
pending.SetMalicious()
}
delete(r.pending, s.ID)
err := r.trt.StoreBallot(pending)
if r.assertErrors {
if err := assertErrors(err, s.Error); err != nil {
return err
}
}
return nil
}
type EncodeVotesTrace struct {
Layer types.LayerID `json:"lid"`
Opinion *types.Opinion `json:"opinion"`
Error string `json:"e"`
}
func (e *EncodeVotesTrace) Type() eventType {
return traceEncode
}
func (e *EncodeVotesTrace) New() traceEvent {
return &EncodeVotesTrace{}
}
func (e *EncodeVotesTrace) Run(r *traceRunner) error {
opinion, err := r.trt.EncodeVotes(context.Background(), EncodeVotesWithCurrent(e.Layer))
if r.assertErrors {
if err := assertErrors(err, e.Error); err != nil {
return err
}
}
if err == nil {
if diff := cmp.Diff(opinion, e.Opinion); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type TallyTrace struct {
Layer types.LayerID `json:"lid"`
}
func (t *TallyTrace) Type() eventType {
return traceTally
}
func (t *TallyTrace) New() traceEvent {
return &TallyTrace{}
}
func (t *TallyTrace) Run(r *traceRunner) error {
r.trt.TallyVotes(context.Background(), t.Layer)
return nil
}
type HareTrace struct {
Layer types.LayerID `json:"lid"`
Vote types.BlockID `json:"vote"`
}
func (h *HareTrace) Type() eventType {
return traceHare
}
func (h *HareTrace) New() traceEvent {
return &HareTrace{}
}
func (h *HareTrace) Run(r *traceRunner) error {
r.trt.OnHareOutput(h.Layer, h.Vote)
return nil
}
type ResultsTrace struct {
From types.LayerID `json:"from"`
To types.LayerID `json:"to"`
Error string `json:"e"`
Results []result.Layer `json:"results"`
}
func (r *ResultsTrace) Type() eventType {
return traceResults
}
func (r *ResultsTrace) New() traceEvent {
return &ResultsTrace{}
}
func (r *ResultsTrace) Run(rt *traceRunner) error {
rst, err := rt.trt.Results(r.From, r.To)
if rt.assertErrors {
if err := assertErrors(err, r.Error); err != nil {
return err
}
}
if err == nil {
if diff := cmp.Diff(rst, r.Results, cmpopts.EquateEmpty()); len(diff) > 0 && rt.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type UpdatesTrace struct {
ResultsTrace `json:",inline"`
}
func (u *UpdatesTrace) Type() eventType {
return traceUpdates
}
func (u *UpdatesTrace) New() traceEvent {
return &UpdatesTrace{}
}
func (u *UpdatesTrace) Run(r *traceRunner) error {
rst := r.trt.Updates()
if diff := cmp.Diff(rst, u.Results, cmpopts.EquateEmpty()); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
return nil
}
type BlockTrace struct {
Header types.BlockHeader `json:",inline"`
Valid bool `json:"v"`
}
func (b *BlockTrace) Type() eventType {
return traceBlock
}
func (b *BlockTrace) New() traceEvent {
return &BlockTrace{}
}
func (b *BlockTrace) Run(r *traceRunner) error {
if b.Valid {
r.trt.OnValidBlock(b.Header)
} else {
r.trt.OnBlock(b.Header)
}
return nil
}
type MalfeasanceTrace struct {
ID types.NodeID `json:"id"`
}
func (m *MalfeasanceTrace) Type() eventType {
return traceMalfeasence
}
func (m *MalfeasanceTrace) New() traceEvent {
return &MalfeasanceTrace{}
}
func (m *MalfeasanceTrace) Run(r *traceRunner) error {
r.trt.OnMalfeasance(m.ID)
return nil
}
func assertErrors(err error, expect string) error {
msg := ""
if err != nil {
msg = err.Error()
}
if expect != msg {
return fmt.Errorf("%s != %s", expect, msg)
}
return nil
}
func newEventEnum() eventEnum {
enum := eventEnum{types: map[uint16]traceEvent{}}
enum.Register(&ConfigTrace{})
enum.Register(&WeakCoinTrace{})
enum.Register(&BeaconTrace{})
enum.Register(&AtxTrace{})
enum.Register(&BallotTrace{})
enum.Register(&DecodeBallotTrace{})
enum.Register(&StoreBallotTrace{})
enum.Register(&EncodeVotesTrace{})
enum.Register(&TallyTrace{})
enum.Register(&BlockTrace{})
enum.Register(&HareTrace{})
enum.Register(&ResultsTrace{})
enum.Register(&UpdatesTrace{})
enum.Register(&MalfeasanceTrace{})
return enum
}
type eventEnum struct {
types map[eventType]traceEvent
}
func (e *eventEnum) Register(ev traceEvent) {
e.types[ev.Type()] = ev
}
func (e *eventEnum) Decode(dec *json.Decoder) (traceEvent, error) {
var event output
if err := dec.Decode(&event); err != nil {
return nil, err
}
ev := e.types[event.Type]
if ev == nil {
return nil, fmt.Errorf("type %d is not registered", event.Type)
}
obj := ev.New()
if err := json.Unmarshal(event.Event, obj); err != nil {
return nil, err
}
return obj, nil
}
| {
return traceStore
} | identifier_body |
tracer.go | package tortoise
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/spacemeshos/go-spacemesh/common/types"
"github.com/spacemeshos/go-spacemesh/common/types/result"
)
type output struct {
Type eventType `json:"t"`
Event json.RawMessage `json:"o"`
}
type tracer struct {
logger *zap.Logger
}
func (t *tracer) On(event traceEvent) {
buf, err := json.Marshal(event)
if err != nil {
panic(err.Error())
}
raw := json.RawMessage(buf)
t.logger.Info("",
zap.Uint16("t", event.Type()),
zap.Any("o", &raw),
)
}
type TraceOpt func(*zap.Config)
func WithOutput(path string) TraceOpt {
return func(cfg *zap.Config) {
cfg.OutputPaths = []string{path}
}
}
func newTracer(opts ...TraceOpt) *tracer {
cfg := zap.NewProductionConfig()
cfg.Sampling = nil
cfg.EncoderConfig.CallerKey = zapcore.OmitKey
cfg.EncoderConfig.MessageKey = zapcore.OmitKey
cfg.EncoderConfig.LevelKey = zapcore.OmitKey
cfg.DisableCaller = true
for _, opt := range opts {
opt(&cfg)
}
logger, err := cfg.Build()
if err != nil {
panic(err.Error())
}
return &tracer{
logger: logger.Named("tracer"),
}
}
type traceRunner struct {
opts []Opt
trt *Tortoise
pending map[types.BallotID]*DecodedBallot
assertOutputs bool
assertErrors bool
}
func RunTrace(path string, breakpoint func(), opts ...Opt) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
dec := json.NewDecoder(bufio.NewReaderSize(f, 1<<20))
enum := newEventEnum()
runner := &traceRunner{
opts: opts,
pending: map[types.BallotID]*DecodedBallot{},
assertOutputs: true,
assertErrors: true,
}
for {
ev, err := enum.Decode(dec)
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err
}
if err := ev.Run(runner); err != nil {
return err
}
if breakpoint != nil {
breakpoint()
}
}
}
type eventType = uint16
const (
traceStart eventType = 1 + iota
traceWeakCoin
traceBeacon
traceAtx
traceBallot
traceDecode | traceActiveset
traceResults
traceUpdates
traceMalfeasence
)
type traceEvent interface {
Type() eventType
New() traceEvent
Run(*traceRunner) error
}
type ConfigTrace struct {
Hdist uint32 `json:"hdist"`
Zdist uint32 `json:"zdist"`
WindowSize uint32 `json:"window"`
MaxExceptions uint32 `json:"exceptions"`
BadBeaconVoteDelayLayers uint32 `json:"delay"`
LayerSize uint32 `json:"layer-size"`
EpochSize uint32 `json:"epoch-size"` // this field is not set in the original config
EffectiveGenesis uint32 `json:"effective-genesis"`
}
func (c *ConfigTrace) Type() eventType {
return traceStart
}
func (c *ConfigTrace) New() traceEvent {
return &ConfigTrace{}
}
func (c *ConfigTrace) Run(r *traceRunner) error {
types.SetLayersPerEpoch(c.EpochSize)
types.SetEffectiveGenesis(c.EffectiveGenesis)
trt, err := New(append(r.opts, WithConfig(Config{
Hdist: c.Hdist,
Zdist: c.Zdist,
WindowSize: c.WindowSize,
MaxExceptions: int(c.MaxExceptions),
BadBeaconVoteDelayLayers: c.BadBeaconVoteDelayLayers,
LayerSize: c.LayerSize,
}))...)
if err != nil {
return err
}
r.trt = trt
return nil
}
type AtxTrace struct {
Header *types.AtxTortoiseData `json:",inline"`
}
func (a *AtxTrace) Type() eventType {
return traceAtx
}
func (a *AtxTrace) New() traceEvent {
return &AtxTrace{}
}
func (a *AtxTrace) Run(r *traceRunner) error {
r.trt.OnAtx(a.Header)
return nil
}
type WeakCoinTrace struct {
Layer types.LayerID `json:"lid"`
Coin bool `json:"coin"`
}
func (w *WeakCoinTrace) Type() eventType {
return traceWeakCoin
}
func (w *WeakCoinTrace) New() traceEvent {
return &WeakCoinTrace{}
}
func (w *WeakCoinTrace) Run(r *traceRunner) error {
r.trt.OnWeakCoin(w.Layer, w.Coin)
return nil
}
type BeaconTrace struct {
Epoch types.EpochID `json:"epoch"`
Beacon types.Beacon `json:"beacon"`
}
func (b *BeaconTrace) Type() eventType {
return traceBeacon
}
func (b *BeaconTrace) New() traceEvent {
return &BeaconTrace{}
}
func (b *BeaconTrace) Run(r *traceRunner) error {
r.trt.OnBeacon(b.Epoch, b.Beacon)
return nil
}
type BallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
}
func (b *BallotTrace) Type() eventType {
return traceBallot
}
func (b *BallotTrace) New() traceEvent {
return &BallotTrace{}
}
func (b *BallotTrace) Run(r *traceRunner) error {
r.trt.OnBallot(b.Ballot)
return nil
}
type DecodeBallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
Error string `json:"e"`
// TODO(dshulyak) want to assert decoding results somehow
}
func (d *DecodeBallotTrace) Type() eventType {
return traceDecode
}
func (d *DecodeBallotTrace) New() traceEvent {
return &DecodeBallotTrace{}
}
func (b *DecodeBallotTrace) Run(r *traceRunner) error {
decoded, err := r.trt.DecodeBallot(b.Ballot)
if r.assertErrors {
if err := assertErrors(err, b.Error); err != nil {
return err
}
}
if err == nil {
r.pending[decoded.ID] = decoded
}
return nil
}
type StoreBallotTrace struct {
ID types.BallotID `json:"id"`
Malicious bool `json:"mal"`
Error string `json:"e,omitempty"`
}
func (s *StoreBallotTrace) Type() eventType {
return traceStore
}
func (s *StoreBallotTrace) New() traceEvent {
return &StoreBallotTrace{}
}
func (s *StoreBallotTrace) Run(r *traceRunner) error {
pending, exist := r.pending[s.ID]
if !exist {
return fmt.Errorf("id %v should be pending", s.ID)
}
if s.Malicious {
pending.SetMalicious()
}
delete(r.pending, s.ID)
err := r.trt.StoreBallot(pending)
if r.assertErrors {
if err := assertErrors(err, s.Error); err != nil {
return err
}
}
return nil
}
type EncodeVotesTrace struct {
Layer types.LayerID `json:"lid"`
Opinion *types.Opinion `json:"opinion"`
Error string `json:"e"`
}
func (e *EncodeVotesTrace) Type() eventType {
return traceEncode
}
func (e *EncodeVotesTrace) New() traceEvent {
return &EncodeVotesTrace{}
}
func (e *EncodeVotesTrace) Run(r *traceRunner) error {
opinion, err := r.trt.EncodeVotes(context.Background(), EncodeVotesWithCurrent(e.Layer))
if r.assertErrors {
if err := assertErrors(err, e.Error); err != nil {
return err
}
}
if err == nil {
if diff := cmp.Diff(opinion, e.Opinion); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type TallyTrace struct {
Layer types.LayerID `json:"lid"`
}
func (t *TallyTrace) Type() eventType {
return traceTally
}
func (t *TallyTrace) New() traceEvent {
return &TallyTrace{}
}
func (t *TallyTrace) Run(r *traceRunner) error {
r.trt.TallyVotes(context.Background(), t.Layer)
return nil
}
type HareTrace struct {
Layer types.LayerID `json:"lid"`
Vote types.BlockID `json:"vote"`
}
func (h *HareTrace) Type() eventType {
return traceHare
}
func (h *HareTrace) New() traceEvent {
return &HareTrace{}
}
func (h *HareTrace) Run(r *traceRunner) error {
r.trt.OnHareOutput(h.Layer, h.Vote)
return nil
}
type ResultsTrace struct {
From types.LayerID `json:"from"`
To types.LayerID `json:"to"`
Error string `json:"e"`
Results []result.Layer `json:"results"`
}
func (r *ResultsTrace) Type() eventType {
return traceResults
}
func (r *ResultsTrace) New() traceEvent {
return &ResultsTrace{}
}
func (r *ResultsTrace) Run(rt *traceRunner) error {
rst, err := rt.trt.Results(r.From, r.To)
if rt.assertErrors {
if err := assertErrors(err, r.Error); err != nil {
return err
}
}
if err == nil {
if diff := cmp.Diff(rst, r.Results, cmpopts.EquateEmpty()); len(diff) > 0 && rt.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type UpdatesTrace struct {
ResultsTrace `json:",inline"`
}
func (u *UpdatesTrace) Type() eventType {
return traceUpdates
}
func (u *UpdatesTrace) New() traceEvent {
return &UpdatesTrace{}
}
func (u *UpdatesTrace) Run(r *traceRunner) error {
rst := r.trt.Updates()
if diff := cmp.Diff(rst, u.Results, cmpopts.EquateEmpty()); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
return nil
}
type BlockTrace struct {
Header types.BlockHeader `json:",inline"`
Valid bool `json:"v"`
}
func (b *BlockTrace) Type() eventType {
return traceBlock
}
func (b *BlockTrace) New() traceEvent {
return &BlockTrace{}
}
func (b *BlockTrace) Run(r *traceRunner) error {
if b.Valid {
r.trt.OnValidBlock(b.Header)
} else {
r.trt.OnBlock(b.Header)
}
return nil
}
type MalfeasanceTrace struct {
ID types.NodeID `json:"id"`
}
func (m *MalfeasanceTrace) Type() eventType {
return traceMalfeasence
}
func (m *MalfeasanceTrace) New() traceEvent {
return &MalfeasanceTrace{}
}
func (m *MalfeasanceTrace) Run(r *traceRunner) error {
r.trt.OnMalfeasance(m.ID)
return nil
}
func assertErrors(err error, expect string) error {
msg := ""
if err != nil {
msg = err.Error()
}
if expect != msg {
return fmt.Errorf("%s != %s", expect, msg)
}
return nil
}
func newEventEnum() eventEnum {
enum := eventEnum{types: map[uint16]traceEvent{}}
enum.Register(&ConfigTrace{})
enum.Register(&WeakCoinTrace{})
enum.Register(&BeaconTrace{})
enum.Register(&AtxTrace{})
enum.Register(&BallotTrace{})
enum.Register(&DecodeBallotTrace{})
enum.Register(&StoreBallotTrace{})
enum.Register(&EncodeVotesTrace{})
enum.Register(&TallyTrace{})
enum.Register(&BlockTrace{})
enum.Register(&HareTrace{})
enum.Register(&ResultsTrace{})
enum.Register(&UpdatesTrace{})
enum.Register(&MalfeasanceTrace{})
return enum
}
type eventEnum struct {
types map[eventType]traceEvent
}
func (e *eventEnum) Register(ev traceEvent) {
e.types[ev.Type()] = ev
}
func (e *eventEnum) Decode(dec *json.Decoder) (traceEvent, error) {
var event output
if err := dec.Decode(&event); err != nil {
return nil, err
}
ev := e.types[event.Type]
if ev == nil {
return nil, fmt.Errorf("type %d is not registered", event.Type)
}
obj := ev.New()
if err := json.Unmarshal(event.Event, obj); err != nil {
return nil, err
}
return obj, nil
} | traceStore
traceEncode
traceTally
traceBlock
traceHare | random_line_split |
tracer.go | package tortoise
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/spacemeshos/go-spacemesh/common/types"
"github.com/spacemeshos/go-spacemesh/common/types/result"
)
type output struct {
Type eventType `json:"t"`
Event json.RawMessage `json:"o"`
}
type tracer struct {
logger *zap.Logger
}
func (t *tracer) On(event traceEvent) {
buf, err := json.Marshal(event)
if err != nil {
panic(err.Error())
}
raw := json.RawMessage(buf)
t.logger.Info("",
zap.Uint16("t", event.Type()),
zap.Any("o", &raw),
)
}
type TraceOpt func(*zap.Config)
func WithOutput(path string) TraceOpt {
return func(cfg *zap.Config) {
cfg.OutputPaths = []string{path}
}
}
func newTracer(opts ...TraceOpt) *tracer {
cfg := zap.NewProductionConfig()
cfg.Sampling = nil
cfg.EncoderConfig.CallerKey = zapcore.OmitKey
cfg.EncoderConfig.MessageKey = zapcore.OmitKey
cfg.EncoderConfig.LevelKey = zapcore.OmitKey
cfg.DisableCaller = true
for _, opt := range opts {
opt(&cfg)
}
logger, err := cfg.Build()
if err != nil {
panic(err.Error())
}
return &tracer{
logger: logger.Named("tracer"),
}
}
type traceRunner struct {
opts []Opt
trt *Tortoise
pending map[types.BallotID]*DecodedBallot
assertOutputs bool
assertErrors bool
}
func RunTrace(path string, breakpoint func(), opts ...Opt) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
dec := json.NewDecoder(bufio.NewReaderSize(f, 1<<20))
enum := newEventEnum()
runner := &traceRunner{
opts: opts,
pending: map[types.BallotID]*DecodedBallot{},
assertOutputs: true,
assertErrors: true,
}
for {
ev, err := enum.Decode(dec)
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err
}
if err := ev.Run(runner); err != nil {
return err
}
if breakpoint != nil {
breakpoint()
}
}
}
type eventType = uint16
const (
traceStart eventType = 1 + iota
traceWeakCoin
traceBeacon
traceAtx
traceBallot
traceDecode
traceStore
traceEncode
traceTally
traceBlock
traceHare
traceActiveset
traceResults
traceUpdates
traceMalfeasence
)
type traceEvent interface {
Type() eventType
New() traceEvent
Run(*traceRunner) error
}
type ConfigTrace struct {
Hdist uint32 `json:"hdist"`
Zdist uint32 `json:"zdist"`
WindowSize uint32 `json:"window"`
MaxExceptions uint32 `json:"exceptions"`
BadBeaconVoteDelayLayers uint32 `json:"delay"`
LayerSize uint32 `json:"layer-size"`
EpochSize uint32 `json:"epoch-size"` // this field is not set in the original config
EffectiveGenesis uint32 `json:"effective-genesis"`
}
func (c *ConfigTrace) Type() eventType {
return traceStart
}
func (c *ConfigTrace) New() traceEvent {
return &ConfigTrace{}
}
func (c *ConfigTrace) Run(r *traceRunner) error {
types.SetLayersPerEpoch(c.EpochSize)
types.SetEffectiveGenesis(c.EffectiveGenesis)
trt, err := New(append(r.opts, WithConfig(Config{
Hdist: c.Hdist,
Zdist: c.Zdist,
WindowSize: c.WindowSize,
MaxExceptions: int(c.MaxExceptions),
BadBeaconVoteDelayLayers: c.BadBeaconVoteDelayLayers,
LayerSize: c.LayerSize,
}))...)
if err != nil {
return err
}
r.trt = trt
return nil
}
type AtxTrace struct {
Header *types.AtxTortoiseData `json:",inline"`
}
func (a *AtxTrace) Type() eventType {
return traceAtx
}
func (a *AtxTrace) New() traceEvent {
return &AtxTrace{}
}
func (a *AtxTrace) Run(r *traceRunner) error {
r.trt.OnAtx(a.Header)
return nil
}
type WeakCoinTrace struct {
Layer types.LayerID `json:"lid"`
Coin bool `json:"coin"`
}
func (w *WeakCoinTrace) Type() eventType {
return traceWeakCoin
}
func (w *WeakCoinTrace) New() traceEvent {
return &WeakCoinTrace{}
}
func (w *WeakCoinTrace) Run(r *traceRunner) error {
r.trt.OnWeakCoin(w.Layer, w.Coin)
return nil
}
type BeaconTrace struct {
Epoch types.EpochID `json:"epoch"`
Beacon types.Beacon `json:"beacon"`
}
func (b *BeaconTrace) Type() eventType {
return traceBeacon
}
func (b *BeaconTrace) New() traceEvent {
return &BeaconTrace{}
}
func (b *BeaconTrace) Run(r *traceRunner) error {
r.trt.OnBeacon(b.Epoch, b.Beacon)
return nil
}
type BallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
}
func (b *BallotTrace) Type() eventType {
return traceBallot
}
func (b *BallotTrace) New() traceEvent {
return &BallotTrace{}
}
func (b *BallotTrace) Run(r *traceRunner) error {
r.trt.OnBallot(b.Ballot)
return nil
}
type DecodeBallotTrace struct {
Ballot *types.BallotTortoiseData `json:",inline"`
Error string `json:"e"`
// TODO(dshulyak) want to assert decoding results somehow
}
func (d *DecodeBallotTrace) Type() eventType {
return traceDecode
}
func (d *DecodeBallotTrace) New() traceEvent {
return &DecodeBallotTrace{}
}
func (b *DecodeBallotTrace) Run(r *traceRunner) error {
decoded, err := r.trt.DecodeBallot(b.Ballot)
if r.assertErrors {
if err := assertErrors(err, b.Error); err != nil {
return err
}
}
if err == nil {
r.pending[decoded.ID] = decoded
}
return nil
}
type StoreBallotTrace struct {
ID types.BallotID `json:"id"`
Malicious bool `json:"mal"`
Error string `json:"e,omitempty"`
}
func (s *StoreBallotTrace) Type() eventType {
return traceStore
}
func (s *StoreBallotTrace) | () traceEvent {
return &StoreBallotTrace{}
}
func (s *StoreBallotTrace) Run(r *traceRunner) error {
pending, exist := r.pending[s.ID]
if !exist {
return fmt.Errorf("id %v should be pending", s.ID)
}
if s.Malicious {
pending.SetMalicious()
}
delete(r.pending, s.ID)
err := r.trt.StoreBallot(pending)
if r.assertErrors {
if err := assertErrors(err, s.Error); err != nil {
return err
}
}
return nil
}
type EncodeVotesTrace struct {
Layer types.LayerID `json:"lid"`
Opinion *types.Opinion `json:"opinion"`
Error string `json:"e"`
}
func (e *EncodeVotesTrace) Type() eventType {
return traceEncode
}
func (e *EncodeVotesTrace) New() traceEvent {
return &EncodeVotesTrace{}
}
func (e *EncodeVotesTrace) Run(r *traceRunner) error {
opinion, err := r.trt.EncodeVotes(context.Background(), EncodeVotesWithCurrent(e.Layer))
if r.assertErrors {
if err := assertErrors(err, e.Error); err != nil {
return err
}
}
if err == nil {
if diff := cmp.Diff(opinion, e.Opinion); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type TallyTrace struct {
Layer types.LayerID `json:"lid"`
}
func (t *TallyTrace) Type() eventType {
return traceTally
}
func (t *TallyTrace) New() traceEvent {
return &TallyTrace{}
}
func (t *TallyTrace) Run(r *traceRunner) error {
r.trt.TallyVotes(context.Background(), t.Layer)
return nil
}
type HareTrace struct {
Layer types.LayerID `json:"lid"`
Vote types.BlockID `json:"vote"`
}
func (h *HareTrace) Type() eventType {
return traceHare
}
func (h *HareTrace) New() traceEvent {
return &HareTrace{}
}
func (h *HareTrace) Run(r *traceRunner) error {
r.trt.OnHareOutput(h.Layer, h.Vote)
return nil
}
type ResultsTrace struct {
From types.LayerID `json:"from"`
To types.LayerID `json:"to"`
Error string `json:"e"`
Results []result.Layer `json:"results"`
}
func (r *ResultsTrace) Type() eventType {
return traceResults
}
func (r *ResultsTrace) New() traceEvent {
return &ResultsTrace{}
}
func (r *ResultsTrace) Run(rt *traceRunner) error {
rst, err := rt.trt.Results(r.From, r.To)
if rt.assertErrors {
if err := assertErrors(err, r.Error); err != nil {
return err
}
}
if err == nil {
if diff := cmp.Diff(rst, r.Results, cmpopts.EquateEmpty()); len(diff) > 0 && rt.assertOutputs {
return errors.New(diff)
}
}
return nil
}
type UpdatesTrace struct {
ResultsTrace `json:",inline"`
}
func (u *UpdatesTrace) Type() eventType {
return traceUpdates
}
func (u *UpdatesTrace) New() traceEvent {
return &UpdatesTrace{}
}
func (u *UpdatesTrace) Run(r *traceRunner) error {
rst := r.trt.Updates()
if diff := cmp.Diff(rst, u.Results, cmpopts.EquateEmpty()); len(diff) > 0 && r.assertOutputs {
return errors.New(diff)
}
return nil
}
type BlockTrace struct {
Header types.BlockHeader `json:",inline"`
Valid bool `json:"v"`
}
func (b *BlockTrace) Type() eventType {
return traceBlock
}
func (b *BlockTrace) New() traceEvent {
return &BlockTrace{}
}
func (b *BlockTrace) Run(r *traceRunner) error {
if b.Valid {
r.trt.OnValidBlock(b.Header)
} else {
r.trt.OnBlock(b.Header)
}
return nil
}
type MalfeasanceTrace struct {
ID types.NodeID `json:"id"`
}
func (m *MalfeasanceTrace) Type() eventType {
return traceMalfeasence
}
func (m *MalfeasanceTrace) New() traceEvent {
return &MalfeasanceTrace{}
}
func (m *MalfeasanceTrace) Run(r *traceRunner) error {
r.trt.OnMalfeasance(m.ID)
return nil
}
func assertErrors(err error, expect string) error {
msg := ""
if err != nil {
msg = err.Error()
}
if expect != msg {
return fmt.Errorf("%s != %s", expect, msg)
}
return nil
}
func newEventEnum() eventEnum {
enum := eventEnum{types: map[uint16]traceEvent{}}
enum.Register(&ConfigTrace{})
enum.Register(&WeakCoinTrace{})
enum.Register(&BeaconTrace{})
enum.Register(&AtxTrace{})
enum.Register(&BallotTrace{})
enum.Register(&DecodeBallotTrace{})
enum.Register(&StoreBallotTrace{})
enum.Register(&EncodeVotesTrace{})
enum.Register(&TallyTrace{})
enum.Register(&BlockTrace{})
enum.Register(&HareTrace{})
enum.Register(&ResultsTrace{})
enum.Register(&UpdatesTrace{})
enum.Register(&MalfeasanceTrace{})
return enum
}
type eventEnum struct {
types map[eventType]traceEvent
}
func (e *eventEnum) Register(ev traceEvent) {
e.types[ev.Type()] = ev
}
func (e *eventEnum) Decode(dec *json.Decoder) (traceEvent, error) {
var event output
if err := dec.Decode(&event); err != nil {
return nil, err
}
ev := e.types[event.Type]
if ev == nil {
return nil, fmt.Errorf("type %d is not registered", event.Type)
}
obj := ev.New()
if err := json.Unmarshal(event.Event, obj); err != nil {
return nil, err
}
return obj, nil
}
| New | identifier_name |
SemaIndexerStage2.py | #coding: latin-1
''' inicializa o ambiente para captura de informacoes do clipping '''
''' ractionline p auto-ask(classify,relactionate,composition) ->auto-ask para classificar,relacionar,build objectos dentro dos purposes(ambientes) , as quais sao passadas por linha de comando, roda automaticamente '''
import base64
import calendar
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import thread
from os import environ
from weakref import proxy
import umisc
import pycassa
from pycassa.pool import ConnectionPool
from pycassa import index
from pycassa.columnfamily import ColumnFamily
pool2 = ConnectionPool('MINDNET', ['79.143.185.3:9160'],timeout=10000)
import mdTb
mdTb.start_db(pool2)
import get_object
get_object.start_db(pool2)
import get_object2
get_object2.start_db(pool2)
import mdLayout
import mdER
import mdNeural
import Identify
import logging
import mdOntology
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('SemanticIndexer-Stage2')
# obs : extrair informacoes -> preview de informacoes->pagina para exibir o resultado (href), titulo ou foco principal como result, classificacao como categoria --> para busca web realtime(busca na ontologia semanti_objects2 ,nao clipping
'''
import conn
conn= conn.conn_mx
connTrace=conn
conn4=conn
conn5=conn
conn3=conn
'''
'''
conn= MySQLdb.connect(host='dbmy0023.whservidor.com', user='mindnet' , passwd='acc159753', db='mindnet')
connTrace= MySQLdb.connect(host='dbmy0032.whservidor.com', user='mindnet_2' , passwd='acc159753', db='mindnet_2')
conn4=MySQLdb.connect(host='dbmy0050.whservidor.com', user='mindnet_4' , passwd='acc159753', db='mindnet_4')
conn5= MySQLdb.connect(host='dbmy0035.whservidor.com', user='mindnet_3' , passwd='acc159753', db='mindnet_3')
conn3= MySQLdb.connect(host='dbmy0039.whservidor.com', user='mindnet_5' , passwd='acc159753', db='mindnet_5')
def config_conns(conn_):
cursor=conn_.cursor ()
cursor.execute('SET SESSION wait_timeout = 90000')
config_conns(conn)
config_conns(connTrace)
config_conns(conn4)
config_conns(conn3)
'''
def Identify_pre_process_data (l2,ln_o,onto_basis,purpose,id,t_h,ret_ps):
Identify.pre_process_data(l2,ln_o,onto_basis,purpose,id,t_h,ret_ps)
def start_job_sec(start,usr,path_j,th):
cmd='python '+path_j+' "'+usr+'" '+str(start)
#os.execv('/usr/bin/python',(path_j,str(start)) )
print 'Prepare cmd:',cmd
os.system(cmd)
th.finished=True
def mount_node(term,id,purpose):
l=Identify.prepare_layout(id,purpose)
allp=[]
onto=Identify.prepare_data_by_ask(l, term,id,purpose,allp )
return [onto,allp]
entry_doc =[]
#entry_doc = ['vectra é um bom carro com design inteligente, em composto por fibra de carbono restrito em carro de passeio ',' expectativas de carro com venda fortes','casa bom estado venda']
#entry('viagens melhores opções',['viagem para o nordeste é muito mais tranquilo, composto por translados mas restrito para criancas '])
tb_object = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3')
tb_object_dt = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3')
tb_relaction = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3')
mdNeural.tb_object=tb_object
mdNeural.tb_object_dt=tb_object_dt
mdNeural.tb_relaction=tb_relaction
def get_object_by_data(obj,usr,uid):
#========
if uid != '':
resultSet=tb_object.get(uid)
key1=uid
else:
resultSet=tb_object.get(obj)
key1=uid
#
#
obj_nm=None
uid=None
cenar=0
cnts_all_tps=0
if True:
results = resultSet
obj_nm=results[u'objeto']
uid=key1
cenar=results[u'cenar']
cnts_all_tps=results[u'conts_n']
#-----------
lay=mdNeural.mdLayer ()
if obj_nm == None: obj_nm=obj
lay.name=obj_nm
tpc2=lay.set_topico('identificador')
tpc2.uuid=cenar
nrc2= lay.set_nr(lay.name)
nrc2.uuid=cenar
tpc2.connect_to(lay,nrc2,'Composicao')
#print lay.topicos,'....................'
print 'Read object(g1):',obj,' uid:',uid
#-----------
def cached_dt(objectkey,cnts_all_tp):
'''
cached=[]
keyc=objectkey
i=1
while i <= cnts_all_tp:
try:
c1=tb_object_dt.get(keyc+"|"+str(i))
cached.append([keyc+"|"+str(i),c1])
except: break
i+=1
'''
cached=[]
keyc=objectkey
i=1
keys=[]
while i <= int(cnts_all_tp):
keys.append( keyc+"|"+str(i) )
i+=1
i=0
if True:
try:
c1=tb_object_dt.multiget(keys,column_count=10000)
for kd in c1.items():
cached.append([kd[0],kd[1]])
i+=1
except Exception,e:
print 'ERROR:',e
pass
return cached
if True :
class iter_cd:
def dump(self):
fil=open("c:\\python24\\kk.txt","w")
for ky,s in self.arr:
s1=str(s)
fil.write(s1+'\n')
fil.close()
def __init__(self,arr1):
self.arr=arr1
self.start=-1
def get_level(self,level2):
rt=[]
arr=self.arr
for ky,cols in arr:
for level in level2:
if int(cols[u'LEV']) == level:
rt.append([ky,cols])
rt2=iter_cd(rt)
return rt2
def get_all(self):
rt=[]
while True:
s=self.next()
if s[0] == None: break
rt.append(s)
return rt
def next(self):
if self.start == -1:
self.start=0
else:
self.start+=1
if self.start < len(self.arr):
return self.arr[self.start]
else:
return [None,None]
rows=cached_dt(uid,cnts_all_tps)
iterat=iter_cd(rows)
#iterat.dump()
def read_dt_level( nr_top,level,uid1,ic1,lay1,results,resultSet,uuid):
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic1=uuid
lev=results[u'LEV']
#print 'READ(g2).top-init:',TOP,DT,'->',lev , level
if int(lev) != level:
return results
break
nrc= lay1.set_nr(DT)
nrc.uuid=uuid
#print 'READ(g2).top:',TOP,DT
#==
nr_top.connect_to(lay1,nrc,'Composicao')
ky,results = resultSet.next()
read_dt_level(nrc,(level+1),uid1,ic1,lay1,results,resultSet,uuid)
return results
#====================
#
resultSet =iterat.get_level([0,1]).get_all()
obj_nm=None
for ky,results in resultSet:
DT=results[u"datach"]
TOP=results[u'topico']
cnti=int(results[u'cnt'])
ic=ky
uuid=ky
nr= lay.set_nr(DT)
nr.uuid=cenar
if ic == None: ic=0
#tps=lay.get_topico(TOP)
#if tps == None:
# tps=lay.set_topico(TOP)
# ---
tps=lay.set_topico(TOP)
tps.uuid=cenar
#===
#print 'Set topico:',TOP,' for layer:',obj_nm,' uid:',uid,'tps.uid:',tps.uuid
# ---
tps.connect_to(lay,nr,'Composicao')
if True:
#==
#
levs=range(0,50)
resultSet1=iterat.get_level(levs)
#sess=conn3.prepare(sql1)
#resultSet = sess.execute ()
ky,results = resultSet1.next()
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic2=ky
lev=results[u'LEV']
uuid=ky
if int(results[u'cnt']) <= cnti:
ky,results = resultSet1.next()
continue
#print 'Level 2(ind):',lev,TOP,DT
if int(lev) != 2:
break
#==
#print 'Level 2(dt) :',nrc.dt
nrc= lay.set_nr(DT)
nrc.uuid=cenar
nr.connect_to(lay,nrc,'Composicao')
ky,results = resultSet1.next()
results = read_dt_level( nrc,3,uid,ic2,lay,results,resultSet1,cenar ) #####
#lay.dump_layer_file()
#print 'collected.layer.dump():============================================='
#lay.dump_layer_file()
return lay
def get_ontology(aliases,purposes,usr):
tree_h=[]
for alias in aliases:
if True:
tree_cen=[]
if True:
''' '''
if alias == '$$all$$': alias=''
#alias='%'+alias+'%'
#alias=alias+'%'
''' '''
h_tree_v=[]
#
resultSet=None
try:
resultSet=tb_object.get(alias)
except: pass
ky=alias
if resultSet:
results = resultSet
i=results[u'objeto']
uid=ky
cenar=results[u'cenar']
#===
print 'read-Obj-onto:[',alias,'] ',i, '->collects:',uid
#====
avaliable_objs=[]
#===--------------------------------------
obj_principal=get_object_by_data(i,usr,uid)
#if len(obj_principal.topicos) > 0 :
h_tree_v.append(obj_principal)
#==----------------------------------------
#break
if len(h_tree_v) > 0 :
tree_cen.append(h_tree_v)
if len(tree_cen) > 0 :
tree_h.append(tree_cen)
return tree_h
class thread_cntl:
def __init__(self):
self.finished=False
class Task_C:
def __init__(self,Dt1=None,Dt2=None):
self.dt1=Dt1
self.dt2=Dt2
#//2
def process_page(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
#try:
id=usr
if True:
#===
#try:
if True:
#print 'LayersC:',layersc
try:
ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
except Exception,errc:
print 'Error resume_process_datac:',errc
log.exception("-------------------------")
print 'Process->resume_data()',ir
if ir[0] != None :
# procura identificador ---
fnd_ident=False
for es in ir[0].topicos:
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
fnd_ident=True
if not fnd_ident:
ind=len(result_onto_tree_er)-1
fond_cs=False
while ind >=0 and not fond_cs:
for es2 in result_onto_tree_er[ind].topicos:
if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
ir[0].set_topico_nr(es2)
fond_cs=True
break
ind-=1
# verificar se nao tem somente identificadores(elemento fact invalido)
oth=False
print 'RCT TPS:{',ir[0].topicos,'}'
indtotph=0
for es in ir[0].topicos:
indtotph+=1
print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
pass
else:
oth=True
print 'OTH:',oth
class layer_processesC:
def __init__(self):
self.lrs=[]
import SemaIniParser
# global
layer_processes=layer_processesC ()
'''
obs: filtros/restricoes ou informacoes compl, devem ser lidos dos layer_processes, que contem as informacoes da sentenca passada
'''
def process_termo(layers,usr,pur_p,onto_basis,relactionate=False):
#=====================================
result_onto_tree_er=[] # caracteristicas,descricoes,etc...
print 'Inter -stack executing : '
for n in onto_basis.nodesER:
print n.name
print '======================================='
print '======================================='
print '======================================='
process_page(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
#========================================================================================================================================
print 'ER---Final-Exec:',len(result_onto_tree_er)
for e in result_onto_tree_er:
print '----------------------------------------'
for t in e.topicos:
print t.dt
print '**************************'
for s in t.sinapses:
print s.nr.dt
print '**************************'
print '----------------------------------------'
print '======================================='
print '======================================='
print '======================================='
'''
->gravar na ontologia semantic_objects2, os qualificadores de definicao,composicao( achar objectos com destinacao(info-composicao), que contem as informacoes de propriedades )
->gravar na ontologia semantic_objects2, os qualificadores de estado( achar objectos com destinacao(info-state), que contem as informacoes de estado->links para cada momento das propriedades )
->gravar na ontologiao links_objetcts os qualificadores definidores de relacao , link ,conexao, etc.. ( achar objectos com destinacao(info-relaction), que contem as informacoes de relacoes )
->os qualificadores de filtro,foco informados na sentenca do clipping seram utilizados para o retorno no clipping
1 -setar foco nos objectos
2 -caracteristicas + links
3 -layout-out com interesses
4 -layout-code para extrair informacao
5 -inserir no clipping return
'''
def get_aliases_ob( ):
str_ret=[]
for ir in layer_processes.lrs:
print 'get_aliases_ob()->ir(1):',ir
#if ir != None: print 'lr:',ir.name
for topico in ir.topicos:
if len(topico.dt) > 0 :
topicodt=topico.dt
if 'identificador' in topicodt or 'realid' in topicodt or 'realid2' in topicodt or 'object' in topicodt:
dtk=''
for p in topico.sinapses:
for dts1 in p.nr.dt:
dtk+=' '+umisc.trim(dts1)
if umisc.trim(dtk) != '':
print 'Collect.element:',dtk
str_ret.append(umisc.trim(dtk) )
return str_ret
def get_aliases_p( rcts_impo ):
str_ret=[]
for ir in rcts_impo :
#print 'get_aliases_ob()->ir(2):',ir,ir.name
for topico in ir.topicos:
if len(topico.dt) > 0 :
topicodt=topico.dt
#print topicodt,'<<<'
if 'purpose' in topicodt or 'destination' in topicodt :
for p in topico.sinapses:
for dts1 in p.nr.dt:
str_ret.append(dts1)
return str_ret
def process_sentences(usr,rcts_impo):
#
purps=get_aliases_p( rcts_impo )
print 'Purps:',purps
for pur_p in purps:
aliases=get_aliases_ob ()
layers=[]
layers2=[]
layers=get_ontology(aliases,pur_p,usr)
nms=[]
for dks in layers:
for dk1 in dks:
for dk in dk1:
nms.append(dk.name)
print 'GetLayers:(',aliases,',',pur_p,')->',(layers),'{',nms,'}'
#===============
#======== load ractionlines baseado no cenario escolhido => parametro de linha de comando purpose
print 'Collect ractionlines for(1):',pur_p,'-----'
ractionlines=mdOntology.mdAbstractBaseOntology ()
ractionlines.nodesER=rcts_impo
print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
#===============
print 'OBJS:',aliases,' Result:',layers
if len(layers)>0:
print 'Start process of rct:-------------------------'
# doc
print 'Process layers:',len(layers)
for doc in layers:
#sente
print 'Process doc:',len(doc)
for sentece in doc:
#lines
print 'Process Sentence:',len(sentece)
ids=0
try:
for s in sentece:
ids+=1
#print s.name,'---',ids
print 'AK-sentence:[',sentece,']'
process_termo(sentece,usr,pur_p,ractionlines)
except Exception,ess1:
print 'Error process termo:',ess1
log.exception( '===========================' )
else:
process_termo([],usr,pur_p,ractionlines)
#===============
'''
print 'OBJS Rels:',aliases,' Result:',layers2
if len(layers2)>0:
print 'Start process of rct:-------------------------'
# doc
for doc in layers2:
#sente
for sentece in doc:
#lines
process_termo(sentece,usr,pur_p,ractionlines,True)
'''
def process_sentences2(usr,rcts_impo,layers_param):
#
purps=get_aliases_p( rcts_impo )
print 'Purps:',purps
for pur_p in purps:
aliases=get_aliases_ob ()
layers=[]
layers2=[]
#======== load ractionlines baseado no cenario escolhido => parametro de linha de comando purpose
print 'Collect ractionlines for(1):',pur_p,'-----'
ractionlines=mdOntology.mdAbstractBaseOntology ()
ractionlines.nodesER=rcts_impo
print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
#===============
print 'OBJS:',aliases,' Result:',layers
if True :
try:
process_termo(layers_param,usr,pur_p,ractionlines)
except Exception,ess1:
log.exception( '===========================' )
print 'Error process termo:',ess1
class ProcessPgStack:
def __init_(self):
pass
def call_process(self,usr,rcts_impo):
process_sentences(usr,rcts_impo)
def call | f,usr,rcts_impo,layers_param=[]):
process_sentences2(usr,rcts_impo,layers_param)
mdNeural.GlobalStack.proc_pg=ProcessPgStack ()
def process_pagest(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
#try:
id=usr
if True:
#===
#try:
if True:
print 'LayersC-1:',layersc
ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
print 'LayersC-2:',layersc
print 'Process->resume_data()',ir
if ir[0] != None :
# procura identificador ---
fnd_ident=False
for es in ir[0].topicos:
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
fnd_ident=True
if not fnd_ident:
ind=len(result_onto_tree_er)-1
fond_cs=False
while ind >=0 and not fond_cs:
for es2 in result_onto_tree_er[ind].topicos:
if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
ir[0].set_topico_nr(es2)
fond_cs=True
break
ind-=1
# verificar se nao tem somente identificadores(elemento fact invalido)
oth=False
print 'RCT TPS:{',ir[0].topicos,'}'
indtotph=0
for es in ir[0].topicos:
indtotph+=1
print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
pass
else:
oth=True
print 'OTH:',oth
def process_termost(layers,usr,pur_p,onto_basis,relactionate=False):
#=====================================
result_onto_tree_er=[] # caracteristicas,descricoes,etc...
process_pagest(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
#========================================================================================================================================
print 'ER:',len(result_onto_tree_er)
for e in result_onto_tree_er:
print '----------------------------------------'
for t in e.topicos:
print t.dt
print '**************************'
for s in t.sinapses:
print s.nr.dt
print '**************************'
print '----------------------------------------'
def process_sentences_ST1(usr,layers,pur_p):
print 'Collect ractionlines for(2):',pur_p,'-----'
ractionlines=mdOntology.mdBaseOntology(usr,pur_p)
print 'Found:',len(ractionlines.nodesER)
process_termost(layers,usr,pur_p,ractionlines)
def Gentry(logid,action_def,purp):
sentence=''
usr=''
sessao=''
tmp=''
try:
fnds=False
purpose=''
#=============
#=============
usr=logid
start_c=0
sentence=action_def
purpose=purp
if True:
#print usr,purpose,sentence
sentence=sentence.replace('*','%')
print 'SemaIniParser:',sentence,'|',usr,'|',purpose
layer_proc=SemaIniParser.entry(sentence,usr,purpose)
layer_processes.lrs=layer_proc[0]
process_sentences_ST1(usr,layer_processes.lrs,purpose)
except:
log.exception( 'Error process sentences:' )
return usr
import time
startTT = time.clock ()
usuario=''
try:
mdLayout.dump_all_state=False
mdLayout.dump_all_state2=False
mdLayout.dump_all_state3=False
mdLayout.dump_all_state4=False
mdLayout.dump_all_state5=False
start_c=0
usr=0
usr=sys.argv[1]
#print usr
sentence=sys.argv[2]
#print sentence
purp=sys.argv[3]
#print purp
tp_n=sys.argv[4] # type of neural objects
#print 'Cmd line :',usr
mdNeural.self_usr=usr
mdNeural.type_coll=tp_n
if sentence.find('.') <=-1 and sentence.find('!') <=-1 and sentence.find('?') <=-1 and sentence.find(';') <=-1:
sentence=sentence+'.'
#print 'sentence:',sentence
usuario=Gentry(usr,sentence,purp)
except Exception,err:
log.exception( 'Error process sentences:' )
print 'End process.Time elapsed: ',time.clock () - startTT
| _process2(sel | identifier_name |
SemaIndexerStage2.py | #coding: latin-1
''' inicializa o ambiente para captura de informacoes do clipping '''
''' ractionline p auto-ask(classify,relactionate,composition) ->auto-ask para classificar,relacionar,build objectos dentro dos purposes(ambientes) , as quais sao passadas por linha de comando, roda automaticamente '''
import base64
import calendar
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import thread
from os import environ
from weakref import proxy
import umisc
import pycassa
from pycassa.pool import ConnectionPool
from pycassa import index
from pycassa.columnfamily import ColumnFamily
pool2 = ConnectionPool('MINDNET', ['79.143.185.3:9160'],timeout=10000)
import mdTb
mdTb.start_db(pool2)
import get_object
get_object.start_db(pool2)
import get_object2
get_object2.start_db(pool2)
import mdLayout
import mdER
import mdNeural
import Identify
import logging
import mdOntology
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('SemanticIndexer-Stage2')
# obs : extrair informacoes -> preview de informacoes->pagina para exibir o resultado (href), titulo ou foco principal como result, classificacao como categoria --> para busca web realtime(busca na ontologia semanti_objects2 ,nao clipping
'''
import conn
conn= conn.conn_mx
connTrace=conn
conn4=conn
conn5=conn
conn3=conn
'''
'''
conn= MySQLdb.connect(host='dbmy0023.whservidor.com', user='mindnet' , passwd='acc159753', db='mindnet')
connTrace= MySQLdb.connect(host='dbmy0032.whservidor.com', user='mindnet_2' , passwd='acc159753', db='mindnet_2')
conn4=MySQLdb.connect(host='dbmy0050.whservidor.com', user='mindnet_4' , passwd='acc159753', db='mindnet_4')
conn5= MySQLdb.connect(host='dbmy0035.whservidor.com', user='mindnet_3' , passwd='acc159753', db='mindnet_3')
conn3= MySQLdb.connect(host='dbmy0039.whservidor.com', user='mindnet_5' , passwd='acc159753', db='mindnet_5')
def config_conns(conn_):
cursor=conn_.cursor ()
cursor.execute('SET SESSION wait_timeout = 90000')
config_conns(conn)
config_conns(connTrace)
config_conns(conn4)
config_conns(conn3)
'''
def Identify_pre_process_data (l2,ln_o,onto_basis,purpose,id,t_h,ret_ps):
Identify.pre_process_data(l2,ln_o,onto_basis,purpose,id,t_h,ret_ps)
def start_job_sec(start,usr,path_j,th):
cmd='python '+path_j+' "'+usr+'" '+str(start)
#os.execv('/usr/bin/python',(path_j,str(start)) )
print 'Prepare cmd:',cmd
os.system(cmd)
th.finished=True
def mount_node(term,id,purpose):
l=Identify.prepare_layout(id,purpose)
allp=[]
onto=Identify.prepare_data_by_ask(l, term,id,purpose,allp )
return [onto,allp]
entry_doc =[]
#entry_doc = ['vectra é um bom carro com design inteligente, em composto por fibra de carbono restrito em carro de passeio ',' expectativas de carro com venda fortes','casa bom estado venda']
#entry('viagens melhores opções',['viagem para o nordeste é muito mais tranquilo, composto por translados mas restrito para criancas '])
tb_object = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3')
tb_object_dt = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3')
tb_relaction = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3')
mdNeural.tb_object=tb_object
mdNeural.tb_object_dt=tb_object_dt
mdNeural.tb_relaction=tb_relaction
def get_object_by_data(obj,usr,uid):
#========
if uid != '':
resultSet=tb_object.get(uid)
key1=uid
else:
resultSet=tb_object.get(obj)
key1=uid
#
#
obj_nm=None
uid=None
cenar=0
cnts_all_tps=0
if True:
results = resultSet
obj_nm=results[u'objeto']
uid=key1
cenar=results[u'cenar']
cnts_all_tps=results[u'conts_n']
#-----------
lay=mdNeural.mdLayer ()
if obj_nm == None: obj_nm=obj
lay.name=obj_nm
tpc2=lay.set_topico('identificador')
tpc2.uuid=cenar
nrc2= lay.set_nr(lay.name)
nrc2.uuid=cenar
tpc2.connect_to(lay,nrc2,'Composicao')
#print lay.topicos,'....................'
print 'Read object(g1):',obj,' uid:',uid
#-----------
def cached_dt(objectkey,cnts_all_tp):
'''
cached=[]
keyc=objectkey
i=1
while i <= cnts_all_tp:
try:
c1=tb_object_dt.get(keyc+"|"+str(i))
cached.append([keyc+"|"+str(i),c1])
except: break
i+=1
'''
cached=[]
keyc=objectkey
i=1
keys=[]
while i <= int(cnts_all_tp):
keys.append( keyc+"|"+str(i) )
i+=1
i=0
if True:
try:
c1=tb_object_dt.multiget(keys,column_count=10000)
for kd in c1.items():
cached.append([kd[0],kd[1]])
i+=1
except Exception,e:
print 'ERROR:',e
pass
return cached
if True :
class iter_cd:
def dump(self):
fil=open("c:\\python24\\kk.txt","w")
for ky,s in self.arr:
s1=str(s)
fil.write(s1+'\n')
fil.close()
def __init__(self,arr1):
self.arr=arr1
self.start=-1
def get_level(self,level2):
rt=[]
arr=self.arr
for ky,cols in arr:
for level in level2:
if int(cols[u'LEV']) == level:
rt.append([ky,cols])
rt2=iter_cd(rt)
return rt2
def get_all(self):
rt=[]
while True:
s=self.next()
if s[0] == None: break
rt.append(s)
return rt
def next(self):
if self.start == -1:
self.start=0
else:
self.start+=1
if self.start < len(self.arr):
return self.arr[self.start]
else:
return [None,None]
rows=cached_dt(uid,cnts_all_tps)
iterat=iter_cd(rows)
#iterat.dump()
def read_dt_level( nr_top,level,uid1,ic1,lay1,results,resultSet,uuid):
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic1=uuid
lev=results[u'LEV']
#print 'READ(g2).top-init:',TOP,DT,'->',lev , level
if int(lev) != level:
return results
break
nrc= lay1.set_nr(DT)
nrc.uuid=uuid
#print 'READ(g2).top:',TOP,DT
#==
nr_top.connect_to(lay1,nrc,'Composicao')
ky,results = resultSet.next()
read_dt_level(nrc,(level+1),uid1,ic1,lay1,results,resultSet,uuid)
return results
#====================
#
resultSet =iterat.get_level([0,1]).get_all()
obj_nm=None
for ky,results in resultSet:
DT=results[u"datach"]
TOP=results[u'topico']
cnti=int(results[u'cnt'])
ic=ky
uuid=ky
nr= lay.set_nr(DT)
nr.uuid=cenar
if ic == None: ic=0
#tps=lay.get_topico(TOP)
#if tps == None:
# tps=lay.set_topico(TOP)
# ---
tps=lay.set_topico(TOP)
tps.uuid=cenar
#===
#print 'Set topico:',TOP,' for layer:',obj_nm,' uid:',uid,'tps.uid:',tps.uuid
# ---
tps.connect_to(lay,nr,'Composicao')
if True:
#==
#
levs |
#print 'collected.layer.dump():============================================='
#lay.dump_layer_file()
return lay
def get_ontology(aliases,purposes,usr):
tree_h=[]
for alias in aliases:
if True:
tree_cen=[]
if True:
''' '''
if alias == '$$all$$': alias=''
#alias='%'+alias+'%'
#alias=alias+'%'
''' '''
h_tree_v=[]
#
resultSet=None
try:
resultSet=tb_object.get(alias)
except: pass
ky=alias
if resultSet:
results = resultSet
i=results[u'objeto']
uid=ky
cenar=results[u'cenar']
#===
print 'read-Obj-onto:[',alias,'] ',i, '->collects:',uid
#====
avaliable_objs=[]
#===--------------------------------------
obj_principal=get_object_by_data(i,usr,uid)
#if len(obj_principal.topicos) > 0 :
h_tree_v.append(obj_principal)
#==----------------------------------------
#break
if len(h_tree_v) > 0 :
tree_cen.append(h_tree_v)
if len(tree_cen) > 0 :
tree_h.append(tree_cen)
return tree_h
class thread_cntl:
def __init__(self):
self.finished=False
class Task_C:
def __init__(self,Dt1=None,Dt2=None):
self.dt1=Dt1
self.dt2=Dt2
#//2
def process_page(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
#try:
id=usr
if True:
#===
#try:
if True:
#print 'LayersC:',layersc
try:
ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
except Exception,errc:
print 'Error resume_process_datac:',errc
log.exception("-------------------------")
print 'Process->resume_data()',ir
if ir[0] != None :
# procura identificador ---
fnd_ident=False
for es in ir[0].topicos:
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
fnd_ident=True
if not fnd_ident:
ind=len(result_onto_tree_er)-1
fond_cs=False
while ind >=0 and not fond_cs:
for es2 in result_onto_tree_er[ind].topicos:
if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
ir[0].set_topico_nr(es2)
fond_cs=True
break
ind-=1
# verificar se nao tem somente identificadores(elemento fact invalido)
oth=False
print 'RCT TPS:{',ir[0].topicos,'}'
indtotph=0
for es in ir[0].topicos:
indtotph+=1
print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
pass
else:
oth=True
print 'OTH:',oth
class layer_processesC:
def __init__(self):
self.lrs=[]
import SemaIniParser
# global
layer_processes=layer_processesC ()
'''
obs: filtros/restricoes ou informacoes compl, devem ser lidos dos layer_processes, que contem as informacoes da sentenca passada
'''
def process_termo(layers,usr,pur_p,onto_basis,relactionate=False):
#=====================================
result_onto_tree_er=[] # caracteristicas,descricoes,etc...
print 'Inter -stack executing : '
for n in onto_basis.nodesER:
print n.name
print '======================================='
print '======================================='
print '======================================='
process_page(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
#========================================================================================================================================
print 'ER---Final-Exec:',len(result_onto_tree_er)
for e in result_onto_tree_er:
print '----------------------------------------'
for t in e.topicos:
print t.dt
print '**************************'
for s in t.sinapses:
print s.nr.dt
print '**************************'
print '----------------------------------------'
print '======================================='
print '======================================='
print '======================================='
'''
->gravar na ontologia semantic_objects2, os qualificadores de definicao,composicao( achar objectos com destinacao(info-composicao), que contem as informacoes de propriedades )
->gravar na ontologia semantic_objects2, os qualificadores de estado( achar objectos com destinacao(info-state), que contem as informacoes de estado->links para cada momento das propriedades )
->gravar na ontologiao links_objetcts os qualificadores definidores de relacao , link ,conexao, etc.. ( achar objectos com destinacao(info-relaction), que contem as informacoes de relacoes )
->os qualificadores de filtro,foco informados na sentenca do clipping seram utilizados para o retorno no clipping
1 -setar foco nos objectos
2 -caracteristicas + links
3 -layout-out com interesses
4 -layout-code para extrair informacao
5 -inserir no clipping return
'''
def get_aliases_ob( ):
str_ret=[]
for ir in layer_processes.lrs:
print 'get_aliases_ob()->ir(1):',ir
#if ir != None: print 'lr:',ir.name
for topico in ir.topicos:
if len(topico.dt) > 0 :
topicodt=topico.dt
if 'identificador' in topicodt or 'realid' in topicodt or 'realid2' in topicodt or 'object' in topicodt:
dtk=''
for p in topico.sinapses:
for dts1 in p.nr.dt:
dtk+=' '+umisc.trim(dts1)
if umisc.trim(dtk) != '':
print 'Collect.element:',dtk
str_ret.append(umisc.trim(dtk) )
return str_ret
def get_aliases_p( rcts_impo ):
str_ret=[]
for ir in rcts_impo :
#print 'get_aliases_ob()->ir(2):',ir,ir.name
for topico in ir.topicos:
if len(topico.dt) > 0 :
topicodt=topico.dt
#print topicodt,'<<<'
if 'purpose' in topicodt or 'destination' in topicodt :
for p in topico.sinapses:
for dts1 in p.nr.dt:
str_ret.append(dts1)
return str_ret
def process_sentences(usr,rcts_impo):
#
purps=get_aliases_p( rcts_impo )
print 'Purps:',purps
for pur_p in purps:
aliases=get_aliases_ob ()
layers=[]
layers2=[]
layers=get_ontology(aliases,pur_p,usr)
nms=[]
for dks in layers:
for dk1 in dks:
for dk in dk1:
nms.append(dk.name)
print 'GetLayers:(',aliases,',',pur_p,')->',(layers),'{',nms,'}'
#===============
#======== load ractionlines baseado no cenario escolhido => parametro de linha de comando purpose
print 'Collect ractionlines for(1):',pur_p,'-----'
ractionlines=mdOntology.mdAbstractBaseOntology ()
ractionlines.nodesER=rcts_impo
print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
#===============
print 'OBJS:',aliases,' Result:',layers
if len(layers)>0:
print 'Start process of rct:-------------------------'
# doc
print 'Process layers:',len(layers)
for doc in layers:
#sente
print 'Process doc:',len(doc)
for sentece in doc:
#lines
print 'Process Sentence:',len(sentece)
ids=0
try:
for s in sentece:
ids+=1
#print s.name,'---',ids
print 'AK-sentence:[',sentece,']'
process_termo(sentece,usr,pur_p,ractionlines)
except Exception,ess1:
print 'Error process termo:',ess1
log.exception( '===========================' )
else:
process_termo([],usr,pur_p,ractionlines)
#===============
'''
print 'OBJS Rels:',aliases,' Result:',layers2
if len(layers2)>0:
print 'Start process of rct:-------------------------'
# doc
for doc in layers2:
#sente
for sentece in doc:
#lines
process_termo(sentece,usr,pur_p,ractionlines,True)
'''
def process_sentences2(usr,rcts_impo,layers_param):
#
purps=get_aliases_p( rcts_impo )
print 'Purps:',purps
for pur_p in purps:
aliases=get_aliases_ob ()
layers=[]
layers2=[]
#======== load ractionlines baseado no cenario escolhido => parametro de linha de comando purpose
print 'Collect ractionlines for(1):',pur_p,'-----'
ractionlines=mdOntology.mdAbstractBaseOntology ()
ractionlines.nodesER=rcts_impo
print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
#===============
print 'OBJS:',aliases,' Result:',layers
if True :
try:
process_termo(layers_param,usr,pur_p,ractionlines)
except Exception,ess1:
log.exception( '===========================' )
print 'Error process termo:',ess1
class ProcessPgStack:
def __init_(self):
pass
def call_process(self,usr,rcts_impo):
process_sentences(usr,rcts_impo)
def call_process2(self,usr,rcts_impo,layers_param=[]):
process_sentences2(usr,rcts_impo,layers_param)
mdNeural.GlobalStack.proc_pg=ProcessPgStack ()
def process_pagest(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
#try:
id=usr
if True:
#===
#try:
if True:
print 'LayersC-1:',layersc
ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
print 'LayersC-2:',layersc
print 'Process->resume_data()',ir
if ir[0] != None :
# procura identificador ---
fnd_ident=False
for es in ir[0].topicos:
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
fnd_ident=True
if not fnd_ident:
ind=len(result_onto_tree_er)-1
fond_cs=False
while ind >=0 and not fond_cs:
for es2 in result_onto_tree_er[ind].topicos:
if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
ir[0].set_topico_nr(es2)
fond_cs=True
break
ind-=1
# verificar se nao tem somente identificadores(elemento fact invalido)
oth=False
print 'RCT TPS:{',ir[0].topicos,'}'
indtotph=0
for es in ir[0].topicos:
indtotph+=1
print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
pass
else:
oth=True
print 'OTH:',oth
def process_termost(layers,usr,pur_p,onto_basis,relactionate=False):
#=====================================
result_onto_tree_er=[] # caracteristicas,descricoes,etc...
process_pagest(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
#========================================================================================================================================
print 'ER:',len(result_onto_tree_er)
for e in result_onto_tree_er:
print '----------------------------------------'
for t in e.topicos:
print t.dt
print '**************************'
for s in t.sinapses:
print s.nr.dt
print '**************************'
print '----------------------------------------'
def process_sentences_ST1(usr,layers,pur_p):
print 'Collect ractionlines for(2):',pur_p,'-----'
ractionlines=mdOntology.mdBaseOntology(usr,pur_p)
print 'Found:',len(ractionlines.nodesER)
process_termost(layers,usr,pur_p,ractionlines)
def Gentry(logid,action_def,purp):
sentence=''
usr=''
sessao=''
tmp=''
try:
fnds=False
purpose=''
#=============
#=============
usr=logid
start_c=0
sentence=action_def
purpose=purp
if True:
#print usr,purpose,sentence
sentence=sentence.replace('*','%')
print 'SemaIniParser:',sentence,'|',usr,'|',purpose
layer_proc=SemaIniParser.entry(sentence,usr,purpose)
layer_processes.lrs=layer_proc[0]
process_sentences_ST1(usr,layer_processes.lrs,purpose)
except:
log.exception( 'Error process sentences:' )
return usr
import time
startTT = time.clock ()
usuario=''
try:
mdLayout.dump_all_state=False
mdLayout.dump_all_state2=False
mdLayout.dump_all_state3=False
mdLayout.dump_all_state4=False
mdLayout.dump_all_state5=False
start_c=0
usr=0
usr=sys.argv[1]
#print usr
sentence=sys.argv[2]
#print sentence
purp=sys.argv[3]
#print purp
tp_n=sys.argv[4] # type of neural objects
#print 'Cmd line :',usr
mdNeural.self_usr=usr
mdNeural.type_coll=tp_n
if sentence.find('.') <=-1 and sentence.find('!') <=-1 and sentence.find('?') <=-1 and sentence.find(';') <=-1:
sentence=sentence+'.'
#print 'sentence:',sentence
usuario=Gentry(usr,sentence,purp)
except Exception,err:
log.exception( 'Error process sentences:' )
print 'End process.Time elapsed: ',time.clock () - startTT
| =range(0,50)
resultSet1=iterat.get_level(levs)
#sess=conn3.prepare(sql1)
#resultSet = sess.execute ()
ky,results = resultSet1.next()
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic2=ky
lev=results[u'LEV']
uuid=ky
if int(results[u'cnt']) <= cnti:
ky,results = resultSet1.next()
continue
#print 'Level 2(ind):',lev,TOP,DT
if int(lev) != 2:
break
#==
#print 'Level 2(dt) :',nrc.dt
nrc= lay.set_nr(DT)
nrc.uuid=cenar
nr.connect_to(lay,nrc,'Composicao')
ky,results = resultSet1.next()
results = read_dt_level( nrc,3,uid,ic2,lay,results,resultSet1,cenar ) #####
#lay.dump_layer_file()
| conditional_block |
SemaIndexerStage2.py | #coding: latin-1
''' inicializa o ambiente para captura de informacoes do clipping '''
''' ractionline p auto-ask(classify,relactionate,composition) ->auto-ask para classificar,relacionar,build objectos dentro dos purposes(ambientes) , as quais sao passadas por linha de comando, roda automaticamente '''
import base64
import calendar
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import thread
from os import environ
from weakref import proxy
import umisc
import pycassa
from pycassa.pool import ConnectionPool
from pycassa import index
from pycassa.columnfamily import ColumnFamily
pool2 = ConnectionPool('MINDNET', ['79.143.185.3:9160'],timeout=10000)
import mdTb
mdTb.start_db(pool2)
import get_object
get_object.start_db(pool2)
import get_object2
get_object2.start_db(pool2)
import mdLayout
import mdER
import mdNeural
import Identify
import logging
import mdOntology
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('SemanticIndexer-Stage2')
# obs : extrair informacoes -> preview de informacoes->pagina para exibir o resultado (href), titulo ou foco principal como result, classificacao como categoria --> para busca web realtime(busca na ontologia semanti_objects2 ,nao clipping
'''
import conn
conn= conn.conn_mx
connTrace=conn
conn4=conn
conn5=conn
conn3=conn
'''
'''
conn= MySQLdb.connect(host='dbmy0023.whservidor.com', user='mindnet' , passwd='acc159753', db='mindnet')
connTrace= MySQLdb.connect(host='dbmy0032.whservidor.com', user='mindnet_2' , passwd='acc159753', db='mindnet_2')
conn4=MySQLdb.connect(host='dbmy0050.whservidor.com', user='mindnet_4' , passwd='acc159753', db='mindnet_4')
conn5= MySQLdb.connect(host='dbmy0035.whservidor.com', user='mindnet_3' , passwd='acc159753', db='mindnet_3')
conn3= MySQLdb.connect(host='dbmy0039.whservidor.com', user='mindnet_5' , passwd='acc159753', db='mindnet_5')
def config_conns(conn_):
cursor=conn_.cursor ()
cursor.execute('SET SESSION wait_timeout = 90000')
config_conns(conn)
config_conns(connTrace)
config_conns(conn4)
config_conns(conn3)
'''
def Identify_pre_process_data (l2,ln_o,onto_basis,purpose,id,t_h,ret_ps):
Identify.pre_process_data(l2,ln_o,onto_basis,purpose,id,t_h,ret_ps)
def start_job_sec(start,usr,path_j,th):
cmd='python '+path_j+' "'+usr+'" '+str(start)
#os.execv('/usr/bin/python',(path_j,str(start)) )
print 'Prepare cmd:',cmd
os.system(cmd)
th.finished=True
def mount_node(term,id,purpose):
l=Identify.prepare_layout(id,purpose)
allp=[]
onto=Identify.prepare_data_by_ask(l, term,id,purpose,allp )
return [onto,allp]
entry_doc =[]
#entry_doc = ['vectra é um bom carro com design inteligente, em composto por fibra de carbono restrito em carro de passeio ',' expectativas de carro com venda fortes','casa bom estado venda']
#entry('viagens melhores opções',['viagem para o nordeste é muito mais tranquilo, composto por translados mas restrito para criancas '])
tb_object = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3')
tb_object_dt = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3')
tb_relaction = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3')
mdNeural.tb_object=tb_object
mdNeural.tb_object_dt=tb_object_dt
mdNeural.tb_relaction=tb_relaction
def get_object_by_data(obj,usr,uid):
#========
if uid != '':
resultSet=tb_object.get(uid)
key1=uid
else:
resultSet=tb_object.get(obj)
key1=uid
#
#
obj_nm=None
uid=None
cenar=0
cnts_all_tps=0
if True:
results = resultSet
obj_nm=results[u'objeto']
uid=key1
cenar=results[u'cenar']
cnts_all_tps=results[u'conts_n']
#-----------
lay=mdNeural.mdLayer ()
if obj_nm == None: obj_nm=obj
lay.name=obj_nm
tpc2=lay.set_topico('identificador')
tpc2.uuid=cenar
nrc2= lay.set_nr(lay.name)
nrc2.uuid=cenar
tpc2.connect_to(lay,nrc2,'Composicao')
#print lay.topicos,'....................'
print 'Read object(g1):',obj,' uid:',uid
#-----------
def cached_dt(objectkey,cnts_all_tp):
'''
cached=[]
keyc=objectkey
i=1
while i <= cnts_all_tp:
try:
c1=tb_object_dt.get(keyc+"|"+str(i))
cached.append([keyc+"|"+str(i),c1])
except: break
i+=1
'''
cached=[]
keyc=objectkey
i=1
keys=[]
while i <= int(cnts_all_tp):
keys.append( keyc+"|"+str(i) )
i+=1
i=0
if True:
try:
c1=tb_object_dt.multiget(keys,column_count=10000)
for kd in c1.items():
cached.append([kd[0],kd[1]])
i+=1
except Exception,e:
print 'ERROR:',e
pass
return cached
if True :
class iter_cd:
def dump(self):
fil=open("c:\\python24\\kk.txt","w")
for ky,s in self.arr:
s1=str(s)
fil.write(s1+'\n')
fil.close()
def __init__(self,arr1):
self.arr=arr1
self.start=-1
def get_level(self,level2):
rt=[]
arr=self.arr
for ky,cols in arr:
for level in level2:
if int(cols[u'LEV']) == level:
rt.append([ky,cols])
rt2=iter_cd(rt)
return rt2
def get_all(self):
rt=[]
while True:
s=self.next()
if s[0] == None: break
rt.append(s)
return rt
def next(self):
if self.start == -1:
self.start=0
else:
self.start+=1
if self.start < len(self.arr):
return self.arr[self.start]
else:
return [None,None]
rows=cached_dt(uid,cnts_all_tps)
iterat=iter_cd(rows)
#iterat.dump()
def read_dt_level( nr_top,level,uid1,ic1,lay1,results,resultSet,uuid):
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic1=uuid
lev=results[u'LEV']
#print 'READ(g2).top-init:',TOP,DT,'->',lev , level
if int(lev) != level:
return results
break
nrc= lay1.set_nr(DT)
nrc.uuid=uuid
#print 'READ(g2).top:',TOP,DT
#==
nr_top.connect_to(lay1,nrc,'Composicao')
ky,results = resultSet.next()
read_dt_level(nrc,(level+1),uid1,ic1,lay1,results,resultSet,uuid)
return results
#====================
#
resultSet =iterat.get_level([0,1]).get_all()
obj_nm=None
for ky,results in resultSet:
DT=results[u"datach"]
TOP=results[u'topico']
cnti=int(results[u'cnt'])
ic=ky
uuid=ky
nr= lay.set_nr(DT)
nr.uuid=cenar
if ic == None: ic=0
#tps=lay.get_topico(TOP)
#if tps == None:
# tps=lay.set_topico(TOP)
# ---
tps=lay.set_topico(TOP)
tps.uuid=cenar
#===
#print 'Set topico:',TOP,' for layer:',obj_nm,' uid:',uid,'tps.uid:',tps.uuid
# ---
tps.connect_to(lay,nr,'Composicao')
if True:
#==
#
levs=range(0,50)
resultSet1=iterat.get_level(levs)
#sess=conn3.prepare(sql1)
#resultSet = sess.execute ()
ky,results = resultSet1.next()
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic2=ky
lev=results[u'LEV']
uuid=ky
if int(results[u'cnt']) <= cnti:
ky,results = resultSet1.next()
continue
#print 'Level 2(ind):',lev,TOP,DT
if int(lev) != 2:
break
#==
#print 'Level 2(dt) :',nrc.dt
nrc= lay.set_nr(DT)
nrc.uuid=cenar
nr.connect_to(lay,nrc,'Composicao')
ky,results = resultSet1.next()
results = read_dt_level( nrc,3,uid,ic2,lay,results,resultSet1,cenar ) #####
#lay.dump_layer_file()
#print 'collected.layer.dump():============================================='
#lay.dump_layer_file()
return lay
def get_ontology(aliases,purposes,usr):
tree_h=[]
for alias in aliases:
if True:
tree_cen=[]
if True:
''' '''
if alias == '$$all$$': alias=''
#alias='%'+alias+'%'
#alias=alias+'%'
''' '''
h_tree_v=[]
#
resultSet=None
try:
resultSet=tb_object.get(alias)
except: pass
ky=alias
if resultSet:
results = resultSet
i=results[u'objeto']
uid=ky
cenar=results[u'cenar']
#===
print 'read-Obj-onto:[',alias,'] ',i, '->collects:',uid
#====
avaliable_objs=[]
#===--------------------------------------
obj_principal=get_object_by_data(i,usr,uid)
#if len(obj_principal.topicos) > 0 :
h_tree_v.append(obj_principal)
#==----------------------------------------
#break
if len(h_tree_v) > 0 :
tree_cen.append(h_tree_v)
if len(tree_cen) > 0 :
tree_h.append(tree_cen)
return tree_h
class thread_cntl:
def __init__(self):
self.finished=False
class Task_C:
def __init__(self,Dt1=None,Dt2=None):
self.dt1=Dt1
self.dt2=Dt2
#//2
def process_page(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
#try:
id=usr
if True:
#===
#try:
if True:
#print 'LayersC:',layersc
try:
ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
except Exception,errc:
print 'Error resume_process_datac:',errc
log.exception("-------------------------")
print 'Process->resume_data()',ir
if ir[0] != None :
# procura identificador ---
fnd_ident=False
for es in ir[0].topicos:
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
fnd_ident=True
if not fnd_ident:
ind=len(result_onto_tree_er)-1
fond_cs=False
while ind >=0 and not fond_cs:
for es2 in result_onto_tree_er[ind].topicos:
if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
ir[0].set_topico_nr(es2)
fond_cs=True
break
ind-=1
# verificar se nao tem somente identificadores(elemento fact invalido)
oth=False
print 'RCT TPS:{',ir[0].topicos,'}'
indtotph=0
for es in ir[0].topicos:
indtotph+=1
print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
pass
else:
oth=True
print 'OTH:',oth
class layer_processesC:
def __init__(self):
self.lrs=[]
import SemaIniParser
# global
layer_processes=layer_processesC ()
'''
obs: filtros/restricoes ou informacoes compl, devem ser lidos dos layer_processes, que contem as informacoes da sentenca passada
'''
def process_termo(layers,usr,pur_p,onto_basis,relactionate=False):
    '''Process one sentence's layers against the given ontology basis.

    Runs process_page() to fill result_onto_tree_er, then dumps the collected
    layers (topics, their data and synapse targets) to stdout for tracing.
    '''
    #=====================================
    result_onto_tree_er=[] # characteristics, descriptions, etc...
    print 'Inter -stack executing : '
    for n in onto_basis.nodesER:
        print n.name
    print '======================================='
    print '======================================='
    print '======================================='
    process_page(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
    #========================================================================================================================================
    print 'ER---Final-Exec:',len(result_onto_tree_er)
    for e in result_onto_tree_er:
        print '----------------------------------------'
        for t in e.topicos:
            print t.dt
            print '**************************'
            for s in t.sinapses:
                print s.nr.dt
            print '**************************'
        print '----------------------------------------'
    print '======================================='
    print '======================================='
    print '======================================='
'''
->gravar na ontologia semantic_objects2, os qualificadores de definicao,composicao( achar objectos com destinacao(info-composicao), que contem as informacoes de propriedades )
->gravar na ontologia semantic_objects2, os qualificadores de estado( achar objectos com destinacao(info-state), que contem as informacoes de estado->links para cada momento das propriedades )
->gravar na ontologiao links_objetcts os qualificadores definidores de relacao , link ,conexao, etc.. ( achar objectos com destinacao(info-relaction), que contem as informacoes de relacoes )
->os qualificadores de filtro,foco informados na sentenca do clipping seram utilizados para o retorno no clipping
1 -setar foco nos objectos
2 -caracteristicas + links
3 -layout-out com interesses
4 -layout-code para extrair informacao
5 -inserir no clipping return
'''
def get_aliases_ob( ):
    '''Collect object aliases from the globally parsed layers.

    Scans layer_processes.lrs for topics tagged as identifiers/objects and
    joins the data of their synapse targets into one space-separated alias
    string per topic.
    '''
    str_ret=[]
    for ir in layer_processes.lrs:
        print 'get_aliases_ob()->ir(1):',ir
        #if ir != None: print 'lr:',ir.name
        for topico in ir.topicos:
            if len(topico.dt) > 0 :
                topicodt=topico.dt
                if 'identificador' in topicodt or 'realid' in topicodt or 'realid2' in topicodt or 'object' in topicodt:
                    dtk=''
                    for p in topico.sinapses:
                        for dts1 in p.nr.dt:
                            dtk+=' '+umisc.trim(dts1)
                    if umisc.trim(dtk) != '':
                        print 'Collect.element:',dtk
                        str_ret.append(umisc.trim(dtk) )
    return str_ret
def get_aliases_p(rcts_impo):
    '''Collect every purpose/destination value attached to the given nodes.

    Walks each node's topics; for every non-empty topic tagged 'purpose' or
    'destination', the data strings of all synapse-linked neurons are
    gathered in traversal order.
    '''
    collected = []
    for node in rcts_impo:
        for topic in node.topicos:
            labels = topic.dt
            if len(labels) == 0:
                continue
            if 'purpose' in labels or 'destination' in labels:
                for link in topic.sinapses:
                    collected.extend(link.nr.dt)
    return collected
def process_sentences(usr,rcts_impo):
    '''Drive the full pipeline for every purpose found on *rcts_impo*.

    For each purpose: resolves object aliases, loads their ontology layers
    and runs process_termo() over every sentence, or once with an empty
    layer list when no layers were found.
    '''
    #
    purps=get_aliases_p( rcts_impo )
    print 'Purps:',purps
    for pur_p in purps:
        aliases=get_aliases_ob ()
        layers=[]
        layers2=[]
        layers=get_ontology(aliases,pur_p,usr)
        nms=[]
        for dks in layers:
            for dk1 in dks:
                for dk in dk1:
                    nms.append(dk.name)
        print 'GetLayers:(',aliases,',',pur_p,')->',(layers),'{',nms,'}'
        #===============
        #======== load ractionlines based on the chosen scenario => purpose command-line parameter
        print 'Collect ractionlines for(1):',pur_p,'-----'
        ractionlines=mdOntology.mdAbstractBaseOntology ()
        ractionlines.nodesER=rcts_impo
        print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
        #===============
        print 'OBJS:',aliases,' Result:',layers
        if len(layers)>0:
            print 'Start process of rct:-------------------------'
            # doc
            print 'Process layers:',len(layers)
            for doc in layers:
                #sente
                print 'Process doc:',len(doc)
                for sentece in doc:
                    #lines
                    print 'Process Sentence:',len(sentece)
                    ids=0
                    try:
                        for s in sentece:
                            ids+=1
                            #print s.name,'---',ids
                        print 'AK-sentence:[',sentece,']'
                        process_termo(sentece,usr,pur_p,ractionlines)
                    except Exception,ess1:
                        print 'Error process termo:',ess1
                        log.exception( '===========================' )
        else:
            process_termo([],usr,pur_p,ractionlines)
    #===============
    '''
    print 'OBJS Rels:',aliases,' Result:',layers2
    if len(layers2)>0:
        print 'Start process of rct:-------------------------'
        # doc
        for doc in layers2:
            #sente
            for sentece in doc:
                #lines
                process_termo(sentece,usr,pur_p,ractionlines,True)
    '''
def process_sentences2(usr,rcts_impo,layers_param):
    '''Variant of process_sentences() that takes pre-built layers.

    Skips the alias/ontology lookup and runs process_termo() directly on
    *layers_param* for every purpose found on *rcts_impo*.
    '''
    #
    purps=get_aliases_p( rcts_impo )
    print 'Purps:',purps
    for pur_p in purps:
        aliases=get_aliases_ob ()
        layers=[]
        layers2=[]
        #======== load ractionlines based on the chosen scenario => purpose command-line parameter
        print 'Collect ractionlines for(1):',pur_p,'-----'
        ractionlines=mdOntology.mdAbstractBaseOntology ()
        ractionlines.nodesER=rcts_impo
        print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
        #===============
        print 'OBJS:',aliases,' Result:',layers
        if True :
            try:
                process_termo(layers_param,usr,pur_p,ractionlines)
            except Exception,ess1:
                log.exception( '===========================' )
                print 'Error process termo:',ess1
class ProcessPgStack:
    '''Adapter registered on mdNeural.GlobalStack so the neural layer can
    trigger the sentence-processing pipeline without importing this module.
    '''

    def __init__(self):
        # BUG FIX: the constructor was misspelled `__init_` (one trailing
        # underscore) and therefore never ran as the constructor.
        pass

    def call_process(self, usr, rcts_impo):
        '''Run the full alias/ontology pipeline for *usr*.'''
        process_sentences(usr, rcts_impo)

    def call_process2(self, usr, rcts_impo, layers_param=None):
        '''Run the pipeline on pre-built layers.

        BUG FIX: the default was a shared mutable list ([]); replaced with
        None -> a fresh list per call, which is backward compatible.
        '''
        if layers_param is None:
            layers_param = []
        process_sentences2(usr, rcts_impo, layers_param)
mdNeural.GlobalStack.proc_pg=ProcessPgStack ()
def process_pagest(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
    '''Single-pass variant of process_page() used by the ST1 entry path.

    Same identifier-borrowing and identifier-only validation logic, but the
    Identify call is not guarded by a try/except here.
    '''
    #try:
    id=usr
    if True:
        #===
        #try:
        if True:
            print 'LayersC-1:',layersc
            ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
            print 'LayersC-2:',layersc
            print 'Process->resume_data()',ir
            if ir[0] != None :
                # look for an identifier topic on the result layer ---
                fnd_ident=False
                for es in ir[0].topicos:
                    if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
                        fnd_ident=True
                if not fnd_ident:
                    # borrow the newest identifier from already-collected layers
                    ind=len(result_onto_tree_er)-1
                    fond_cs=False
                    while ind >=0 and not fond_cs:
                        for es2 in result_onto_tree_er[ind].topicos:
                            if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
                                ir[0].set_topico_nr(es2)
                                fond_cs=True
                                break
                        ind-=1
                # check the layer is not made of identifiers only
                # (such a fact element would be invalid)
                oth=False
                print 'RCT TPS:{',ir[0].topicos,'}'
                indtotph=0
                for es in ir[0].topicos:
                    indtotph+=1
                    print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
                    if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
                        pass
                    else:
                        oth=True
                print 'OTH:',oth
def process_termost(layers,usr,pur_p,onto_basis,relactionate=False):
    '''Process *layers* once via process_pagest() and dump the results.'''
    #=====================================
    result_onto_tree_er=[] # characteristics, descriptions, etc...
    process_pagest(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
    #========================================================================================================================================
    print 'ER:',len(result_onto_tree_er)
    for e in result_onto_tree_er:
        print '----------------------------------------'
        for t in e.topicos:
            print t.dt
            print '**************************'
            for s in t.sinapses:
                print s.nr.dt
            print '**************************'
        print '----------------------------------------'
def process_sentences_ST1(usr,layers,pur_p):
    '''Load the base ontology for (usr, purpose) and process *layers* with it.'''
    print 'Collect ractionlines for(2):',pur_p,'-----'
    ractionlines=mdOntology.mdBaseOntology(usr,pur_p)
    print 'Found:',len(ractionlines.nodesER)
    process_termost(layers,usr,pur_p,ractionlines)
def Gentry(logid,action_def,purp):
    '''Main entry point: parse one sentence and run the ST1 pipeline.

    logid      -- user id owning the request
    action_def -- raw sentence text ('*' wildcards are mapped to '%')
    purp       -- purpose/scenario name
    Returns the user id; any failure is logged and swallowed.
    '''
    sentence=''
    usr=''
    sessao=''
    tmp=''
    try:
        fnds=False
        purpose=''
        #=============
        #=============
        usr=logid
        start_c=0
        sentence=action_def
        purpose=purp
        if True:
            #print usr,purpose,sentence
            sentence=sentence.replace('*','%')
            print 'SemaIniParser:',sentence,'|',usr,'|',purpose
            layer_proc=SemaIniParser.entry(sentence,usr,purpose)
            layer_processes.lrs=layer_proc[0]
            process_sentences_ST1(usr,layer_processes.lrs,purpose)
    except:
        log.exception( 'Error process sentences:' )
    return usr
import time
startTT = time.clock ()
usuario=''
try:
mdLayout.dump_all_state=False
mdLayout.dump_all_state2=False
mdLayout.dump_all_state3=False
mdLayout.dump_all_state4=False
mdLayout.dump_all_state5=False
start_c=0
usr=0
usr=sys.argv[1]
#print usr
sentence=sys.argv[2]
#print sentence
| purp=sys.argv[3]
#print purp
tp_n=sys.argv[4] # type of neural objects
#print 'Cmd line :',usr
mdNeural.self_usr=usr
mdNeural.type_coll=tp_n
if sentence.find('.') <=-1 and sentence.find('!') <=-1 and sentence.find('?') <=-1 and sentence.find(';') <=-1:
sentence=sentence+'.'
#print 'sentence:',sentence
usuario=Gentry(usr,sentence,purp)
except Exception,err:
log.exception( 'Error process sentences:' )
print 'End process.Time elapsed: ',time.clock () - startTT | random_line_split | |
SemaIndexerStage2.py | #coding: latin-1
''' inicializa o ambiente para captura de informacoes do clipping '''
''' ractionline p auto-ask(classify,relactionate,composition) ->auto-ask para classificar,relacionar,build objectos dentro dos purposes(ambientes) , as quais sao passadas por linha de comando, roda automaticamente '''
import base64
import calendar
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import thread
from os import environ
from weakref import proxy
import umisc
import pycassa
from pycassa.pool import ConnectionPool
from pycassa import index
from pycassa.columnfamily import ColumnFamily
pool2 = ConnectionPool('MINDNET', ['79.143.185.3:9160'],timeout=10000)
import mdTb
mdTb.start_db(pool2)
import get_object
get_object.start_db(pool2)
import get_object2
get_object2.start_db(pool2)
import mdLayout
import mdER
import mdNeural
import Identify
import logging
import mdOntology
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('SemanticIndexer-Stage2')
# obs : extrair informacoes -> preview de informacoes->pagina para exibir o resultado (href), titulo ou foco principal como result, classificacao como categoria --> para busca web realtime(busca na ontologia semanti_objects2 ,nao clipping
'''
import conn
conn= conn.conn_mx
connTrace=conn
conn4=conn
conn5=conn
conn3=conn
'''
'''
conn= MySQLdb.connect(host='dbmy0023.whservidor.com', user='mindnet' , passwd='acc159753', db='mindnet')
connTrace= MySQLdb.connect(host='dbmy0032.whservidor.com', user='mindnet_2' , passwd='acc159753', db='mindnet_2')
conn4=MySQLdb.connect(host='dbmy0050.whservidor.com', user='mindnet_4' , passwd='acc159753', db='mindnet_4')
conn5= MySQLdb.connect(host='dbmy0035.whservidor.com', user='mindnet_3' , passwd='acc159753', db='mindnet_3')
conn3= MySQLdb.connect(host='dbmy0039.whservidor.com', user='mindnet_5' , passwd='acc159753', db='mindnet_5')
def config_conns(conn_):
cursor=conn_.cursor ()
cursor.execute('SET SESSION wait_timeout = 90000')
config_conns(conn)
config_conns(connTrace)
config_conns(conn4)
config_conns(conn3)
'''
def Identify_pre_process_data (l2,ln_o,onto_basis,purpose,id,t_h,ret_ps):
    # Thin pass-through wrapper around Identify.pre_process_data, exposed
    # under a module-local name. NOTE(review): presumably kept so the call
    # can be scheduled (e.g. via thread.start_new_thread) - confirm.
    Identify.pre_process_data(l2,ln_o,onto_basis,purpose,id,t_h,ret_ps)
def start_job_sec(start,usr,path_j,th):
    '''Launch the secondary job script as a child process and flag completion.

    Blocks until the command finishes, then marks *th* (a thread_cntl) done.
    '''
    # SECURITY(review): usr/path_j are interpolated into a shell command
    # unescaped; confirm they never carry untrusted input.
    cmd='python '+path_j+' "'+usr+'" '+str(start)
    #os.execv('/usr/bin/python',(path_j,str(start)) )
    print 'Prepare cmd:',cmd
    os.system(cmd)
    th.finished=True
def mount_node(term,id,purpose):
    '''Build an ontology node for *term* via the Identify layer.

    Returns [onto, allp]: the ontology produced by prepare_data_by_ask and
    the auxiliary list it fills as a side effect.
    '''
    l=Identify.prepare_layout(id,purpose)
    allp=[]
    onto=Identify.prepare_data_by_ask(l, term,id,purpose,allp )
    return [onto,allp]
entry_doc =[]
# (sample Portuguese test sentences below, kept verbatim)
#entry_doc = ['vectra é um bom carro com design inteligente, em composto por fibra de carbono restrito em carro de passeio ',' expectativas de carro com venda fortes','casa bom estado venda']
#entry('viagens melhores opções',['viagem para o nordeste é muito mais tranquilo, composto por translados mas restrito para criancas '])
# Cassandra column families backing the semantic object store.
tb_object = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3')
tb_object_dt = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3')
tb_relaction = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS3')
# Share the column families with the neural layer.
mdNeural.tb_object=tb_object
mdNeural.tb_object_dt=tb_object_dt
mdNeural.tb_relaction=tb_relaction
def get_object_by_data(obj,usr,uid):
#========
if uid != '':
resultSet=tb_object.get(uid)
key1=uid
else:
resultSet=tb_object.get(obj)
key1=uid
#
#
obj_nm=None
uid=None
cenar=0
cnts_all_tps=0
if True:
results = resultSet
obj_nm=results[u'objeto']
uid=key1
cenar=results[u'cenar']
cnts_all_tps=results[u'conts_n']
#-----------
lay=mdNeural.mdLayer ()
if obj_nm == None: obj_nm=obj
lay.name=obj_nm
tpc2=lay.set_topico('identificador')
tpc2.uuid=cenar
nrc2= lay.set_nr(lay.name)
nrc2.uuid=cenar
tpc2.connect_to(lay,nrc2,'Composicao')
#print lay.topicos,'....................'
print 'Read object(g1):',obj,' uid:',uid
#-----------
def cached_dt(objectkey,cnts_all_tp):
'''
cached=[]
keyc=objectkey
i=1
while i <= cnts_all_tp:
try:
c1=tb_object_dt.get(keyc+"|"+str(i))
cached.append([keyc+"|"+str(i),c1])
except: break
i+=1
'''
cached=[]
keyc=objectkey
i=1
keys=[]
while i <= int(cnts_all_tp):
keys.append( keyc+"|"+str(i) )
i+=1
i=0
if True:
try:
c1=tb_object_dt.multiget(keys,column_count=10000)
for kd in c1.items():
cached.append([kd[0],kd[1]])
i+=1
except Exception,e:
print 'ERROR:',e
pass
return cached
if True :
class iter_cd:
def dump(self):
fil=open("c:\\python24\\kk.txt","w")
for ky,s in self.arr:
s1=str(s)
fil.write(s1+'\n')
fil.close()
def __init__(self,arr1):
self.arr=arr1
self.start=-1
def get_level(self,level2):
rt=[ | def get_all(self):
rt=[]
while True:
s=self.next()
if s[0] == None: break
rt.append(s)
return rt
def next(self):
if self.start == -1:
self.start=0
else:
self.start+=1
if self.start < len(self.arr):
return self.arr[self.start]
else:
return [None,None]
rows=cached_dt(uid,cnts_all_tps)
iterat=iter_cd(rows)
#iterat.dump()
def read_dt_level( nr_top,level,uid1,ic1,lay1,results,resultSet,uuid):
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic1=uuid
lev=results[u'LEV']
#print 'READ(g2).top-init:',TOP,DT,'->',lev , level
if int(lev) != level:
return results
break
nrc= lay1.set_nr(DT)
nrc.uuid=uuid
#print 'READ(g2).top:',TOP,DT
#==
nr_top.connect_to(lay1,nrc,'Composicao')
ky,results = resultSet.next()
read_dt_level(nrc,(level+1),uid1,ic1,lay1,results,resultSet,uuid)
return results
#====================
#
resultSet =iterat.get_level([0,1]).get_all()
obj_nm=None
for ky,results in resultSet:
DT=results[u"datach"]
TOP=results[u'topico']
cnti=int(results[u'cnt'])
ic=ky
uuid=ky
nr= lay.set_nr(DT)
nr.uuid=cenar
if ic == None: ic=0
#tps=lay.get_topico(TOP)
#if tps == None:
# tps=lay.set_topico(TOP)
# ---
tps=lay.set_topico(TOP)
tps.uuid=cenar
#===
#print 'Set topico:',TOP,' for layer:',obj_nm,' uid:',uid,'tps.uid:',tps.uuid
# ---
tps.connect_to(lay,nr,'Composicao')
if True:
#==
#
levs=range(0,50)
resultSet1=iterat.get_level(levs)
#sess=conn3.prepare(sql1)
#resultSet = sess.execute ()
ky,results = resultSet1.next()
while results :
DT=results[u"datach"]
TOP=results[u'topico']
ic2=ky
lev=results[u'LEV']
uuid=ky
if int(results[u'cnt']) <= cnti:
ky,results = resultSet1.next()
continue
#print 'Level 2(ind):',lev,TOP,DT
if int(lev) != 2:
break
#==
#print 'Level 2(dt) :',nrc.dt
nrc= lay.set_nr(DT)
nrc.uuid=cenar
nr.connect_to(lay,nrc,'Composicao')
ky,results = resultSet1.next()
results = read_dt_level( nrc,3,uid,ic2,lay,results,resultSet1,cenar ) #####
#lay.dump_layer_file()
#print 'collected.layer.dump():============================================='
#lay.dump_layer_file()
return lay
def get_ontology(aliases,purposes,usr):
    '''Fetch the stored layer tree for each alias from SEMANTIC_OBJECT3.

    Returns a nested list [per-alias [ [layer, ...] ]]; aliases missing from
    the store are silently skipped.
    '''
    tree_h=[]
    for alias in aliases:
        if True:
            tree_cen=[]
            if True:
                ''' '''
                # '$$all$$' is the wildcard alias -> empty key
                if alias == '$$all$$': alias=''
                #alias='%'+alias+'%'
                #alias=alias+'%'
                ''' '''
                h_tree_v=[]
                #
                resultSet=None
                # best-effort lookup: a missing row simply yields no layers
                try:
                    resultSet=tb_object.get(alias)
                except: pass
                ky=alias
                if resultSet:
                    results = resultSet
                    i=results[u'objeto']
                    uid=ky
                    cenar=results[u'cenar']
                    #===
                    print 'read-Obj-onto:[',alias,'] ',i, '->collects:',uid
                    #====
                    avaliable_objs=[]
                    #===--------------------------------------
                    obj_principal=get_object_by_data(i,usr,uid)
                    #if len(obj_principal.topicos) > 0 :
                    h_tree_v.append(obj_principal)
                    #==----------------------------------------
                    #break
                if len(h_tree_v) > 0 :
                    tree_cen.append(h_tree_v)
                if len(tree_cen) > 0 :
                    tree_h.append(tree_cen)
    return tree_h
class thread_cntl:
    """Tiny completion flag shared with a background job."""

    def __init__(self):
        # Starts unfinished; the worker flips this to True when it is done.
        self.finished = False
class Task_C:
    """Plain value pair carrying two arbitrary payloads between stages."""

    def __init__(self, Dt1=None, Dt2=None):
        # Parameter names Dt1/Dt2 are kept: callers may pass them by keyword.
        self.dt1 = Dt1
        self.dt2 = Dt2
#//2
def process_page(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
    '''Run one inference pass over a page of layers.

    Feeds *layersc* to Identify.resume_process_datac and, when a result layer
    comes back, ensures it carries an identifier topic (borrowing one from the
    most recent layer in *result_onto_tree_er* when missing), then checks
    whether the layer holds anything besides identifiers.
    '''
    #try:
    id=usr
    if True:
        #===
        #try:
        if True:
            #print 'LayersC:',layersc
            try:
                ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
            except Exception,errc:
                print 'Error resume_process_datac:',errc
                log.exception("-------------------------")
            # NOTE(review): if the call above raised, `ir` is unbound here and
            # the next line raises NameError - confirm this is intended.
            print 'Process->resume_data()',ir
            if ir[0] != None :
                # look for an identifier topic on the result layer ---
                fnd_ident=False
                for es in ir[0].topicos:
                    if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
                        fnd_ident=True
                if not fnd_ident:
                    # none found: borrow the newest identifier from the layers
                    # collected so far, scanning newest to oldest
                    ind=len(result_onto_tree_er)-1
                    fond_cs=False
                    while ind >=0 and not fond_cs:
                        for es2 in result_onto_tree_er[ind].topicos:
                            if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
                                ir[0].set_topico_nr(es2)
                                fond_cs=True
                                break
                        ind-=1
                # check the layer is not made of identifiers only
                # (such a fact element would be invalid)
                oth=False
                print 'RCT TPS:{',ir[0].topicos,'}'
                indtotph=0
                for es in ir[0].topicos:
                    indtotph+=1
                    print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
                    if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
                        pass
                    else:
                        oth=True
                print 'OTH:',oth
class layer_processesC:
    """Module-global holder for the layers parsed from the current sentence."""

    def __init__(self):
        # Parsed layer objects; Gentry() fills this after SemaIniParser runs.
        self.lrs = []
import SemaIniParser
# global
layer_processes=layer_processesC ()
'''
obs: filtros/restricoes ou informacoes compl, devem ser lidos dos layer_processes, que contem as informacoes da sentenca passada
'''
def process_termo(layers,usr,pur_p,onto_basis,relactionate=False):
    '''Process one sentence's layers against the given ontology basis.

    Runs process_page() to fill result_onto_tree_er, then dumps the collected
    layers (topics, their data and synapse targets) to stdout for tracing.
    '''
    #=====================================
    result_onto_tree_er=[] # characteristics, descriptions, etc...
    print 'Inter -stack executing : '
    for n in onto_basis.nodesER:
        print n.name
    print '======================================='
    print '======================================='
    print '======================================='
    process_page(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
    #========================================================================================================================================
    print 'ER---Final-Exec:',len(result_onto_tree_er)
    for e in result_onto_tree_er:
        print '----------------------------------------'
        for t in e.topicos:
            print t.dt
            print '**************************'
            for s in t.sinapses:
                print s.nr.dt
            print '**************************'
        print '----------------------------------------'
    print '======================================='
    print '======================================='
    print '======================================='
'''
->gravar na ontologia semantic_objects2, os qualificadores de definicao,composicao( achar objectos com destinacao(info-composicao), que contem as informacoes de propriedades )
->gravar na ontologia semantic_objects2, os qualificadores de estado( achar objectos com destinacao(info-state), que contem as informacoes de estado->links para cada momento das propriedades )
->gravar na ontologiao links_objetcts os qualificadores definidores de relacao , link ,conexao, etc.. ( achar objectos com destinacao(info-relaction), que contem as informacoes de relacoes )
->os qualificadores de filtro,foco informados na sentenca do clipping seram utilizados para o retorno no clipping
1 -setar foco nos objectos
2 -caracteristicas + links
3 -layout-out com interesses
4 -layout-code para extrair informacao
5 -inserir no clipping return
'''
def get_aliases_ob( ):
    '''Collect object aliases from the globally parsed layers.

    Scans layer_processes.lrs for topics tagged as identifiers/objects and
    joins the data of their synapse targets into one space-separated alias
    string per topic.
    '''
    str_ret=[]
    for ir in layer_processes.lrs:
        print 'get_aliases_ob()->ir(1):',ir
        #if ir != None: print 'lr:',ir.name
        for topico in ir.topicos:
            if len(topico.dt) > 0 :
                topicodt=topico.dt
                if 'identificador' in topicodt or 'realid' in topicodt or 'realid2' in topicodt or 'object' in topicodt:
                    dtk=''
                    for p in topico.sinapses:
                        for dts1 in p.nr.dt:
                            dtk+=' '+umisc.trim(dts1)
                    if umisc.trim(dtk) != '':
                        print 'Collect.element:',dtk
                        str_ret.append(umisc.trim(dtk) )
    return str_ret
def get_aliases_p(rcts_impo):
    '''Collect every purpose/destination value attached to the given nodes.

    Walks each node's topics; for every non-empty topic tagged 'purpose' or
    'destination', the data strings of all synapse-linked neurons are
    gathered in traversal order.
    '''
    collected = []
    for node in rcts_impo:
        for topic in node.topicos:
            labels = topic.dt
            if len(labels) == 0:
                continue
            if 'purpose' in labels or 'destination' in labels:
                for link in topic.sinapses:
                    collected.extend(link.nr.dt)
    return collected
def process_sentences(usr,rcts_impo):
    '''Drive the full pipeline for every purpose found on *rcts_impo*.

    For each purpose: resolves object aliases, loads their ontology layers
    and runs process_termo() over every sentence, or once with an empty
    layer list when no layers were found.
    '''
    #
    purps=get_aliases_p( rcts_impo )
    print 'Purps:',purps
    for pur_p in purps:
        aliases=get_aliases_ob ()
        layers=[]
        layers2=[]
        layers=get_ontology(aliases,pur_p,usr)
        nms=[]
        for dks in layers:
            for dk1 in dks:
                for dk in dk1:
                    nms.append(dk.name)
        print 'GetLayers:(',aliases,',',pur_p,')->',(layers),'{',nms,'}'
        #===============
        #======== load ractionlines based on the chosen scenario => purpose command-line parameter
        print 'Collect ractionlines for(1):',pur_p,'-----'
        ractionlines=mdOntology.mdAbstractBaseOntology ()
        ractionlines.nodesER=rcts_impo
        print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
        #===============
        print 'OBJS:',aliases,' Result:',layers
        if len(layers)>0:
            print 'Start process of rct:-------------------------'
            # doc
            print 'Process layers:',len(layers)
            for doc in layers:
                #sente
                print 'Process doc:',len(doc)
                for sentece in doc:
                    #lines
                    print 'Process Sentence:',len(sentece)
                    ids=0
                    try:
                        for s in sentece:
                            ids+=1
                            #print s.name,'---',ids
                        print 'AK-sentence:[',sentece,']'
                        process_termo(sentece,usr,pur_p,ractionlines)
                    except Exception,ess1:
                        print 'Error process termo:',ess1
                        log.exception( '===========================' )
        else:
            process_termo([],usr,pur_p,ractionlines)
    #===============
    '''
    print 'OBJS Rels:',aliases,' Result:',layers2
    if len(layers2)>0:
        print 'Start process of rct:-------------------------'
        # doc
        for doc in layers2:
            #sente
            for sentece in doc:
                #lines
                process_termo(sentece,usr,pur_p,ractionlines,True)
    '''
def process_sentences2(usr,rcts_impo,layers_param):
    '''Variant of process_sentences() that takes pre-built layers.

    Skips the alias/ontology lookup and runs process_termo() directly on
    *layers_param* for every purpose found on *rcts_impo*.
    '''
    #
    purps=get_aliases_p( rcts_impo )
    print 'Purps:',purps
    for pur_p in purps:
        aliases=get_aliases_ob ()
        layers=[]
        layers2=[]
        #======== load ractionlines based on the chosen scenario => purpose command-line parameter
        print 'Collect ractionlines for(1):',pur_p,'-----'
        ractionlines=mdOntology.mdAbstractBaseOntology ()
        ractionlines.nodesER=rcts_impo
        print 'Found:',len(ractionlines.nodesER),'-> Ready to start inference-engine:'
        #===============
        print 'OBJS:',aliases,' Result:',layers
        if True :
            try:
                process_termo(layers_param,usr,pur_p,ractionlines)
            except Exception,ess1:
                log.exception( '===========================' )
                print 'Error process termo:',ess1
class ProcessPgStack:
    '''Adapter registered on mdNeural.GlobalStack so the neural layer can
    trigger the sentence-processing pipeline without importing this module.
    '''

    def __init__(self):
        # BUG FIX: the constructor was misspelled `__init_` (one trailing
        # underscore) and therefore never ran as the constructor.
        pass

    def call_process(self, usr, rcts_impo):
        '''Run the full alias/ontology pipeline for *usr*.'''
        process_sentences(usr, rcts_impo)

    def call_process2(self, usr, rcts_impo, layers_param=None):
        '''Run the pipeline on pre-built layers.

        BUG FIX: the default was a shared mutable list ([]); replaced with
        None -> a fresh list per call, which is backward compatible.
        '''
        if layers_param is None:
            layers_param = []
        process_sentences2(usr, rcts_impo, layers_param)
mdNeural.GlobalStack.proc_pg=ProcessPgStack ()
def process_pagest(layersc,purpose,usr,result_onto_tree_er,onto_basis,relactionate):
    '''Single-pass variant of process_page() used by the ST1 entry path.

    Same identifier-borrowing and identifier-only validation logic, but the
    Identify call is not guarded by a try/except here.
    '''
    #try:
    id=usr
    if True:
        #===
        #try:
        if True:
            print 'LayersC-1:',layersc
            ir=Identify.resume_process_datac(layersc,onto_basis,purpose,id,relactionate)
            print 'LayersC-2:',layersc
            print 'Process->resume_data()',ir
            if ir[0] != None :
                # look for an identifier topic on the result layer ---
                fnd_ident=False
                for es in ir[0].topicos:
                    if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid') or ir[0].es_compare_dt(es,'realid2'):
                        fnd_ident=True
                if not fnd_ident:
                    # borrow the newest identifier from already-collected layers
                    ind=len(result_onto_tree_er)-1
                    fond_cs=False
                    while ind >=0 and not fond_cs:
                        for es2 in result_onto_tree_er[ind].topicos:
                            if ir[0].es_compare_dt(es2,'identificador') or ir[0].es_compare_dt(es2,'realid') or ir[0].es_compare_dt(es2,'realid2'):
                                ir[0].set_topico_nr(es2)
                                fond_cs=True
                                break
                        ind-=1
                # check the layer is not made of identifiers only
                # (such a fact element would be invalid)
                oth=False
                print 'RCT TPS:{',ir[0].topicos,'}'
                indtotph=0
                for es in ir[0].topicos:
                    indtotph+=1
                    print 'RCT TPS('+str(indtotph)+'):{',es.dt,'}'
                    if ir[0].es_compare_dt(es,'identificador') or ir[0].es_compare_dt(es,'realid')or ir[0].es_compare_dt(es,'realid2') :
                        pass
                    else:
                        oth=True
                print 'OTH:',oth
def process_termost(layers,usr,pur_p,onto_basis,relactionate=False):
    '''Process *layers* once via process_pagest() and dump the results.'''
    #=====================================
    result_onto_tree_er=[] # characteristics, descriptions, etc...
    process_pagest(layers,pur_p,usr,result_onto_tree_er,onto_basis,relactionate)
    #========================================================================================================================================
    print 'ER:',len(result_onto_tree_er)
    for e in result_onto_tree_er:
        print '----------------------------------------'
        for t in e.topicos:
            print t.dt
            print '**************************'
            for s in t.sinapses:
                print s.nr.dt
            print '**************************'
        print '----------------------------------------'
def process_sentences_ST1(usr,layers,pur_p):
    '''Load the base ontology for (usr, purpose) and process *layers* with it.'''
    print 'Collect ractionlines for(2):',pur_p,'-----'
    ractionlines=mdOntology.mdBaseOntology(usr,pur_p)
    print 'Found:',len(ractionlines.nodesER)
    process_termost(layers,usr,pur_p,ractionlines)
def Gentry(logid,action_def,purp):
    '''Main entry point: parse one sentence and run the ST1 pipeline.

    logid      -- user id owning the request
    action_def -- raw sentence text ('*' wildcards are mapped to '%')
    purp       -- purpose/scenario name
    Returns the user id; any failure is logged and swallowed.
    '''
    sentence=''
    usr=''
    sessao=''
    tmp=''
    try:
        fnds=False
        purpose=''
        #=============
        #=============
        usr=logid
        start_c=0
        sentence=action_def
        purpose=purp
        if True:
            #print usr,purpose,sentence
            sentence=sentence.replace('*','%')
            print 'SemaIniParser:',sentence,'|',usr,'|',purpose
            layer_proc=SemaIniParser.entry(sentence,usr,purpose)
            layer_processes.lrs=layer_proc[0]
            process_sentences_ST1(usr,layer_processes.lrs,purpose)
    except:
        log.exception( 'Error process sentences:' )
    return usr
import time
# NOTE(review): time.clock() is deprecated (removed in Python 3.8).
startTT = time.clock ()
usuario=''
try:
    # silence the various mdLayout debug dumps for this run
    mdLayout.dump_all_state=False
    mdLayout.dump_all_state2=False
    mdLayout.dump_all_state3=False
    mdLayout.dump_all_state4=False
    mdLayout.dump_all_state5=False
    start_c=0
    usr=0
    # command line: <user> <sentence> <purpose> <neural-object-type>
    usr=sys.argv[1]
    #print usr
    sentence=sys.argv[2]
    #print sentence
    purp=sys.argv[3]
    #print purp
    tp_n=sys.argv[4] # type of neural objects
    #print 'Cmd line :',usr
    mdNeural.self_usr=usr
    mdNeural.type_coll=tp_n
    # ensure the sentence carries a terminator so the parser sees a full sentence
    if sentence.find('.') <=-1 and sentence.find('!') <=-1 and sentence.find('?') <=-1 and sentence.find(';') <=-1:
        sentence=sentence+'.'
    #print 'sentence:',sentence
    usuario=Gentry(usr,sentence,purp)
except Exception,err:
    log.exception( 'Error process sentences:' )
print 'End process.Time elapsed: ',time.clock () - startTT
| ]
arr=self.arr
for ky,cols in arr:
for level in level2:
if int(cols[u'LEV']) == level:
rt.append([ky,cols])
rt2=iter_cd(rt)
return rt2
| identifier_body |
HaaLimitsHMass.py | import os
import sys
import logging
import itertools
import numpy as np
import argparse
import math
import errno
import array
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch()
import CombineLimits.Limits.Models as Models
from CombineLimits.Limits.Limits import Limits
from CombineLimits.HaaLimits.HaaLimits import HaaLimits
from CombineLimits.Limits.utilities import *
class HaaLimitsHMass(HaaLimits):
| '''
Create the Haa Limits workspace
'''
SPLINENAME = 'sig{a}'
XRANGE = [50,1000]
XLABEL = 'm_{#mu#mu#tau_{#mu}#tau_{h}}'
    def __init__(self,histMap):
        '''
        Required arguments:
            histMap = histogram map. the structure should be:
                histMap[region][shift][process] = ROOT.TH1()
                where:
                    region  : 'PP' or 'FP' for regions A and B, respectively
                    shift   : '', 'shiftName', 'shiftNameUp', or 'shiftNameDown'
                        ''          : central value
                        'shiftName' : a symmetric shift (ie, jet resolution)
                        'shiftName[Up,Down]' : an asymmetric shift (ie, fake rate, lepton efficiencies, etc)
                        shiftName : the name the uncertainty will be given in the datacard
                    process : the name of the process
                        signal must be of the form 'HToAAH{h}A{a}'
                        data = 'data'
                        background = 'datadriven'
        '''
        super(HaaLimitsHMass,self).__init__(histMap)
        # Diagnostic plots from the signal/background fits are written here.
        self.plotDir = 'figures/HaaLimitsHMass'
        python_mkdir(self.plotDir)
###########################
### Workspace utilities ###
###########################
    def initializeWorkspace(self):
        '''Declare the x observable and the mH parameter over XRANGE.'''
        self.addX(*self.XRANGE,unit='GeV',label=self.XLABEL)
        self.addMH(*self.XRANGE,unit='GeV',label='m_{h}')
    def buildModel(self,region='PP',**kwargs):
        '''Build the background PDF for *region* and register it as 'bg_<region>'.

        Constructs a Chebychev plus four Exponential continuum components;
        only cont1 and cont2 enter the final recursive sum (the others are
        built but left unused, matching earlier iterations kept in the
        commented-out blocks below).
        '''
        tag = kwargs.pop('tag',region)
        # continuum background
        cont = Models.Chebychev('cont',
            order = 2,
            p0 = [-1,-1.4,0],
            p1 = [0.25,0,0.5],
            p2 = [0.03,-1,1],
        )
        nameC = 'cont{}'.format('_'+tag if tag else '')
        cont.build(self.workspace,nameC)
        cont1 = Models.Exponential('cont1',
            lamb = [-0.20,-1,0],
        )
        nameC1 = 'cont1{}'.format('_'+tag if tag else '')
        cont1.build(self.workspace,nameC1)
        cont2 = Models.Exponential('cont2',
            lamb = [-0.05,-1,0],
        )
        nameC2 = 'cont2{}'.format('_'+tag if tag else '')
        cont2.build(self.workspace,nameC2)
        cont3 = Models.Exponential('cont3',
            lamb = [-0.75,-5,0],
        )
        nameC3 = 'cont3{}'.format('_'+tag if tag else '')
        cont3.build(self.workspace,nameC3)
        cont4 = Models.Exponential('cont4',
            lamb = [-2,-5,0],
        )
        nameC4 = 'cont4{}'.format('_'+tag if tag else '')
        cont4.build(self.workspace,nameC4)
        #cont = Models.Sum('cont',
        #    **{
        #        nameC1 : [0.95,0,1],
        #        nameC2 : [0.05,0,1],
        #        'recursive' : True,
        #    }
        #)
        #nameC = 'cont{}'.format('_'+tag if tag else '')
        #cont.build(self.workspace,nameC)
        # sum
        bg = Models.Sum('bg',
            **{
                #nameC : [0.1,0,1],
                nameC1: [0.5,0,1],
                nameC2: [0.7,0,1],
                'recursive' : True,
            }
        )
        name = 'bg_{}'.format(region)
        bg.build(self.workspace,name)
    def buildSpline(self,a,region='PP',shift=''):
        '''
        Build the signal model and its mH spline for pseudoscalar mass *a*.

        A Voigtian is fit to the signal histogram at every Higgs mass in
        self.HMASSES; the fitted mean/width/sigma values are then used as
        spline points (each parameter's mass dependence is also fit with a
        first-order Chebychev, for monitoring plots only).
        '''
        histMap = self.histMap[region][shift]
        tag= '{}{}'.format(region,'_'+shift if shift else '')
        # initial fit: one Voigtian per Higgs mass point
        results = {}
        errors = {}
        results[a] = {}
        errors[a] = {}
        for h in self.HMASSES:
            ws = ROOT.RooWorkspace('sig')
            ws.factory('x[{0}, {1}]'.format(*self.XRANGE))
            ws.var('x').setUnit('GeV')
            ws.var('x').setPlotLabel(self.XLABEL)
            ws.var('x').SetTitle(self.XLABEL)
            model = Models.Voigtian('sig',
                mean  = [h,0,1000],
                width = [0.1*h,0,0.5*h],
                sigma = [0.1*h,0,0.5*h],
            )
            model.build(ws, 'sig')
            hist = histMap[self.SIGNAME.format(h=h,a=a)]
            results[a][h], errors[a][h] = model.fit(ws, hist, 'h{}_a{}_{}'.format(h,a,tag), saveDir=self.plotDir, save=True, doErrors=True)
        # fit each shape parameter's trend vs mH (diagnostic only)
        models = {
            'mean' : Models.Chebychev('mean',  order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
            'width': Models.Chebychev('width', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
            'sigma': Models.Chebychev('sigma', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
        }
        for param in ['mean', 'width', 'sigma']:
            ws = ROOT.RooWorkspace(param)
            ws.factory('x[{},{}]'.format(*self.XRANGE))
            ws.var('x').setUnit('GeV')
            ws.var('x').setPlotLabel(self.XLABEL)
            ws.var('x').SetTitle(self.XLABEL)
            model = models[param]
            model.build(ws, param)
            name = '{}_{}{}'.format(param,a,tag)
            bins = [50,200,400,1000]
            hist = ROOT.TH1D(name, name, len(bins)-1, array.array('d',bins))
            vals = [results[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
            errs = [errors[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
            for i,h in enumerate(self.HMASSES):
                b = hist.FindBin(h)
                hist.SetBinContent(b,vals[i])
                hist.SetBinError(b,errs[i])
            model.fit(ws, hist, name, saveDir=self.plotDir, save=True)
        # create the spline model interpolating between the per-mass fits
        for h in self.HMASSES:
            print h, a, results[a][h]
        model = Models.VoigtianSpline(self.SPLINENAME.format(a=a),
            **{
                'masses' : self.HMASSES,
                'means'  : [results[a][h]['mean_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
                'widths' : [results[a][h]['width_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
                'sigmas' : [results[a][h]['sigma_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
            }
        )
        integrals = [histMap[self.SIGNAME.format(h=h,a=a)].Integral() for h in self.HMASSES]
        model.setIntegral(self.HMASSES,integrals)
        model.build(self.workspace,'{}_{}'.format(self.SPLINENAME.format(a=a),tag))
        model.buildIntegral(self.workspace,'integral_{}_{}'.format(self.SPLINENAME.format(a=a),tag))
def fitBackground(self,region='PP',shift=''):
model = self.workspace.pdf('bg_{}'.format(region))
name = 'data_prefit_{}{}'.format(region,'_'+shift if shift else '')
data = ROOT.RooDataHist(name,name,ROOT.RooArgList(self.workspace.var('x')),self.histMap[region][shift]['dataNoSig'])
fr = model.fitTo(data,ROOT.RooFit.Save(),ROOT.RooFit.SumW2Error(True))
xFrame = self.workspace.var('x').frame()
data.plotOn(xFrame)
# continuum
model.plotOn(xFrame,ROOT.RooFit.Components('cont1_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
model.plotOn(xFrame,ROOT.RooFit.Components('cont2_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
# combined model
model.plotOn(xFrame)
canvas = ROOT.TCanvas('c','c',800,800)
xFrame.Draw()
#canvas.SetLogy()
canvas.Print('{}/model_fit_{}{}.png'.format(self.plotDir,region,'_'+shift if shift else ''))
pars = fr.floatParsFinal()
vals = {}
errs = {}
for p in range(pars.getSize()):
vals[pars.at(p).GetName()] = pars.at(p).getValV()
errs[pars.at(p).GetName()] = pars.at(p).getError()
for v in sorted(vals.keys()):
print ' ', v, vals[v], '+/-', errs[v]
###############################
### Add things to workspace ###
###############################
def addSignalModels(self):
for region in self.REGIONS:
for shift in ['']+self.SHIFTS:
for a in self.AMASSES:
self.buildSpline(a,region=region,shift=shift)
self.workspace.factory('{}_{}_norm[1,0,9999]'.format(self.SPLINENAME.format(a=a),region))
######################
### Setup datacard ###
######################
def setupDatacard(self):
# setup bins
for region in self.REGIONS:
self.addBin(region)
# add processes
self.addProcess('bg')
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.addProcess(proc,signal=True)
# set expected
for region in self.REGIONS:
h = self.histMap[region]['']['dataNoSig']
integral = h.Integral(h.FindBin(self.XRANGE[0]),h.FindBin(self.XRANGE[1]))
self.setExpected('bg',region,integral)
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.setExpected(proc,region,1) # TODO: how to handle different integrals
self.addRateParam('integral_{}_{}'.format(proc,region),region,proc)
self.setObserved(region,-1) # reads from histogram
###################
### Systematics ###
###################
def addSystematics(self):
self.sigProcesses = tuple([self.SPLINENAME.format(a=a) for a in self.AMASSES])
self._addLumiSystematic()
self._addMuonSystematic()
self._addTauSystematic()
###################################
### Save workspace and datacard ###
###################################
def save(self,name='mmmt'):
processes = {}
for a in self.AMASSES:
processes[self.SIGNAME.format(h='X',a=a)] = [self.SPLINENAME.format(a=a)] + ['bg']
self.printCard('datacards_shape/MuMuTauTau/{}'.format(name),processes=processes,blind=False,saveWorkspace=True) | identifier_body | |
HaaLimitsHMass.py | import os
import sys
import logging
import itertools
import numpy as np
import argparse
import math
import errno
import array
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch()
import CombineLimits.Limits.Models as Models
from CombineLimits.Limits.Limits import Limits
from CombineLimits.HaaLimits.HaaLimits import HaaLimits
from CombineLimits.Limits.utilities import *
class HaaLimitsHMass(HaaLimits):
'''
Create the Haa Limits workspace
'''
SPLINENAME = 'sig{a}'
XRANGE = [50,1000]
XLABEL = 'm_{#mu#mu#tau_{#mu}#tau_{h}}'
def __init__(self,histMap):
'''
Required arguments:
histMap = histogram map. the structure should be:
histMap[region][shift][process] = ROOT.TH1()
where:
region : 'PP' or 'FP' for regions A and B, respectively
shift : '', 'shiftName', 'shiftNameUp', or 'shiftNameDown'
'' : central value
'shiftName' : a symmetric shift (ie, jet resolution)
'shiftName[Up,Down]' : an asymmetric shift (ie, fake rate, lepton efficiencies, etc)
shiftName : the name the uncertainty will be given in the datacard
process : the name of the process
signal must be of the form 'HToAAH{h}A{a}'
data = 'data'
background = 'datadriven'
'''
super(HaaLimitsHMass,self).__init__(histMap)
self.plotDir = 'figures/HaaLimitsHMass'
python_mkdir(self.plotDir)
###########################
### Workspace utilities ###
###########################
def initializeWorkspace(self):
self.addX(*self.XRANGE,unit='GeV',label=self.XLABEL)
self.addMH(*self.XRANGE,unit='GeV',label='m_{h}')
def buildModel(self,region='PP',**kwargs):
tag = kwargs.pop('tag',region)
# continuum background
cont = Models.Chebychev('cont',
order = 2,
p0 = [-1,-1.4,0],
p1 = [0.25,0,0.5],
p2 = [0.03,-1,1],
)
nameC = 'cont{}'.format('_'+tag if tag else '')
cont.build(self.workspace,nameC)
cont1 = Models.Exponential('cont1',
lamb = [-0.20,-1,0],
)
nameC1 = 'cont1{}'.format('_'+tag if tag else '')
cont1.build(self.workspace,nameC1)
cont2 = Models.Exponential('cont2',
lamb = [-0.05,-1,0],
)
nameC2 = 'cont2{}'.format('_'+tag if tag else '')
cont2.build(self.workspace,nameC2)
cont3 = Models.Exponential('cont3',
lamb = [-0.75,-5,0],
)
nameC3 = 'cont3{}'.format('_'+tag if tag else '')
cont3.build(self.workspace,nameC3)
cont4 = Models.Exponential('cont4',
lamb = [-2,-5,0],
)
nameC4 = 'cont4{}'.format('_'+tag if tag else '')
cont4.build(self.workspace,nameC4)
#cont = Models.Sum('cont',
# **{
# nameC1 : [0.95,0,1],
# nameC2 : [0.05,0,1],
# 'recursive' : True,
# }
#)
#nameC = 'cont{}'.format('_'+tag if tag else '')
#cont.build(self.workspace,nameC)
# sum
bg = Models.Sum('bg',
**{
#nameC : [0.1,0,1],
nameC1: [0.5,0,1],
nameC2: [0.7,0,1],
'recursive' : True,
}
)
name = 'bg_{}'.format(region)
bg.build(self.workspace,name)
def buildSpline(self,a,region='PP',shift=''):
'''
Get the signal spline for a given Higgs mass.
Required arguments:
h = higgs mass
'''
histMap = self.histMap[region][shift]
tag= '{}{}'.format(region,'_'+shift if shift else '')
# initial fit
results = {}
errors = {}
results[a] = {}
errors[a] = {}
for h in self.HMASSES:
ws = ROOT.RooWorkspace('sig')
ws.factory('x[{0}, {1}]'.format(*self.XRANGE))
ws.var('x').setUnit('GeV')
ws.var('x').setPlotLabel(self.XLABEL)
ws.var('x').SetTitle(self.XLABEL)
model = Models.Voigtian('sig',
mean = [h,0,1000],
width = [0.1*h,0,0.5*h],
sigma = [0.1*h,0,0.5*h],
)
model.build(ws, 'sig')
hist = histMap[self.SIGNAME.format(h=h,a=a)]
results[a][h], errors[a][h] = model.fit(ws, hist, 'h{}_a{}_{}'.format(h,a,tag), saveDir=self.plotDir, save=True, doErrors=True)
models = {
'mean' : Models.Chebychev('mean', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
'width': Models.Chebychev('width', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
'sigma': Models.Chebychev('sigma', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
}
for param in ['mean', 'width', 'sigma']:
ws = ROOT.RooWorkspace(param)
ws.factory('x[{},{}]'.format(*self.XRANGE))
ws.var('x').setUnit('GeV')
ws.var('x').setPlotLabel(self.XLABEL)
ws.var('x').SetTitle(self.XLABEL)
model = models[param]
model.build(ws, param)
name = '{}_{}{}'.format(param,a,tag)
bins = [50,200,400,1000]
hist = ROOT.TH1D(name, name, len(bins)-1, array.array('d',bins))
vals = [results[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
errs = [errors[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
for i,h in enumerate(self.HMASSES):
b = hist.FindBin(h)
hist.SetBinContent(b,vals[i])
hist.SetBinError(b,errs[i])
model.fit(ws, hist, name, saveDir=self.plotDir, save=True)
# create model
for h in self.HMASSES:
print h, a, results[a][h]
model = Models.VoigtianSpline(self.SPLINENAME.format(a=a),
**{
'masses' : self.HMASSES,
'means' : [results[a][h]['mean_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
'widths' : [results[a][h]['width_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
'sigmas' : [results[a][h]['sigma_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
}
)
integrals = [histMap[self.SIGNAME.format(h=h,a=a)].Integral() for h in self.HMASSES]
model.setIntegral(self.HMASSES,integrals)
model.build(self.workspace,'{}_{}'.format(self.SPLINENAME.format(a=a),tag))
model.buildIntegral(self.workspace,'integral_{}_{}'.format(self.SPLINENAME.format(a=a),tag))
def fitBackground(self,region='PP',shift=''):
model = self.workspace.pdf('bg_{}'.format(region))
name = 'data_prefit_{}{}'.format(region,'_'+shift if shift else '')
data = ROOT.RooDataHist(name,name,ROOT.RooArgList(self.workspace.var('x')),self.histMap[region][shift]['dataNoSig'])
fr = model.fitTo(data,ROOT.RooFit.Save(),ROOT.RooFit.SumW2Error(True))
xFrame = self.workspace.var('x').frame()
data.plotOn(xFrame)
# continuum
model.plotOn(xFrame,ROOT.RooFit.Components('cont1_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
model.plotOn(xFrame,ROOT.RooFit.Components('cont2_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
# combined model
model.plotOn(xFrame)
canvas = ROOT.TCanvas('c','c',800,800)
xFrame.Draw()
#canvas.SetLogy()
canvas.Print('{}/model_fit_{}{}.png'.format(self.plotDir,region,'_'+shift if shift else ''))
pars = fr.floatParsFinal()
vals = {}
errs = {}
for p in range(pars.getSize()):
vals[pars.at(p).GetName()] = pars.at(p).getValV()
errs[pars.at(p).GetName()] = pars.at(p).getError()
for v in sorted(vals.keys()):
print ' ', v, vals[v], '+/-', errs[v]
###############################
### Add things to workspace ###
###############################
def | (self):
for region in self.REGIONS:
for shift in ['']+self.SHIFTS:
for a in self.AMASSES:
self.buildSpline(a,region=region,shift=shift)
self.workspace.factory('{}_{}_norm[1,0,9999]'.format(self.SPLINENAME.format(a=a),region))
######################
### Setup datacard ###
######################
def setupDatacard(self):
# setup bins
for region in self.REGIONS:
self.addBin(region)
# add processes
self.addProcess('bg')
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.addProcess(proc,signal=True)
# set expected
for region in self.REGIONS:
h = self.histMap[region]['']['dataNoSig']
integral = h.Integral(h.FindBin(self.XRANGE[0]),h.FindBin(self.XRANGE[1]))
self.setExpected('bg',region,integral)
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.setExpected(proc,region,1) # TODO: how to handle different integrals
self.addRateParam('integral_{}_{}'.format(proc,region),region,proc)
self.setObserved(region,-1) # reads from histogram
###################
### Systematics ###
###################
def addSystematics(self):
self.sigProcesses = tuple([self.SPLINENAME.format(a=a) for a in self.AMASSES])
self._addLumiSystematic()
self._addMuonSystematic()
self._addTauSystematic()
###################################
### Save workspace and datacard ###
###################################
def save(self,name='mmmt'):
processes = {}
for a in self.AMASSES:
processes[self.SIGNAME.format(h='X',a=a)] = [self.SPLINENAME.format(a=a)] + ['bg']
self.printCard('datacards_shape/MuMuTauTau/{}'.format(name),processes=processes,blind=False,saveWorkspace=True)
| addSignalModels | identifier_name |
HaaLimitsHMass.py | import os
import sys
import logging
import itertools
import numpy as np
import argparse
import math
import errno
import array
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch()
import CombineLimits.Limits.Models as Models
from CombineLimits.Limits.Limits import Limits
from CombineLimits.HaaLimits.HaaLimits import HaaLimits
from CombineLimits.Limits.utilities import *
class HaaLimitsHMass(HaaLimits):
'''
Create the Haa Limits workspace
'''
SPLINENAME = 'sig{a}'
XRANGE = [50,1000]
XLABEL = 'm_{#mu#mu#tau_{#mu}#tau_{h}}'
def __init__(self,histMap):
'''
Required arguments:
histMap = histogram map. the structure should be:
histMap[region][shift][process] = ROOT.TH1()
where:
region : 'PP' or 'FP' for regions A and B, respectively
shift : '', 'shiftName', 'shiftNameUp', or 'shiftNameDown'
'' : central value
'shiftName' : a symmetric shift (ie, jet resolution)
'shiftName[Up,Down]' : an asymmetric shift (ie, fake rate, lepton efficiencies, etc)
shiftName : the name the uncertainty will be given in the datacard
process : the name of the process
signal must be of the form 'HToAAH{h}A{a}'
data = 'data'
background = 'datadriven'
'''
super(HaaLimitsHMass,self).__init__(histMap)
self.plotDir = 'figures/HaaLimitsHMass'
python_mkdir(self.plotDir)
###########################
### Workspace utilities ###
###########################
def initializeWorkspace(self):
self.addX(*self.XRANGE,unit='GeV',label=self.XLABEL)
self.addMH(*self.XRANGE,unit='GeV',label='m_{h}')
def buildModel(self,region='PP',**kwargs):
tag = kwargs.pop('tag',region)
# continuum background
cont = Models.Chebychev('cont',
order = 2,
p0 = [-1,-1.4,0],
p1 = [0.25,0,0.5],
p2 = [0.03,-1,1],
)
nameC = 'cont{}'.format('_'+tag if tag else '')
cont.build(self.workspace,nameC)
cont1 = Models.Exponential('cont1',
lamb = [-0.20,-1,0],
)
nameC1 = 'cont1{}'.format('_'+tag if tag else '')
cont1.build(self.workspace,nameC1)
cont2 = Models.Exponential('cont2',
lamb = [-0.05,-1,0],
)
nameC2 = 'cont2{}'.format('_'+tag if tag else '')
cont2.build(self.workspace,nameC2)
cont3 = Models.Exponential('cont3',
lamb = [-0.75,-5,0],
)
nameC3 = 'cont3{}'.format('_'+tag if tag else '')
cont3.build(self.workspace,nameC3)
cont4 = Models.Exponential('cont4',
lamb = [-2,-5,0],
)
nameC4 = 'cont4{}'.format('_'+tag if tag else '')
cont4.build(self.workspace,nameC4)
#cont = Models.Sum('cont',
# **{
# nameC1 : [0.95,0,1],
# nameC2 : [0.05,0,1],
# 'recursive' : True,
# }
#)
#nameC = 'cont{}'.format('_'+tag if tag else '')
#cont.build(self.workspace,nameC)
# sum
bg = Models.Sum('bg',
**{
#nameC : [0.1,0,1],
nameC1: [0.5,0,1],
nameC2: [0.7,0,1],
'recursive' : True,
}
)
name = 'bg_{}'.format(region)
bg.build(self.workspace,name)
def buildSpline(self,a,region='PP',shift=''):
'''
Get the signal spline for a given Higgs mass.
Required arguments:
h = higgs mass
'''
histMap = self.histMap[region][shift]
tag= '{}{}'.format(region,'_'+shift if shift else '')
# initial fit
results = {}
errors = {}
results[a] = {}
errors[a] = {}
for h in self.HMASSES:
ws = ROOT.RooWorkspace('sig')
ws.factory('x[{0}, {1}]'.format(*self.XRANGE))
ws.var('x').setUnit('GeV')
ws.var('x').setPlotLabel(self.XLABEL)
ws.var('x').SetTitle(self.XLABEL)
model = Models.Voigtian('sig',
mean = [h,0,1000],
width = [0.1*h,0,0.5*h],
sigma = [0.1*h,0,0.5*h],
)
model.build(ws, 'sig')
hist = histMap[self.SIGNAME.format(h=h,a=a)]
results[a][h], errors[a][h] = model.fit(ws, hist, 'h{}_a{}_{}'.format(h,a,tag), saveDir=self.plotDir, save=True, doErrors=True)
models = {
'mean' : Models.Chebychev('mean', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
'width': Models.Chebychev('width', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
'sigma': Models.Chebychev('sigma', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
}
for param in ['mean', 'width', 'sigma']:
|
# create model
for h in self.HMASSES:
print h, a, results[a][h]
model = Models.VoigtianSpline(self.SPLINENAME.format(a=a),
**{
'masses' : self.HMASSES,
'means' : [results[a][h]['mean_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
'widths' : [results[a][h]['width_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
'sigmas' : [results[a][h]['sigma_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
}
)
integrals = [histMap[self.SIGNAME.format(h=h,a=a)].Integral() for h in self.HMASSES]
model.setIntegral(self.HMASSES,integrals)
model.build(self.workspace,'{}_{}'.format(self.SPLINENAME.format(a=a),tag))
model.buildIntegral(self.workspace,'integral_{}_{}'.format(self.SPLINENAME.format(a=a),tag))
def fitBackground(self,region='PP',shift=''):
model = self.workspace.pdf('bg_{}'.format(region))
name = 'data_prefit_{}{}'.format(region,'_'+shift if shift else '')
data = ROOT.RooDataHist(name,name,ROOT.RooArgList(self.workspace.var('x')),self.histMap[region][shift]['dataNoSig'])
fr = model.fitTo(data,ROOT.RooFit.Save(),ROOT.RooFit.SumW2Error(True))
xFrame = self.workspace.var('x').frame()
data.plotOn(xFrame)
# continuum
model.plotOn(xFrame,ROOT.RooFit.Components('cont1_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
model.plotOn(xFrame,ROOT.RooFit.Components('cont2_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
# combined model
model.plotOn(xFrame)
canvas = ROOT.TCanvas('c','c',800,800)
xFrame.Draw()
#canvas.SetLogy()
canvas.Print('{}/model_fit_{}{}.png'.format(self.plotDir,region,'_'+shift if shift else ''))
pars = fr.floatParsFinal()
vals = {}
errs = {}
for p in range(pars.getSize()):
vals[pars.at(p).GetName()] = pars.at(p).getValV()
errs[pars.at(p).GetName()] = pars.at(p).getError()
for v in sorted(vals.keys()):
print ' ', v, vals[v], '+/-', errs[v]
###############################
### Add things to workspace ###
###############################
def addSignalModels(self):
for region in self.REGIONS:
for shift in ['']+self.SHIFTS:
for a in self.AMASSES:
self.buildSpline(a,region=region,shift=shift)
self.workspace.factory('{}_{}_norm[1,0,9999]'.format(self.SPLINENAME.format(a=a),region))
######################
### Setup datacard ###
######################
def setupDatacard(self):
# setup bins
for region in self.REGIONS:
self.addBin(region)
# add processes
self.addProcess('bg')
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.addProcess(proc,signal=True)
# set expected
for region in self.REGIONS:
h = self.histMap[region]['']['dataNoSig']
integral = h.Integral(h.FindBin(self.XRANGE[0]),h.FindBin(self.XRANGE[1]))
self.setExpected('bg',region,integral)
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.setExpected(proc,region,1) # TODO: how to handle different integrals
self.addRateParam('integral_{}_{}'.format(proc,region),region,proc)
self.setObserved(region,-1) # reads from histogram
###################
### Systematics ###
###################
def addSystematics(self):
self.sigProcesses = tuple([self.SPLINENAME.format(a=a) for a in self.AMASSES])
self._addLumiSystematic()
self._addMuonSystematic()
self._addTauSystematic()
###################################
### Save workspace and datacard ###
###################################
def save(self,name='mmmt'):
processes = {}
for a in self.AMASSES:
processes[self.SIGNAME.format(h='X',a=a)] = [self.SPLINENAME.format(a=a)] + ['bg']
self.printCard('datacards_shape/MuMuTauTau/{}'.format(name),processes=processes,blind=False,saveWorkspace=True)
| ws = ROOT.RooWorkspace(param)
ws.factory('x[{},{}]'.format(*self.XRANGE))
ws.var('x').setUnit('GeV')
ws.var('x').setPlotLabel(self.XLABEL)
ws.var('x').SetTitle(self.XLABEL)
model = models[param]
model.build(ws, param)
name = '{}_{}{}'.format(param,a,tag)
bins = [50,200,400,1000]
hist = ROOT.TH1D(name, name, len(bins)-1, array.array('d',bins))
vals = [results[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
errs = [errors[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
for i,h in enumerate(self.HMASSES):
b = hist.FindBin(h)
hist.SetBinContent(b,vals[i])
hist.SetBinError(b,errs[i])
model.fit(ws, hist, name, saveDir=self.plotDir, save=True) | conditional_block |
HaaLimitsHMass.py | import os
import sys
import logging
import itertools
import numpy as np
import argparse
import math
import errno
import array
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch()
import CombineLimits.Limits.Models as Models
from CombineLimits.Limits.Limits import Limits
from CombineLimits.HaaLimits.HaaLimits import HaaLimits
from CombineLimits.Limits.utilities import *
class HaaLimitsHMass(HaaLimits):
'''
Create the Haa Limits workspace
'''
SPLINENAME = 'sig{a}'
XRANGE = [50,1000]
XLABEL = 'm_{#mu#mu#tau_{#mu}#tau_{h}}'
def __init__(self,histMap):
'''
Required arguments:
histMap = histogram map. the structure should be:
histMap[region][shift][process] = ROOT.TH1()
where:
region : 'PP' or 'FP' for regions A and B, respectively
shift : '', 'shiftName', 'shiftNameUp', or 'shiftNameDown'
'' : central value
'shiftName' : a symmetric shift (ie, jet resolution)
'shiftName[Up,Down]' : an asymmetric shift (ie, fake rate, lepton efficiencies, etc)
shiftName : the name the uncertainty will be given in the datacard
process : the name of the process
signal must be of the form 'HToAAH{h}A{a}'
data = 'data'
background = 'datadriven'
'''
super(HaaLimitsHMass,self).__init__(histMap)
self.plotDir = 'figures/HaaLimitsHMass'
python_mkdir(self.plotDir)
###########################
### Workspace utilities ###
###########################
def initializeWorkspace(self):
self.addX(*self.XRANGE,unit='GeV',label=self.XLABEL)
self.addMH(*self.XRANGE,unit='GeV',label='m_{h}')
def buildModel(self,region='PP',**kwargs):
tag = kwargs.pop('tag',region)
# continuum background
cont = Models.Chebychev('cont',
order = 2,
p0 = [-1,-1.4,0],
p1 = [0.25,0,0.5],
p2 = [0.03,-1,1],
)
nameC = 'cont{}'.format('_'+tag if tag else '')
cont.build(self.workspace,nameC)
cont1 = Models.Exponential('cont1',
lamb = [-0.20,-1,0],
)
nameC1 = 'cont1{}'.format('_'+tag if tag else '')
cont1.build(self.workspace,nameC1)
cont2 = Models.Exponential('cont2',
lamb = [-0.05,-1,0],
)
nameC2 = 'cont2{}'.format('_'+tag if tag else '')
cont2.build(self.workspace,nameC2)
cont3 = Models.Exponential('cont3',
lamb = [-0.75,-5,0],
)
nameC3 = 'cont3{}'.format('_'+tag if tag else '')
cont3.build(self.workspace,nameC3)
cont4 = Models.Exponential('cont4',
lamb = [-2,-5,0],
)
nameC4 = 'cont4{}'.format('_'+tag if tag else '')
cont4.build(self.workspace,nameC4)
#cont = Models.Sum('cont',
# **{
# nameC1 : [0.95,0,1],
# nameC2 : [0.05,0,1],
# 'recursive' : True,
# }
#)
#nameC = 'cont{}'.format('_'+tag if tag else '')
#cont.build(self.workspace,nameC)
# sum
bg = Models.Sum('bg',
**{
#nameC : [0.1,0,1],
nameC1: [0.5,0,1],
nameC2: [0.7,0,1],
'recursive' : True,
}
)
name = 'bg_{}'.format(region)
bg.build(self.workspace,name)
def buildSpline(self,a,region='PP',shift=''):
'''
Get the signal spline for a given Higgs mass.
Required arguments:
h = higgs mass
'''
histMap = self.histMap[region][shift]
tag= '{}{}'.format(region,'_'+shift if shift else '')
# initial fit
results = {}
errors = {}
results[a] = {}
errors[a] = {}
for h in self.HMASSES:
ws = ROOT.RooWorkspace('sig')
ws.factory('x[{0}, {1}]'.format(*self.XRANGE))
ws.var('x').setUnit('GeV')
ws.var('x').setPlotLabel(self.XLABEL)
ws.var('x').SetTitle(self.XLABEL)
model = Models.Voigtian('sig',
mean = [h,0,1000],
width = [0.1*h,0,0.5*h],
sigma = [0.1*h,0,0.5*h],
)
model.build(ws, 'sig')
hist = histMap[self.SIGNAME.format(h=h,a=a)]
results[a][h], errors[a][h] = model.fit(ws, hist, 'h{}_a{}_{}'.format(h,a,tag), saveDir=self.plotDir, save=True, doErrors=True)
models = {
'mean' : Models.Chebychev('mean', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
'width': Models.Chebychev('width', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
'sigma': Models.Chebychev('sigma', order = 1, p0 = [1,-5,50], p1 = [0.1,-5,5], p2 = [0.03,-5,5]),
}
for param in ['mean', 'width', 'sigma']:
ws = ROOT.RooWorkspace(param)
ws.factory('x[{},{}]'.format(*self.XRANGE))
ws.var('x').setUnit('GeV')
ws.var('x').setPlotLabel(self.XLABEL)
ws.var('x').SetTitle(self.XLABEL)
model = models[param]
model.build(ws, param)
name = '{}_{}{}'.format(param,a,tag)
bins = [50,200,400,1000]
hist = ROOT.TH1D(name, name, len(bins)-1, array.array('d',bins))
vals = [results[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
errs = [errors[a][h]['{}_h{}_a{}_{}'.format(param,h,a,tag)] for h in self.HMASSES]
for i,h in enumerate(self.HMASSES):
b = hist.FindBin(h)
hist.SetBinContent(b,vals[i])
hist.SetBinError(b,errs[i])
model.fit(ws, hist, name, saveDir=self.plotDir, save=True)
# create model
for h in self.HMASSES:
print h, a, results[a][h]
model = Models.VoigtianSpline(self.SPLINENAME.format(a=a),
**{
'masses' : self.HMASSES,
'means' : [results[a][h]['mean_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
'widths' : [results[a][h]['width_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
'sigmas' : [results[a][h]['sigma_h{0}_a{1}_{2}'.format(h,a,tag)] for h in self.HMASSES],
}
)
integrals = [histMap[self.SIGNAME.format(h=h,a=a)].Integral() for h in self.HMASSES]
model.setIntegral(self.HMASSES,integrals)
model.build(self.workspace,'{}_{}'.format(self.SPLINENAME.format(a=a),tag))
model.buildIntegral(self.workspace,'integral_{}_{}'.format(self.SPLINENAME.format(a=a),tag))
def fitBackground(self,region='PP',shift=''):
model = self.workspace.pdf('bg_{}'.format(region))
name = 'data_prefit_{}{}'.format(region,'_'+shift if shift else '')
data = ROOT.RooDataHist(name,name,ROOT.RooArgList(self.workspace.var('x')),self.histMap[region][shift]['dataNoSig'])
fr = model.fitTo(data,ROOT.RooFit.Save(),ROOT.RooFit.SumW2Error(True))
xFrame = self.workspace.var('x').frame()
data.plotOn(xFrame)
# continuum
model.plotOn(xFrame,ROOT.RooFit.Components('cont1_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
model.plotOn(xFrame,ROOT.RooFit.Components('cont2_{}'.format(region)),ROOT.RooFit.LineStyle(ROOT.kDashed))
# combined model
model.plotOn(xFrame)
canvas = ROOT.TCanvas('c','c',800,800)
xFrame.Draw()
#canvas.SetLogy()
canvas.Print('{}/model_fit_{}{}.png'.format(self.plotDir,region,'_'+shift if shift else ''))
pars = fr.floatParsFinal()
vals = {}
errs = {}
for p in range(pars.getSize()):
vals[pars.at(p).GetName()] = pars.at(p).getValV()
errs[pars.at(p).GetName()] = pars.at(p).getError()
for v in sorted(vals.keys()):
print ' ', v, vals[v], '+/-', errs[v]
###############################
### Add things to workspace ###
############################### | for shift in ['']+self.SHIFTS:
for a in self.AMASSES:
self.buildSpline(a,region=region,shift=shift)
self.workspace.factory('{}_{}_norm[1,0,9999]'.format(self.SPLINENAME.format(a=a),region))
######################
### Setup datacard ###
######################
def setupDatacard(self):
# setup bins
for region in self.REGIONS:
self.addBin(region)
# add processes
self.addProcess('bg')
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.addProcess(proc,signal=True)
# set expected
for region in self.REGIONS:
h = self.histMap[region]['']['dataNoSig']
integral = h.Integral(h.FindBin(self.XRANGE[0]),h.FindBin(self.XRANGE[1]))
self.setExpected('bg',region,integral)
for proc in [self.SPLINENAME.format(a=a) for a in self.AMASSES]:
self.setExpected(proc,region,1) # TODO: how to handle different integrals
self.addRateParam('integral_{}_{}'.format(proc,region),region,proc)
self.setObserved(region,-1) # reads from histogram
###################
### Systematics ###
###################
def addSystematics(self):
self.sigProcesses = tuple([self.SPLINENAME.format(a=a) for a in self.AMASSES])
self._addLumiSystematic()
self._addMuonSystematic()
self._addTauSystematic()
###################################
### Save workspace and datacard ###
###################################
def save(self,name='mmmt'):
processes = {}
for a in self.AMASSES:
processes[self.SIGNAME.format(h='X',a=a)] = [self.SPLINENAME.format(a=a)] + ['bg']
self.printCard('datacards_shape/MuMuTauTau/{}'.format(name),processes=processes,blind=False,saveWorkspace=True) | def addSignalModels(self):
for region in self.REGIONS: | random_line_split |
sphere.rs | // Copyright 2017 Dasein Phaos aka. Luxko
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use geometry::prelude::*;
use super::Shape;
use std;
use serde;
use serde::{Serialize, Deserialize};
use serde::ser::{Serializer, SerializeStruct};
use serde::de::{Deserializer, MapAccess, SeqAccess, Visitor};
/// A (possibly-partial) sphere, as a geometry definition
#[derive(Copy, Clone, PartialEq)]
pub struct Sphere {
/// The radius of the sphere
pub radius: Float,
/// The lower bound xy-plane. Points with `z<zmin` being excluded.
pub zmin: Float,
/// The upper bound xy-plane. Points with `z>zmax` being excluded.
pub zmax: Float,
/// The maximum `phi`. Points with `phi>phimax` being excluded.
pub phimax: Float,
// These two are updated accordingly when `zmin` or `zmax` changes.
thetamin: Float,
thetamax: Float,
}
impl Serialize for Sphere {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
let mut state = s.serialize_struct("Sphere", 4)?;
state.serialize_field("radius", &self.radius)?;
state.serialize_field("zmin", &self.zmin)?;
state.serialize_field("zmax", &self.zmax)?;
state.serialize_field("phimax", &self.phimax)?;
state.end()
}
}
impl<'de> Deserialize<'de> for Sphere {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "lowercase")]
enum Field { Radius, Zmin, Zmax, Phimax }
struct SamplerVisitor;
impl<'de> Visitor<'de> for SamplerVisitor {
type Value = Sphere;
fn expecting(&self, fmter: &mut std::fmt::Formatter) -> std::fmt::Result {
fmter.write_str("struct Sphere")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
where V: SeqAccess<'de>
{
let radius = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(0, &self))?;
let zmin = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(1, &self))?;
let zmax = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(2, &self))?;
let phimax = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(3, &self))?;
Ok(Sphere::new(radius, zmin, zmax, phimax))
}
fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
where V: MapAccess<'de>
{
let mut radius = None;
let mut zmin = None;
let mut zmax = None;
let mut phimax = None;
while let Some(key) = map.next_key()? {
match key {
Field::Radius => {
if radius.is_some() {
return Err(serde::de::Error::duplicate_field("radius"));
}
radius = Some(map.next_value()?);
}
Field::Zmin => {
if zmin.is_some() {
return Err(serde::de::Error::duplicate_field("zmin"));
}
zmin = Some(map.next_value()?);
}
Field::Zmax => {
if zmax.is_some() {
return Err(serde::de::Error::duplicate_field("zmax"));
}
zmax = Some(map.next_value()?);
}
Field::Phimax => {
if phimax.is_some() {
return Err(serde::de::Error::duplicate_field("phimax"));
}
phimax = Some(map.next_value()?);
}
}
}
let radius = radius.ok_or_else(||
serde::de::Error::missing_field("radius")
)?;
let zmin = zmin.ok_or_else(||
serde::de::Error::missing_field("zmin")
)?;
let zmax = zmax.ok_or_else(||
serde::de::Error::missing_field("znear")
)?;
let phimax = phimax.ok_or_else(||
serde::de::Error::missing_field("zfar")
)?;
Ok(Sphere::new(
radius, zmin, zmax, phimax
))
}
}
const FIELDS: &[&str] = &["transform", "screen", "znear", "zfar", "fov", "lens", "film"];
deserializer.deserialize_struct("Sphere", FIELDS, SamplerVisitor)
}
}
impl Sphere {
/// Constructs a new `Sphere`.
pub fn new(radius: Float, mut zmin: Float, mut zmax: Float, mut phimax: Float) -> Sphere |
/// Constructs a full sphere
#[inline]
pub fn full(radius: Float) -> Sphere {
Sphere::new(radius, -radius, radius, float::pi() * (2.0 as Float))
}
/// returns the local space bounding box
#[inline]
pub fn bounding(&self) -> BBox3f {
BBox3f::new(
Point3f::new(-self.radius, -self.radius, self.zmin),
Point3f::new(self.radius, self.radius, self.zmax)
)
}
// /// test intersection in local frame, returns `t` when first hit
// #[inline]
// pub fn intersect_ray(&self, ray: &RawRay) -> Option<Float>
// {
// if let Some(t) = Sphere::intersect_ray_full(self.radius, ray) {
// let p = ray.evaluate(t);
// // TODO: refine sphere intersection
// let mut phi = p.y.atan2(p.x);
// if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); }
// if p.z < self.zmin || p.z > self.zmax || phi > self.phimax {
// None
// } else {
// Some(t)
// }
// } else {
// None
// }
// }
/// test intersection against the full sphere
pub fn intersect_ray_full(radius: Float, ray: &RawRay) -> Option<Float>
{
let origin = ray.origin().to_vec();
let direction = ray.direction();
let a = direction.magnitude2();
let b = (direction.mul_element_wise(origin) * (2.0 as Float)).sum();
let c = origin.magnitude2() - radius * radius;
let delta = b* b - (4.0 as Float) * a * c;
if delta < (0.0 as Float) { return None; }
let invert_2a = (1.0 as Float) / ((2.0 as Float) * a);
let d1 = delta.sqrt() * invert_2a;
let d0 = -b * invert_2a;
let(t0, t1) = if invert_2a > 0.0 as Float {
(d0-d1, d0+d1)
} else {
(d0+d1, d0-d1)
};
let tmax = ray.max_extend();
if t0 > tmax || t1 < (0.0 as Float) { return None; }
if t0 > (0.0 as Float) {
Some(t0)
} else if t1 > tmax {
None
} else {
Some(t1)
}
}
}
impl Shape for Sphere {
#[inline]
fn bbox_local(&self) -> BBox3f {
self.bounding()
}
#[inline]
fn intersect_ray(&self, ray: &RawRay) -> Option<(Float, SurfaceInteraction)> {
if let Some(t) = Sphere::intersect_ray_full(self.radius, &ray) {
let mut p = ray.evaluate(t).to_vec();
// refine sphere intersection
p = p* self.radius / p.magnitude();
if p.x == 0.0 as Float && p.y == 0.0 as Float {
p.x = 1e-5 as Float * self.radius;
}
let p = Point3f::from_vec(p);
let mut phi = p.y.atan2(p.x);
if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); }
// TODO: refine test against clipping
if p.z < self.zmin || p.z > self.zmax || phi > self.phimax {
None
} else {
let phimax = self.phimax;
let thetamax = self.thetamax;
let thetamin = self.thetamin;
let thetadelta = thetamax - thetamin;
let u = phi / phimax;
let theta = (p.z / self.radius).acos();
let v = (theta - thetamin) / thetadelta;
let inv_z_radius = (1.0 as Float) / (p.x * p.x + p.y * p.y).sqrt();
let cos_phi = p.x * inv_z_radius;
let sin_phi = p.y * inv_z_radius;
let dpdu = Vector3f::new(-phimax * p.y, phimax * p.x, 0.0 as Float);
let dpdv = thetadelta * Vector3f::new(p.z * cos_phi, p.z * sin_phi, -self.radius * theta.sin());
let (dndu, dndv) = {
let dppduu = - phimax * phimax * Vector3f::new(p.x, p.y, 0.0 as Float);
let dppduv = thetadelta * p.z * phimax * Vector3f::new(-sin_phi, cos_phi, 0.0 as Float);
let dppdvv = -thetadelta * thetadelta * Vector3f::new(p.x, p.y, p.z);
let e = dpdu.dot(dpdu);
let f = dpdu.dot(dpdv);
let g = dpdv.dot(dpdv);
let n = dpdu.cross(dpdv).normalize();
let ee = n.dot(dppduu);
let ff = n.dot(dppduv);
let gg = n.dot(dppdvv);
let inv = (1.0 as Float) / (e * g - f * f);
(
(ff*f - ee*g) * inv * dpdu + (ee*f - ff*e) * inv * dpdv,
(gg*f - ff*g) * inv * dpdu + (ff*f - gg*e) * inv * dpdv
)
};
Some((
t, SurfaceInteraction::new(
p,
// FIXME: wrong
Vector3f::zero(),
-ray.direction(), Point2f::new(u, v),
DuvInfo{
dpdu: dpdu,
dpdv: dpdv,
dndu: dndu,
dndv: dndv,
},
)
))
}
} else {
None
}
}
#[inline]
fn surface_area(&self) -> Float {
self.phimax * self.radius * (self.zmax - self.zmin)
}
fn sample(&self, sample: Point2f) -> (Point3f, Vector3f, Float) {
// sample.x scaled to [0, phimax]
let phi = sample.x * self.phimax;
// sample.y scaled to [thetamin, thetamax]
let theta = sample.y * (self.thetamax - self.thetamin) + self.thetamin;
let dir = Sphericalf::new(theta, phi).to_vec();
let pos = Point3f::from_vec(dir*self.radius);
(pos, dir, 1. as Float / self.surface_area())
// use sample::sample_uniform_sphere;
// let dir = sample_uniform_sphere(sample);
// let pos = Point3f::from_vec(dir*self.radius);
// (pos, dir, 1. as Float / self.surface_area())
}
}
| {
assert!(radius>(0.0 as Float), "Sphere radius should be positive");
assert!(zmin<zmax, "zmin should be lower than zmax");
if zmin < -radius { zmin = -radius; }
if zmax > radius { zmax = radius; }
if phimax < (0.0 as Float) { phimax = 0.0 as Float; }
let twopi = float::pi() * (2.0 as Float);
if phimax > twopi { phimax = twopi; }
// TODO: double check
let thetamin = (zmin/radius).acos();
let thetamax = (zmax/radius).acos();
Sphere {
radius: radius,
zmin: zmin,
zmax: zmax,
thetamin: thetamin,
thetamax: thetamax,
phimax: phimax,
}
} | identifier_body |
sphere.rs | // Copyright 2017 Dasein Phaos aka. Luxko
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use geometry::prelude::*;
use super::Shape;
use std;
use serde;
use serde::{Serialize, Deserialize};
use serde::ser::{Serializer, SerializeStruct};
use serde::de::{Deserializer, MapAccess, SeqAccess, Visitor};
/// A (possibly-partial) sphere, as a geometry definition
#[derive(Copy, Clone, PartialEq)]
pub struct Sphere {
/// The radius of the sphere
pub radius: Float,
/// The lower bound xy-plane. Points with `z<zmin` being excluded.
pub zmin: Float,
/// The upper bound xy-plane. Points with `z>zmax` being excluded.
pub zmax: Float,
/// The maximum `phi`. Points with `phi>phimax` being excluded.
pub phimax: Float,
// These two are updated accordingly when `zmin` or `zmax` changes.
thetamin: Float,
thetamax: Float,
}
impl Serialize for Sphere {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
let mut state = s.serialize_struct("Sphere", 4)?;
state.serialize_field("radius", &self.radius)?;
state.serialize_field("zmin", &self.zmin)?;
state.serialize_field("zmax", &self.zmax)?;
state.serialize_field("phimax", &self.phimax)?;
state.end()
}
}
impl<'de> Deserialize<'de> for Sphere {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "lowercase")]
enum Field { Radius, Zmin, Zmax, Phimax }
struct SamplerVisitor;
impl<'de> Visitor<'de> for SamplerVisitor {
type Value = Sphere;
fn expecting(&self, fmter: &mut std::fmt::Formatter) -> std::fmt::Result {
fmter.write_str("struct Sphere")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
where V: SeqAccess<'de>
{
let radius = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(0, &self))?;
let zmin = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(1, &self))?;
let zmax = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(2, &self))?;
let phimax = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(3, &self))?;
Ok(Sphere::new(radius, zmin, zmax, phimax))
}
fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
where V: MapAccess<'de>
{
let mut radius = None;
let mut zmin = None;
let mut zmax = None;
let mut phimax = None;
while let Some(key) = map.next_key()? {
match key {
Field::Radius => {
if radius.is_some() {
return Err(serde::de::Error::duplicate_field("radius"));
}
radius = Some(map.next_value()?);
}
Field::Zmin => {
if zmin.is_some() {
return Err(serde::de::Error::duplicate_field("zmin"));
}
zmin = Some(map.next_value()?);
}
Field::Zmax => {
if zmax.is_some() {
return Err(serde::de::Error::duplicate_field("zmax"));
}
zmax = Some(map.next_value()?);
}
Field::Phimax => {
if phimax.is_some() {
return Err(serde::de::Error::duplicate_field("phimax"));
}
phimax = Some(map.next_value()?);
}
}
}
let radius = radius.ok_or_else(||
serde::de::Error::missing_field("radius")
)?;
let zmin = zmin.ok_or_else(||
serde::de::Error::missing_field("zmin")
)?;
let zmax = zmax.ok_or_else(||
serde::de::Error::missing_field("znear")
)?;
let phimax = phimax.ok_or_else(||
serde::de::Error::missing_field("zfar")
)?;
Ok(Sphere::new(
radius, zmin, zmax, phimax
))
}
}
const FIELDS: &[&str] = &["transform", "screen", "znear", "zfar", "fov", "lens", "film"];
deserializer.deserialize_struct("Sphere", FIELDS, SamplerVisitor)
}
}
impl Sphere {
/// Constructs a new `Sphere`.
pub fn new(radius: Float, mut zmin: Float, mut zmax: Float, mut phimax: Float) -> Sphere {
assert!(radius>(0.0 as Float), "Sphere radius should be positive");
assert!(zmin<zmax, "zmin should be lower than zmax");
if zmin < -radius { zmin = -radius; }
if zmax > radius { zmax = radius; }
if phimax < (0.0 as Float) { phimax = 0.0 as Float; }
let twopi = float::pi() * (2.0 as Float);
if phimax > twopi { phimax = twopi; }
// TODO: double check
let thetamin = (zmin/radius).acos();
let thetamax = (zmax/radius).acos();
Sphere {
radius: radius,
zmin: zmin,
zmax: zmax,
thetamin: thetamin,
thetamax: thetamax,
phimax: phimax,
}
}
/// Constructs a full sphere
#[inline]
pub fn full(radius: Float) -> Sphere {
Sphere::new(radius, -radius, radius, float::pi() * (2.0 as Float))
}
/// returns the local space bounding box
#[inline]
pub fn bounding(&self) -> BBox3f {
BBox3f::new(
Point3f::new(-self.radius, -self.radius, self.zmin), | // #[inline]
// pub fn intersect_ray(&self, ray: &RawRay) -> Option<Float>
// {
// if let Some(t) = Sphere::intersect_ray_full(self.radius, ray) {
// let p = ray.evaluate(t);
// // TODO: refine sphere intersection
// let mut phi = p.y.atan2(p.x);
// if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); }
// if p.z < self.zmin || p.z > self.zmax || phi > self.phimax {
// None
// } else {
// Some(t)
// }
// } else {
// None
// }
// }
/// test intersection against the full sphere
pub fn intersect_ray_full(radius: Float, ray: &RawRay) -> Option<Float>
{
let origin = ray.origin().to_vec();
let direction = ray.direction();
let a = direction.magnitude2();
let b = (direction.mul_element_wise(origin) * (2.0 as Float)).sum();
let c = origin.magnitude2() - radius * radius;
let delta = b* b - (4.0 as Float) * a * c;
if delta < (0.0 as Float) { return None; }
let invert_2a = (1.0 as Float) / ((2.0 as Float) * a);
let d1 = delta.sqrt() * invert_2a;
let d0 = -b * invert_2a;
let(t0, t1) = if invert_2a > 0.0 as Float {
(d0-d1, d0+d1)
} else {
(d0+d1, d0-d1)
};
let tmax = ray.max_extend();
if t0 > tmax || t1 < (0.0 as Float) { return None; }
if t0 > (0.0 as Float) {
Some(t0)
} else if t1 > tmax {
None
} else {
Some(t1)
}
}
}
impl Shape for Sphere {
#[inline]
fn bbox_local(&self) -> BBox3f {
self.bounding()
}
#[inline]
fn intersect_ray(&self, ray: &RawRay) -> Option<(Float, SurfaceInteraction)> {
if let Some(t) = Sphere::intersect_ray_full(self.radius, &ray) {
let mut p = ray.evaluate(t).to_vec();
// refine sphere intersection
p = p* self.radius / p.magnitude();
if p.x == 0.0 as Float && p.y == 0.0 as Float {
p.x = 1e-5 as Float * self.radius;
}
let p = Point3f::from_vec(p);
let mut phi = p.y.atan2(p.x);
if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); }
// TODO: refine test against clipping
if p.z < self.zmin || p.z > self.zmax || phi > self.phimax {
None
} else {
let phimax = self.phimax;
let thetamax = self.thetamax;
let thetamin = self.thetamin;
let thetadelta = thetamax - thetamin;
let u = phi / phimax;
let theta = (p.z / self.radius).acos();
let v = (theta - thetamin) / thetadelta;
let inv_z_radius = (1.0 as Float) / (p.x * p.x + p.y * p.y).sqrt();
let cos_phi = p.x * inv_z_radius;
let sin_phi = p.y * inv_z_radius;
let dpdu = Vector3f::new(-phimax * p.y, phimax * p.x, 0.0 as Float);
let dpdv = thetadelta * Vector3f::new(p.z * cos_phi, p.z * sin_phi, -self.radius * theta.sin());
let (dndu, dndv) = {
let dppduu = - phimax * phimax * Vector3f::new(p.x, p.y, 0.0 as Float);
let dppduv = thetadelta * p.z * phimax * Vector3f::new(-sin_phi, cos_phi, 0.0 as Float);
let dppdvv = -thetadelta * thetadelta * Vector3f::new(p.x, p.y, p.z);
let e = dpdu.dot(dpdu);
let f = dpdu.dot(dpdv);
let g = dpdv.dot(dpdv);
let n = dpdu.cross(dpdv).normalize();
let ee = n.dot(dppduu);
let ff = n.dot(dppduv);
let gg = n.dot(dppdvv);
let inv = (1.0 as Float) / (e * g - f * f);
(
(ff*f - ee*g) * inv * dpdu + (ee*f - ff*e) * inv * dpdv,
(gg*f - ff*g) * inv * dpdu + (ff*f - gg*e) * inv * dpdv
)
};
Some((
t, SurfaceInteraction::new(
p,
// FIXME: wrong
Vector3f::zero(),
-ray.direction(), Point2f::new(u, v),
DuvInfo{
dpdu: dpdu,
dpdv: dpdv,
dndu: dndu,
dndv: dndv,
},
)
))
}
} else {
None
}
}
#[inline]
fn surface_area(&self) -> Float {
self.phimax * self.radius * (self.zmax - self.zmin)
}
fn sample(&self, sample: Point2f) -> (Point3f, Vector3f, Float) {
// sample.x scaled to [0, phimax]
let phi = sample.x * self.phimax;
// sample.y scaled to [thetamin, thetamax]
let theta = sample.y * (self.thetamax - self.thetamin) + self.thetamin;
let dir = Sphericalf::new(theta, phi).to_vec();
let pos = Point3f::from_vec(dir*self.radius);
(pos, dir, 1. as Float / self.surface_area())
// use sample::sample_uniform_sphere;
// let dir = sample_uniform_sphere(sample);
// let pos = Point3f::from_vec(dir*self.radius);
// (pos, dir, 1. as Float / self.surface_area())
}
} | Point3f::new(self.radius, self.radius, self.zmax)
)
}
// /// test intersection in local frame, returns `t` when first hit | random_line_split |
sphere.rs | // Copyright 2017 Dasein Phaos aka. Luxko
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use geometry::prelude::*;
use super::Shape;
use std;
use serde;
use serde::{Serialize, Deserialize};
use serde::ser::{Serializer, SerializeStruct};
use serde::de::{Deserializer, MapAccess, SeqAccess, Visitor};
/// A (possibly-partial) sphere, as a geometry definition
#[derive(Copy, Clone, PartialEq)]
pub struct Sphere {
/// The radius of the sphere
pub radius: Float,
/// The lower bound xy-plane. Points with `z<zmin` being excluded.
pub zmin: Float,
/// The upper bound xy-plane. Points with `z>zmax` being excluded.
pub zmax: Float,
/// The maximum `phi`. Points with `phi>phimax` being excluded.
pub phimax: Float,
// These two are updated accordingly when `zmin` or `zmax` changes.
thetamin: Float,
thetamax: Float,
}
impl Serialize for Sphere {
fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
let mut state = s.serialize_struct("Sphere", 4)?;
state.serialize_field("radius", &self.radius)?;
state.serialize_field("zmin", &self.zmin)?;
state.serialize_field("zmax", &self.zmax)?;
state.serialize_field("phimax", &self.phimax)?;
state.end()
}
}
impl<'de> Deserialize<'de> for Sphere {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "lowercase")]
enum Field { Radius, Zmin, Zmax, Phimax }
struct SamplerVisitor;
impl<'de> Visitor<'de> for SamplerVisitor {
type Value = Sphere;
fn expecting(&self, fmter: &mut std::fmt::Formatter) -> std::fmt::Result {
fmter.write_str("struct Sphere")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
where V: SeqAccess<'de>
{
let radius = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(0, &self))?;
let zmin = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(1, &self))?;
let zmax = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(2, &self))?;
let phimax = seq.next_element()?
.ok_or_else(|| serde::de::Error::invalid_length(3, &self))?;
Ok(Sphere::new(radius, zmin, zmax, phimax))
}
fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error>
where V: MapAccess<'de>
{
let mut radius = None;
let mut zmin = None;
let mut zmax = None;
let mut phimax = None;
while let Some(key) = map.next_key()? {
match key {
Field::Radius => {
if radius.is_some() {
return Err(serde::de::Error::duplicate_field("radius"));
}
radius = Some(map.next_value()?);
}
Field::Zmin => {
if zmin.is_some() {
return Err(serde::de::Error::duplicate_field("zmin"));
}
zmin = Some(map.next_value()?);
}
Field::Zmax => {
if zmax.is_some() {
return Err(serde::de::Error::duplicate_field("zmax"));
}
zmax = Some(map.next_value()?);
}
Field::Phimax => {
if phimax.is_some() {
return Err(serde::de::Error::duplicate_field("phimax"));
}
phimax = Some(map.next_value()?);
}
}
}
let radius = radius.ok_or_else(||
serde::de::Error::missing_field("radius")
)?;
let zmin = zmin.ok_or_else(||
serde::de::Error::missing_field("zmin")
)?;
let zmax = zmax.ok_or_else(||
serde::de::Error::missing_field("znear")
)?;
let phimax = phimax.ok_or_else(||
serde::de::Error::missing_field("zfar")
)?;
Ok(Sphere::new(
radius, zmin, zmax, phimax
))
}
}
const FIELDS: &[&str] = &["transform", "screen", "znear", "zfar", "fov", "lens", "film"];
deserializer.deserialize_struct("Sphere", FIELDS, SamplerVisitor)
}
}
impl Sphere {
/// Constructs a new `Sphere`.
pub fn new(radius: Float, mut zmin: Float, mut zmax: Float, mut phimax: Float) -> Sphere {
assert!(radius>(0.0 as Float), "Sphere radius should be positive");
assert!(zmin<zmax, "zmin should be lower than zmax");
if zmin < -radius { zmin = -radius; }
if zmax > radius { zmax = radius; }
if phimax < (0.0 as Float) { phimax = 0.0 as Float; }
let twopi = float::pi() * (2.0 as Float);
if phimax > twopi { phimax = twopi; }
// TODO: double check
let thetamin = (zmin/radius).acos();
let thetamax = (zmax/radius).acos();
Sphere {
radius: radius,
zmin: zmin,
zmax: zmax,
thetamin: thetamin,
thetamax: thetamax,
phimax: phimax,
}
}
/// Constructs a full sphere
#[inline]
pub fn full(radius: Float) -> Sphere {
Sphere::new(radius, -radius, radius, float::pi() * (2.0 as Float))
}
/// returns the local space bounding box
#[inline]
pub fn bounding(&self) -> BBox3f {
BBox3f::new(
Point3f::new(-self.radius, -self.radius, self.zmin),
Point3f::new(self.radius, self.radius, self.zmax)
)
}
// /// test intersection in local frame, returns `t` when first hit
// #[inline]
// pub fn intersect_ray(&self, ray: &RawRay) -> Option<Float>
// {
// if let Some(t) = Sphere::intersect_ray_full(self.radius, ray) {
// let p = ray.evaluate(t);
// // TODO: refine sphere intersection
// let mut phi = p.y.atan2(p.x);
// if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); }
// if p.z < self.zmin || p.z > self.zmax || phi > self.phimax {
// None
// } else {
// Some(t)
// }
// } else {
// None
// }
// }
/// test intersection against the full sphere
pub fn | (radius: Float, ray: &RawRay) -> Option<Float>
{
let origin = ray.origin().to_vec();
let direction = ray.direction();
let a = direction.magnitude2();
let b = (direction.mul_element_wise(origin) * (2.0 as Float)).sum();
let c = origin.magnitude2() - radius * radius;
let delta = b* b - (4.0 as Float) * a * c;
if delta < (0.0 as Float) { return None; }
let invert_2a = (1.0 as Float) / ((2.0 as Float) * a);
let d1 = delta.sqrt() * invert_2a;
let d0 = -b * invert_2a;
let(t0, t1) = if invert_2a > 0.0 as Float {
(d0-d1, d0+d1)
} else {
(d0+d1, d0-d1)
};
let tmax = ray.max_extend();
if t0 > tmax || t1 < (0.0 as Float) { return None; }
if t0 > (0.0 as Float) {
Some(t0)
} else if t1 > tmax {
None
} else {
Some(t1)
}
}
}
impl Shape for Sphere {
#[inline]
fn bbox_local(&self) -> BBox3f {
self.bounding()
}
#[inline]
fn intersect_ray(&self, ray: &RawRay) -> Option<(Float, SurfaceInteraction)> {
if let Some(t) = Sphere::intersect_ray_full(self.radius, &ray) {
let mut p = ray.evaluate(t).to_vec();
// refine sphere intersection
p = p* self.radius / p.magnitude();
if p.x == 0.0 as Float && p.y == 0.0 as Float {
p.x = 1e-5 as Float * self.radius;
}
let p = Point3f::from_vec(p);
let mut phi = p.y.atan2(p.x);
if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); }
// TODO: refine test against clipping
if p.z < self.zmin || p.z > self.zmax || phi > self.phimax {
None
} else {
let phimax = self.phimax;
let thetamax = self.thetamax;
let thetamin = self.thetamin;
let thetadelta = thetamax - thetamin;
let u = phi / phimax;
let theta = (p.z / self.radius).acos();
let v = (theta - thetamin) / thetadelta;
let inv_z_radius = (1.0 as Float) / (p.x * p.x + p.y * p.y).sqrt();
let cos_phi = p.x * inv_z_radius;
let sin_phi = p.y * inv_z_radius;
let dpdu = Vector3f::new(-phimax * p.y, phimax * p.x, 0.0 as Float);
let dpdv = thetadelta * Vector3f::new(p.z * cos_phi, p.z * sin_phi, -self.radius * theta.sin());
let (dndu, dndv) = {
let dppduu = - phimax * phimax * Vector3f::new(p.x, p.y, 0.0 as Float);
let dppduv = thetadelta * p.z * phimax * Vector3f::new(-sin_phi, cos_phi, 0.0 as Float);
let dppdvv = -thetadelta * thetadelta * Vector3f::new(p.x, p.y, p.z);
let e = dpdu.dot(dpdu);
let f = dpdu.dot(dpdv);
let g = dpdv.dot(dpdv);
let n = dpdu.cross(dpdv).normalize();
let ee = n.dot(dppduu);
let ff = n.dot(dppduv);
let gg = n.dot(dppdvv);
let inv = (1.0 as Float) / (e * g - f * f);
(
(ff*f - ee*g) * inv * dpdu + (ee*f - ff*e) * inv * dpdv,
(gg*f - ff*g) * inv * dpdu + (ff*f - gg*e) * inv * dpdv
)
};
Some((
t, SurfaceInteraction::new(
p,
// FIXME: wrong
Vector3f::zero(),
-ray.direction(), Point2f::new(u, v),
DuvInfo{
dpdu: dpdu,
dpdv: dpdv,
dndu: dndu,
dndv: dndv,
},
)
))
}
} else {
None
}
}
#[inline]
fn surface_area(&self) -> Float {
self.phimax * self.radius * (self.zmax - self.zmin)
}
fn sample(&self, sample: Point2f) -> (Point3f, Vector3f, Float) {
// sample.x scaled to [0, phimax]
let phi = sample.x * self.phimax;
// sample.y scaled to [thetamin, thetamax]
let theta = sample.y * (self.thetamax - self.thetamin) + self.thetamin;
let dir = Sphericalf::new(theta, phi).to_vec();
let pos = Point3f::from_vec(dir*self.radius);
(pos, dir, 1. as Float / self.surface_area())
// use sample::sample_uniform_sphere;
// let dir = sample_uniform_sphere(sample);
// let pos = Point3f::from_vec(dir*self.radius);
// (pos, dir, 1. as Float / self.surface_area())
}
}
| intersect_ray_full | identifier_name |
Dom.ts | import { Blend, Component } from "@blendsdk/core";
import { ICreateElementConfig, ICssClassDictionary, IHTMLElementProvider } from "./Types";
// tslint:disable-next-line:no-namespace
export namespace Dom {
/**
* Finds an element using a CSS selector. This method internally uses querySelector.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {(T | null)}
*/
export function findElement<T extends HTMLElement>(selector: string, fromRoot?: HTMLElement | Document): T | null {
return (fromRoot || document).querySelector(selector) as T;
}
/**
* Clears the contents of an element
*
* @export
* @param {HTMLElement} el
*/
export function clearElement(el: HTMLElement) {
if (el) {
while (el.childNodes.length !== 0) {
el.children[0].parentElement.removeChild(el.children[0]);
}
// perhaps overkill!
el.textContent = "";
el.innerHTML = "";
}
}
/**
* Finds a list of elements using a CSS selector. This method internally uses querySelectorAll.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {NodeListOf<T>}
*/
export function findElements<T extends HTMLElement & Node>(
selector: string,
fromRoot?: HTMLElement | Document
): NodeListOf<T> {
return (fromRoot || document).querySelectorAll(selector) as NodeListOf<T>;
}
/**
* Utility function for creating en `HTMLElement`.
* The reference callback function `refCallback` can be used
* to assign child elements which have a `reference` to class
* properties.
*
* The windows parameters `win` can be used to create the element from
* a specific `Window` context
*
* @export
* @template T
* @param {(ICreateElementConfig | ICreateElementConfigFunction)} [conf]
* @param {(reference: string, element: HTMLElement) => any} [refCallback]
* @param {Window} [win]
* @returns {T}
*/
export function createElement<T extends HTMLElement>(
conf?: HTMLElement | ICreateElementConfig,
refCallback?: (reference: string, element: HTMLElement) => any,
defaultEventTarget?: EventListenerObject
): T {
if (conf instanceof HTMLElement || conf instanceof Node) {
// TODO:1110 Check if there is a better way!
// Node to skip(fix for) SVGElement
return conf as T;
} else {
let config: ICreateElementConfig = conf as any;
/**
* Normalize the config for processing
*/
config = config || {};
config.tag = config.tag || "DIV";
refCallback = refCallback || null;
let el: HTMLElement;
if (config.tag.toLowerCase() === "svg" || config.isSVG === true) {
el = window.document.createElementNS("http://www.w3.org/2000/svg", config.tag) as any;
config.isSVG = true;
} else {
el = window.document.createElement(config.tag);
}
/**
* Internal function to parse the data-* values
*/
const parseData = (value: any) => {
if (Blend.isNullOrUndef(value)) {
value = "null";
}
if (Blend.isObject(value) || Blend.isArray(value)) {
return JSON.stringify(value);
} else {
return value;
}
};
if (config.id) {
el.id = config.id;
}
if (config.textContent) {
if (config.isSVG) {
el.textContent = config.textContent;
} else {
el.innerText = config.textContent;
}
}
if (config.htmlContent) {
el.innerHTML = config.htmlContent;
}
if (config.data) {
Blend.forEach(config.data, (item: any, key: string) => {
el.setAttribute("data-" + key, parseData(item));
});
}
if (config.attrs) {
Blend.forEach(config.attrs, (item: any, key: string) => {
if (item !== undefined) {
el.setAttribute(key, parseData(item));
}
});
}
if (config.listeners) {
Blend.forEach(config.listeners, (item: EventListenerOrEventListenerObject, key: string) => {
if (!Blend.isNullOrUndef(item)) {
item = (((item as any) === true ? defaultEventTarget : item) ||
new Function(item as any)) as any;
el.addEventListener(key, item, false);
}
});
}
if (config.css) {
el.setAttribute(
"class",
Blend.wrapInArray(config.css)
.join(" ")
.replace(/\s\s+/g, " ")
);
}
if (config.style) {
const styles: string[] = [];
Blend.forEach(config.style, (rule: string, key: string) => {
if (rule) {
styles.push(`${Blend.dashedCase(key)}:${rule}`);
}
});
const t = styles.join(";");
if (t.length !== 0) {
el.setAttribute("style", t);
}
}
/**
* The children accepts either a function or string/item/items[]
*/
if (config.children) {
// if (Blend.isInstanceOf(config.children, Blend.ui.Collection)) {
// (<Blend.ui.Collection<Blend.dom.Component>>(<any>config).children).renderTo(el);
// } else {
Blend.wrapInArray(config.children).forEach((item: any) => {
if (Blend.isString(item)) {
el.appendChild(window.document.createTextNode(item));
} else if (Blend.isInstanceOf(item, HTMLElement) || Blend.isInstanceOf(item, SVGElement)) {
el.appendChild(item as HTMLElement);
const $el = DOMElement.getElement(item as HTMLElement);
if ($el.getReference() && refCallback) {
refCallback($el.getReference(), item);
}
} else if (!Blend.isNullOrUndef(item)) {
if ((item as IHTMLElementProvider).getElement) {
el.appendChild(item.getElement());
} else {
(item as ICreateElementConfig).isSVG = config.isSVG || false;
el.appendChild(
Dom.createElement(item as ICreateElementConfig, refCallback, defaultEventTarget)
);
}
}
});
// }
}
if (config.reference) {
if (!el.$blend) {
el.$blend = {};
}
el.$blend.reference = config.reference;
if (refCallback) {
refCallback(config.reference, el);
}
}
return el as T;
}
}
}
/**
* Utility class providing various functions to manipulate or
* get information from an HTMLElement|SVGElement this class
* also can be used to create lightweight "components"
*
* @usage
* Use $e() for convenience
*
* @export
* @class DOMElement
* @implements {IHTMLElementProvider}
*/
export class DOMElement implements IHTMLElementProvider {
/**
* Wraps an HTMLElement within a Blend.dom.Element for easy manipulation
*
* @export
* @param {HTMLElement} el
* @returns {Blend.dom.Element}
*/
public static getElement(el: HTMLElement): DOMElement {
return new DOMElement(el);
}
/**
* Internal reference to the HTMLElement
*
* @protected
* @type {HTMLElement}
* @memberof Element
*/
protected el: HTMLElement;
/**
* Creates an instance of Element.
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @memberof Element
*/
public constructor(el?: HTMLElement | string | ICreateElementConfig) {
const me = this;
me.el = me.renderElement(el);
}
/**
* Internal method that is used to parse and render the HTMLElement.
*
* @protected
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @returns {HTMLElement}
* @memberof DOMElement
*/
protected renderElement(el?: HTMLElement | string | ICreateElementConfig): HTMLElement {
const me = this;
return Dom.createElement(
Blend.isString(el) ? { tag: el as any } : Blend.isNullOrUndef(el) ? {} : (el as any),
(ref: string, elem: HTMLElement) => {
if (ref !== "..") {
(me as any)[ref] = elem;
}
}
);
}
/**
* Checks if the element is of a given type.
*
* @param {string} tag
* @returns {boolean}
* @memberof Element
*/
public isTypeOf(tag: string): boolean {
const me = this;
return tag.toLowerCase() === me.el.tagName.toLowerCase();
}
/**
* Checks if the element contains a given css class.
*
* @param {string} className
* @returns {boolean}
* @memberof Element
*/
public hasClass(className: string): boolean {
const me = this;
if (me.el) {
return me.el.classList.contains(className);
} else {
return false;
}
}
/**
* Renders this Element into a container HTMLElement.
*
* @param {HTMLElement} container
* @memberof Element
*/
public renderTo(container: HTMLElement) {
if (container) {
container.appendChild(this.el);
}
}
/**
* Sets one or more css classes to this Element
* This function also accepts a dictionary.
*
* If the dictionary keys are camel/pascal case, they will be
* converted to dashes and optionally prefixed with the `prefix`
* parameter.
*
* The dictionary values can be:
* - `true` which adds the css rule
* - `false` which removes the css rule
* - `null` or `undefined` which toggles the css rule
* ```
* For example:
* var rules = {
* fooBar:true,
* barBaz:false,
* nunChuck:null
* }
*
* element.setCssClass(rules,'b')
* ```
*
* Will result:
* before:
* `class="b-bar-baz"`
* after:
* `class="b-foo-bar b-nun-chuck"`
*
* @param {(string | Array<string>)} css
* @memberof Element
*/
public setCssClass(css: string | string[] | ICssClassDictionary, prefix?: string) {
const me = this,
selector = (key: string): string => {
const parts = [prefix || "b"].concat((key.replace(/([A-Z])/g, " $1") || "").split(" "));
parts.forEach((part: string, index: number) => {
parts[index] = part.trim().toLocaleLowerCase();
});
return parts.join("-").trim();
};
if (Blend.isObject(css)) {
const rules: ICssClassDictionary = css as any;
Blend.forEach(rules, (value: true | false | null | undefined, key: string) => {
const sel = selector(key);
if (value === true && !me.el.classList.contains(sel)) {
me.el.classList.add(sel);
} else if (value === false) {
me.el.classList.remove(sel);
} else if (value === null || value === undefined) {
if (me.el.classList.contains(sel)) {
me.el.classList.remove(sel);
} else {
me.el.classList.add(sel);
}
}
});
} else {
Blend.wrapInArray(css).forEach((item: string) => {
if (!me.el.classList.contains(item)) {
me.el.classList.add(item);
}
});
}
}
/**
* Gets the size and the window location of this element.
*
* @returns {ClientRect}
* @memberof Element
*/
public getBounds(): ClientRect {
return this.el.getBoundingClientRect();
}
/**
* Returns a reference to the internal HTMLElement
*
* @returns {(HTMLElement | null)}
* @memberof Element
*/
public getElement<T extends HTMLElement>(): T | null {
return this.el as T;
}
/**
* Sets a reference key to be used internally for resolving event event targets
*
* @param {string} value
* @returns {this}
* @memberof Element
*/
public setReference(value: string): this {
this.setData("reference", value);
return this;
}
/**
* Utility method to check whether the element
* has/is of a certain reference
*
* @param {string} value
* @returns {boolean}
* @memberof Element
*/
public isReference(value: string): boolean {
return this.getReference() === value;
}
/**
* Utility method that is used for getting a parent element
* should the current element's $reference have the value of '..'
* This function is used in the event handling system.
*
* @returns {(Blend.dom.Element | null)}
* @memberof Element
*/
public getReferencedParent(): DOMElement | null |
/**
* Finds the first parent element containing the given class name
* or the element itself with the class name.
*
* @param {string} cssClass
* @param {HTMLElement} element
* @returns {HTMLElement}
* @memberof Element
*/
public findParentByClass(cssClass: string): HTMLElement {
const me = this;
let result: HTMLElement = null,
search = me.el;
while (search !== null) {
if (search.classList.contains(cssClass)) {
result = search;
search = null;
} else {
search = search.parentElement;
}
}
return result;
}
/**
* Gets the event target reference key
*
* @returns {string}
* @memberof Element
*/
public getReference(): string {
return this.getData<string>("reference");
}
/**
* Gets an arbitrary data from the HTMLElement
*
* @template T
* @param {string} key
* @param {T} [defaultValue]
* @returns {T}
* @memberof Element
*/
public getData<T>(key: string, defaultValue?: T): T {
const me = this;
if (me.el && me.el.$blend) {
return me.el.$blend[key] || defaultValue;
} else {
return defaultValue;
}
}
/**
* Sets an arbitrary data to the HTMLElement
*
* @param {string} key
* @param {*} value
* @returns {this}
* @memberof Element
*/
public setData(key: string, value: any): this {
const me = this;
if (me.el) {
if (!me.el.$blend) {
me.el.$blend = {};
}
me.el.$blend[key] = value;
}
return this;
}
/**
* Set a UID (unique component id) value for this element
* which can be used to identify this element to a {{Blend.core.Component}}
*
* if no id is provided a automatic id will be generated for this element.
*
* @param {string} [id]
* @returns {this}
* @memberof Element
*/
public setUID(id?: string): this {
const me = this;
me.setData(Component.KEY_UID, id || Blend.ID());
return me;
}
/**
* Gets the UID (unique component id) value that was previously set
* on this element
*
* @returns {string}
* @memberof Element
*/
public getUID(): string {
const me = this;
return me.getData(Component.KEY_UID);
}
}
| {
const me = this,
ref = me.getReference();
if (ref) {
if (ref === ".." && me.el.parentElement) {
return DOMElement.getElement(me.el.parentElement).getReferencedParent();
} else {
return DOMElement.getElement(me.el);
}
} else {
return null;
}
} | identifier_body |
Dom.ts | import { Blend, Component } from "@blendsdk/core";
import { ICreateElementConfig, ICssClassDictionary, IHTMLElementProvider } from "./Types";
// tslint:disable-next-line:no-namespace
export namespace Dom {
/**
* Finds an element using a CSS selector. This method internally uses querySelector.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {(T | null)}
*/
export function findElement<T extends HTMLElement>(selector: string, fromRoot?: HTMLElement | Document): T | null {
return (fromRoot || document).querySelector(selector) as T;
}
/**
* Clears the contents of an element
*
* @export
* @param {HTMLElement} el
*/
export function clearElement(el: HTMLElement) {
if (el) {
while (el.childNodes.length !== 0) {
el.children[0].parentElement.removeChild(el.children[0]);
}
// perhaps overkill!
el.textContent = "";
el.innerHTML = "";
}
}
/**
* Finds a list of elements using a CSS selector. This method internally uses querySelectorAll.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {NodeListOf<T>}
*/
export function findElements<T extends HTMLElement & Node>(
selector: string,
fromRoot?: HTMLElement | Document | }
/**
* Utility function for creating en `HTMLElement`.
* The reference callback function `refCallback` can be used
* to assign child elements which have a `reference` to class
* properties.
*
* The windows parameters `win` can be used to create the element from
* a specific `Window` context
*
* @export
* @template T
* @param {(ICreateElementConfig | ICreateElementConfigFunction)} [conf]
* @param {(reference: string, element: HTMLElement) => any} [refCallback]
* @param {Window} [win]
* @returns {T}
*/
export function createElement<T extends HTMLElement>(
conf?: HTMLElement | ICreateElementConfig,
refCallback?: (reference: string, element: HTMLElement) => any,
defaultEventTarget?: EventListenerObject
): T {
if (conf instanceof HTMLElement || conf instanceof Node) {
// TODO:1110 Check if there is a better way!
// Node to skip(fix for) SVGElement
return conf as T;
} else {
let config: ICreateElementConfig = conf as any;
/**
* Normalize the config for processing
*/
config = config || {};
config.tag = config.tag || "DIV";
refCallback = refCallback || null;
let el: HTMLElement;
if (config.tag.toLowerCase() === "svg" || config.isSVG === true) {
el = window.document.createElementNS("http://www.w3.org/2000/svg", config.tag) as any;
config.isSVG = true;
} else {
el = window.document.createElement(config.tag);
}
/**
* Internal function to parse the data-* values
*/
const parseData = (value: any) => {
if (Blend.isNullOrUndef(value)) {
value = "null";
}
if (Blend.isObject(value) || Blend.isArray(value)) {
return JSON.stringify(value);
} else {
return value;
}
};
if (config.id) {
el.id = config.id;
}
if (config.textContent) {
if (config.isSVG) {
el.textContent = config.textContent;
} else {
el.innerText = config.textContent;
}
}
if (config.htmlContent) {
el.innerHTML = config.htmlContent;
}
if (config.data) {
Blend.forEach(config.data, (item: any, key: string) => {
el.setAttribute("data-" + key, parseData(item));
});
}
if (config.attrs) {
Blend.forEach(config.attrs, (item: any, key: string) => {
if (item !== undefined) {
el.setAttribute(key, parseData(item));
}
});
}
if (config.listeners) {
Blend.forEach(config.listeners, (item: EventListenerOrEventListenerObject, key: string) => {
if (!Blend.isNullOrUndef(item)) {
item = (((item as any) === true ? defaultEventTarget : item) ||
new Function(item as any)) as any;
el.addEventListener(key, item, false);
}
});
}
if (config.css) {
el.setAttribute(
"class",
Blend.wrapInArray(config.css)
.join(" ")
.replace(/\s\s+/g, " ")
);
}
if (config.style) {
const styles: string[] = [];
Blend.forEach(config.style, (rule: string, key: string) => {
if (rule) {
styles.push(`${Blend.dashedCase(key)}:${rule}`);
}
});
const t = styles.join(";");
if (t.length !== 0) {
el.setAttribute("style", t);
}
}
/**
* The children accepts either a function or string/item/items[]
*/
if (config.children) {
// if (Blend.isInstanceOf(config.children, Blend.ui.Collection)) {
// (<Blend.ui.Collection<Blend.dom.Component>>(<any>config).children).renderTo(el);
// } else {
Blend.wrapInArray(config.children).forEach((item: any) => {
if (Blend.isString(item)) {
el.appendChild(window.document.createTextNode(item));
} else if (Blend.isInstanceOf(item, HTMLElement) || Blend.isInstanceOf(item, SVGElement)) {
el.appendChild(item as HTMLElement);
const $el = DOMElement.getElement(item as HTMLElement);
if ($el.getReference() && refCallback) {
refCallback($el.getReference(), item);
}
} else if (!Blend.isNullOrUndef(item)) {
if ((item as IHTMLElementProvider).getElement) {
el.appendChild(item.getElement());
} else {
(item as ICreateElementConfig).isSVG = config.isSVG || false;
el.appendChild(
Dom.createElement(item as ICreateElementConfig, refCallback, defaultEventTarget)
);
}
}
});
// }
}
if (config.reference) {
if (!el.$blend) {
el.$blend = {};
}
el.$blend.reference = config.reference;
if (refCallback) {
refCallback(config.reference, el);
}
}
return el as T;
}
}
}
/**
* Utility class providing various functions to manipulate or
* get information from an HTMLElement|SVGElement this class
* also can be used to create lightweight "components"
*
* @usage
* Use $e() for convenience
*
* @export
* @class DOMElement
* @implements {IHTMLElementProvider}
*/
export class DOMElement implements IHTMLElementProvider {
/**
* Wraps an HTMLElement within a Blend.dom.Element for easy manipulation
*
* @export
* @param {HTMLElement} el
* @returns {Blend.dom.Element}
*/
public static getElement(el: HTMLElement): DOMElement {
return new DOMElement(el);
}
/**
* Internal reference to the HTMLElement
*
* @protected
* @type {HTMLElement}
* @memberof Element
*/
protected el: HTMLElement;
/**
* Creates an instance of Element.
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @memberof Element
*/
public constructor(el?: HTMLElement | string | ICreateElementConfig) {
const me = this;
me.el = me.renderElement(el);
}
/**
* Internal method that is used to parse and render the HTMLElement.
*
* @protected
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @returns {HTMLElement}
* @memberof DOMElement
*/
protected renderElement(el?: HTMLElement | string | ICreateElementConfig): HTMLElement {
const me = this;
return Dom.createElement(
Blend.isString(el) ? { tag: el as any } : Blend.isNullOrUndef(el) ? {} : (el as any),
(ref: string, elem: HTMLElement) => {
if (ref !== "..") {
(me as any)[ref] = elem;
}
}
);
}
/**
* Checks if the element is of a given type.
*
* @param {string} tag
* @returns {boolean}
* @memberof Element
*/
public isTypeOf(tag: string): boolean {
const me = this;
return tag.toLowerCase() === me.el.tagName.toLowerCase();
}
/**
* Checks if the element contains a given css class.
*
* @param {string} className
* @returns {boolean}
* @memberof Element
*/
public hasClass(className: string): boolean {
const me = this;
if (me.el) {
return me.el.classList.contains(className);
} else {
return false;
}
}
/**
* Renders this Element into a container HTMLElement.
*
* @param {HTMLElement} container
* @memberof Element
*/
public renderTo(container: HTMLElement) {
if (container) {
container.appendChild(this.el);
}
}
/**
* Sets one or more css classes to this Element
* This function also accepts a dictionary.
*
* If the dictionary keys are camel/pascal case, they will be
* converted to dashes and optionally prefixed with the `prefix`
* parameter.
*
* The dictionary values can be:
* - `true` which adds the css rule
* - `false` which removes the css rule
* - `null` or `undefined` which toggles the css rule
* ```
* For example:
* var rules = {
* fooBar:true,
* barBaz:false,
* nunChuck:null
* }
*
* element.setCssClass(rules,'b')
* ```
*
* Will result:
* before:
* `class="b-bar-baz"`
* after:
* `class="b-foo-bar b-nun-chuck"`
*
* @param {(string | Array<string>)} css
* @memberof Element
*/
public setCssClass(css: string | string[] | ICssClassDictionary, prefix?: string) {
const me = this,
selector = (key: string): string => {
const parts = [prefix || "b"].concat((key.replace(/([A-Z])/g, " $1") || "").split(" "));
parts.forEach((part: string, index: number) => {
parts[index] = part.trim().toLocaleLowerCase();
});
return parts.join("-").trim();
};
if (Blend.isObject(css)) {
const rules: ICssClassDictionary = css as any;
Blend.forEach(rules, (value: true | false | null | undefined, key: string) => {
const sel = selector(key);
if (value === true && !me.el.classList.contains(sel)) {
me.el.classList.add(sel);
} else if (value === false) {
me.el.classList.remove(sel);
} else if (value === null || value === undefined) {
if (me.el.classList.contains(sel)) {
me.el.classList.remove(sel);
} else {
me.el.classList.add(sel);
}
}
});
} else {
Blend.wrapInArray(css).forEach((item: string) => {
if (!me.el.classList.contains(item)) {
me.el.classList.add(item);
}
});
}
}
/**
* Gets the size and the window location of this element.
*
* @returns {ClientRect}
* @memberof Element
*/
public getBounds(): ClientRect {
return this.el.getBoundingClientRect();
}
/**
* Returns a reference to the internal HTMLElement
*
* @returns {(HTMLElement | null)}
* @memberof Element
*/
public getElement<T extends HTMLElement>(): T | null {
return this.el as T;
}
/**
* Sets a reference key to be used internally for resolving event event targets
*
* @param {string} value
* @returns {this}
* @memberof Element
*/
public setReference(value: string): this {
this.setData("reference", value);
return this;
}
/**
* Utility method to check whether the element
* has/is of a certain reference
*
* @param {string} value
* @returns {boolean}
* @memberof Element
*/
public isReference(value: string): boolean {
return this.getReference() === value;
}
/**
* Utility method that is used for getting a parent element
* should the current element's $reference have the value of '..'
* This function is used in the event handling system.
*
* @returns {(Blend.dom.Element | null)}
* @memberof Element
*/
public getReferencedParent(): DOMElement | null {
const me = this,
ref = me.getReference();
if (ref) {
if (ref === ".." && me.el.parentElement) {
return DOMElement.getElement(me.el.parentElement).getReferencedParent();
} else {
return DOMElement.getElement(me.el);
}
} else {
return null;
}
}
/**
* Finds the first parent element containing the given class name
* or the element itself with the class name.
*
* @param {string} cssClass
* @param {HTMLElement} element
* @returns {HTMLElement}
* @memberof Element
*/
public findParentByClass(cssClass: string): HTMLElement {
const me = this;
let result: HTMLElement = null,
search = me.el;
while (search !== null) {
if (search.classList.contains(cssClass)) {
result = search;
search = null;
} else {
search = search.parentElement;
}
}
return result;
}
/**
* Gets the event target reference key
*
* @returns {string}
* @memberof Element
*/
public getReference(): string {
return this.getData<string>("reference");
}
/**
* Gets an arbitrary data from the HTMLElement
*
* @template T
* @param {string} key
* @param {T} [defaultValue]
* @returns {T}
* @memberof Element
*/
public getData<T>(key: string, defaultValue?: T): T {
const me = this;
if (me.el && me.el.$blend) {
return me.el.$blend[key] || defaultValue;
} else {
return defaultValue;
}
}
/**
* Sets an arbitrary data to the HTMLElement
*
* @param {string} key
* @param {*} value
* @returns {this}
* @memberof Element
*/
public setData(key: string, value: any): this {
const me = this;
if (me.el) {
if (!me.el.$blend) {
me.el.$blend = {};
}
me.el.$blend[key] = value;
}
return this;
}
/**
* Set a UID (unique component id) value for this element
* which can be used to identify this element to a {{Blend.core.Component}}
*
* if no id is provided a automatic id will be generated for this element.
*
* @param {string} [id]
* @returns {this}
* @memberof Element
*/
public setUID(id?: string): this {
const me = this;
me.setData(Component.KEY_UID, id || Blend.ID());
return me;
}
/**
* Gets the UID (unique component id) value that was previously set
* on this element
*
* @returns {string}
* @memberof Element
*/
public getUID(): string {
const me = this;
return me.getData(Component.KEY_UID);
}
} | ): NodeListOf<T> {
return (fromRoot || document).querySelectorAll(selector) as NodeListOf<T>; | random_line_split |
Dom.ts | import { Blend, Component } from "@blendsdk/core";
import { ICreateElementConfig, ICssClassDictionary, IHTMLElementProvider } from "./Types";
// tslint:disable-next-line:no-namespace
export namespace Dom {
/**
* Finds an element using a CSS selector. This method internally uses querySelector.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {(T | null)}
*/
export function findElement<T extends HTMLElement>(selector: string, fromRoot?: HTMLElement | Document): T | null {
return (fromRoot || document).querySelector(selector) as T;
}
/**
* Clears the contents of an element
*
* @export
* @param {HTMLElement} el
*/
export function clearElement(el: HTMLElement) {
if (el) {
while (el.childNodes.length !== 0) {
el.children[0].parentElement.removeChild(el.children[0]);
}
// perhaps overkill!
el.textContent = "";
el.innerHTML = "";
}
}
/**
* Finds a list of elements using a CSS selector. This method internally uses querySelectorAll.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {NodeListOf<T>}
*/
export function findElements<T extends HTMLElement & Node>(
selector: string,
fromRoot?: HTMLElement | Document
): NodeListOf<T> {
return (fromRoot || document).querySelectorAll(selector) as NodeListOf<T>;
}
/**
* Utility function for creating en `HTMLElement`.
* The reference callback function `refCallback` can be used
* to assign child elements which have a `reference` to class
* properties.
*
* The windows parameters `win` can be used to create the element from
* a specific `Window` context
*
* @export
* @template T
* @param {(ICreateElementConfig | ICreateElementConfigFunction)} [conf]
* @param {(reference: string, element: HTMLElement) => any} [refCallback]
* @param {Window} [win]
* @returns {T}
*/
export function createElement<T extends HTMLElement>(
conf?: HTMLElement | ICreateElementConfig,
refCallback?: (reference: string, element: HTMLElement) => any,
defaultEventTarget?: EventListenerObject
): T {
if (conf instanceof HTMLElement || conf instanceof Node) {
// TODO:1110 Check if there is a better way!
// Node to skip(fix for) SVGElement
return conf as T;
} else {
let config: ICreateElementConfig = conf as any;
/**
* Normalize the config for processing
*/
config = config || {};
config.tag = config.tag || "DIV";
refCallback = refCallback || null;
let el: HTMLElement;
if (config.tag.toLowerCase() === "svg" || config.isSVG === true) {
el = window.document.createElementNS("http://www.w3.org/2000/svg", config.tag) as any;
config.isSVG = true;
} else {
el = window.document.createElement(config.tag);
}
/**
* Internal function to parse the data-* values
*/
const parseData = (value: any) => {
if (Blend.isNullOrUndef(value)) {
value = "null";
}
if (Blend.isObject(value) || Blend.isArray(value)) {
return JSON.stringify(value);
} else {
return value;
}
};
if (config.id) {
el.id = config.id;
}
if (config.textContent) {
if (config.isSVG) {
el.textContent = config.textContent;
} else {
el.innerText = config.textContent;
}
}
if (config.htmlContent) {
el.innerHTML = config.htmlContent;
}
if (config.data) {
Blend.forEach(config.data, (item: any, key: string) => {
el.setAttribute("data-" + key, parseData(item));
});
}
if (config.attrs) {
Blend.forEach(config.attrs, (item: any, key: string) => {
if (item !== undefined) {
el.setAttribute(key, parseData(item));
}
});
}
if (config.listeners) {
Blend.forEach(config.listeners, (item: EventListenerOrEventListenerObject, key: string) => {
if (!Blend.isNullOrUndef(item)) {
item = (((item as any) === true ? defaultEventTarget : item) ||
new Function(item as any)) as any;
el.addEventListener(key, item, false);
}
});
}
if (config.css) {
el.setAttribute(
"class",
Blend.wrapInArray(config.css)
.join(" ")
.replace(/\s\s+/g, " ")
);
}
if (config.style) {
const styles: string[] = [];
Blend.forEach(config.style, (rule: string, key: string) => {
if (rule) {
styles.push(`${Blend.dashedCase(key)}:${rule}`);
}
});
const t = styles.join(";");
if (t.length !== 0) {
el.setAttribute("style", t);
}
}
/**
* The children accepts either a function or string/item/items[]
*/
if (config.children) {
// if (Blend.isInstanceOf(config.children, Blend.ui.Collection)) {
// (<Blend.ui.Collection<Blend.dom.Component>>(<any>config).children).renderTo(el);
// } else {
Blend.wrapInArray(config.children).forEach((item: any) => {
if (Blend.isString(item)) {
el.appendChild(window.document.createTextNode(item));
} else if (Blend.isInstanceOf(item, HTMLElement) || Blend.isInstanceOf(item, SVGElement)) {
el.appendChild(item as HTMLElement);
const $el = DOMElement.getElement(item as HTMLElement);
if ($el.getReference() && refCallback) {
refCallback($el.getReference(), item);
}
} else if (!Blend.isNullOrUndef(item)) {
if ((item as IHTMLElementProvider).getElement) {
el.appendChild(item.getElement());
} else {
(item as ICreateElementConfig).isSVG = config.isSVG || false;
el.appendChild(
Dom.createElement(item as ICreateElementConfig, refCallback, defaultEventTarget)
);
}
}
});
// }
}
if (config.reference) {
if (!el.$blend) {
el.$blend = {};
}
el.$blend.reference = config.reference;
if (refCallback) {
refCallback(config.reference, el);
}
}
return el as T;
}
}
}
/**
* Utility class providing various functions to manipulate or
* get information from an HTMLElement|SVGElement this class
* also can be used to create lightweight "components"
*
* @usage
* Use $e() for convenience
*
* @export
* @class DOMElement
* @implements {IHTMLElementProvider}
*/
export class DOMElement implements IHTMLElementProvider {
/**
* Wraps an HTMLElement within a Blend.dom.Element for easy manipulation
*
* @export
* @param {HTMLElement} el
* @returns {Blend.dom.Element}
*/
public static getElement(el: HTMLElement): DOMElement {
return new DOMElement(el);
}
/**
* Internal reference to the HTMLElement
*
* @protected
* @type {HTMLElement}
* @memberof Element
*/
protected el: HTMLElement;
/**
* Creates an instance of Element.
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @memberof Element
*/
public constructor(el?: HTMLElement | string | ICreateElementConfig) {
const me = this;
me.el = me.renderElement(el);
}
/**
* Internal method that is used to parse and render the HTMLElement.
*
* @protected
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @returns {HTMLElement}
* @memberof DOMElement
*/
protected renderElement(el?: HTMLElement | string | ICreateElementConfig): HTMLElement {
const me = this;
return Dom.createElement(
Blend.isString(el) ? { tag: el as any } : Blend.isNullOrUndef(el) ? {} : (el as any),
(ref: string, elem: HTMLElement) => {
if (ref !== "..") {
(me as any)[ref] = elem;
}
}
);
}
/**
* Checks if the element is of a given type.
*
* @param {string} tag
* @returns {boolean}
* @memberof Element
*/
public isTypeOf(tag: string): boolean {
const me = this;
return tag.toLowerCase() === me.el.tagName.toLowerCase();
}
/**
* Checks if the element contains a given css class.
*
* @param {string} className
* @returns {boolean}
* @memberof Element
*/
public hasClass(className: string): boolean {
const me = this;
if (me.el) {
return me.el.classList.contains(className);
} else {
return false;
}
}
/**
* Renders this Element into a container HTMLElement.
*
* @param {HTMLElement} container
* @memberof Element
*/
public renderTo(container: HTMLElement) {
if (container) {
container.appendChild(this.el);
}
}
/**
* Sets one or more css classes to this Element
* This function also accepts a dictionary.
*
* If the dictionary keys are camel/pascal case, they will be
* converted to dashes and optionally prefixed with the `prefix`
* parameter.
*
* The dictionary values can be:
* - `true` which adds the css rule
* - `false` which removes the css rule
* - `null` or `undefined` which toggles the css rule
* ```
* For example:
* var rules = {
* fooBar:true,
* barBaz:false,
* nunChuck:null
* }
*
* element.setCssClass(rules,'b')
* ```
*
* Will result:
* before:
* `class="b-bar-baz"`
* after:
* `class="b-foo-bar b-nun-chuck"`
*
* @param {(string | Array<string>)} css
* @memberof Element
*/
public setCssClass(css: string | string[] | ICssClassDictionary, prefix?: string) {
const me = this,
selector = (key: string): string => {
const parts = [prefix || "b"].concat((key.replace(/([A-Z])/g, " $1") || "").split(" "));
parts.forEach((part: string, index: number) => {
parts[index] = part.trim().toLocaleLowerCase();
});
return parts.join("-").trim();
};
if (Blend.isObject(css)) {
const rules: ICssClassDictionary = css as any;
Blend.forEach(rules, (value: true | false | null | undefined, key: string) => {
const sel = selector(key);
if (value === true && !me.el.classList.contains(sel)) {
me.el.classList.add(sel);
} else if (value === false) {
me.el.classList.remove(sel);
} else if (value === null || value === undefined) {
if (me.el.classList.contains(sel)) {
me.el.classList.remove(sel);
} else {
me.el.classList.add(sel);
}
}
});
} else {
Blend.wrapInArray(css).forEach((item: string) => {
if (!me.el.classList.contains(item)) {
me.el.classList.add(item);
}
});
}
}
/**
* Gets the size and the window location of this element.
*
* @returns {ClientRect}
* @memberof Element
*/
public getBounds(): ClientRect {
return this.el.getBoundingClientRect();
}
/**
* Returns a reference to the internal HTMLElement
*
* @returns {(HTMLElement | null)}
* @memberof Element
*/
public getElement<T extends HTMLElement>(): T | null {
return this.el as T;
}
/**
* Sets a reference key to be used internally for resolving event event targets
*
* @param {string} value
* @returns {this}
* @memberof Element
*/
public setReference(value: string): this {
this.setData("reference", value);
return this;
}
/**
* Utility method to check whether the element
* has/is of a certain reference
*
* @param {string} value
* @returns {boolean}
* @memberof Element
*/
public isReference(value: string): boolean {
return this.getReference() === value;
}
/**
* Utility method that is used for getting a parent element
* should the current element's $reference have the value of '..'
* This function is used in the event handling system.
*
* @returns {(Blend.dom.Element | null)}
* @memberof Element
*/
public getReferencedParent(): DOMElement | null {
const me = this,
ref = me.getReference();
if (ref) {
if (ref === ".." && me.el.parentElement) | else {
return DOMElement.getElement(me.el);
}
} else {
return null;
}
}
/**
* Finds the first parent element containing the given class name
* or the element itself with the class name.
*
* @param {string} cssClass
* @param {HTMLElement} element
* @returns {HTMLElement}
* @memberof Element
*/
public findParentByClass(cssClass: string): HTMLElement {
const me = this;
let result: HTMLElement = null,
search = me.el;
while (search !== null) {
if (search.classList.contains(cssClass)) {
result = search;
search = null;
} else {
search = search.parentElement;
}
}
return result;
}
/**
* Gets the event target reference key
*
* @returns {string}
* @memberof Element
*/
public getReference(): string {
return this.getData<string>("reference");
}
/**
* Gets an arbitrary data from the HTMLElement
*
* @template T
* @param {string} key
* @param {T} [defaultValue]
* @returns {T}
* @memberof Element
*/
public getData<T>(key: string, defaultValue?: T): T {
const me = this;
if (me.el && me.el.$blend) {
return me.el.$blend[key] || defaultValue;
} else {
return defaultValue;
}
}
/**
* Sets an arbitrary data to the HTMLElement
*
* @param {string} key
* @param {*} value
* @returns {this}
* @memberof Element
*/
public setData(key: string, value: any): this {
const me = this;
if (me.el) {
if (!me.el.$blend) {
me.el.$blend = {};
}
me.el.$blend[key] = value;
}
return this;
}
/**
* Set a UID (unique component id) value for this element
* which can be used to identify this element to a {{Blend.core.Component}}
*
* if no id is provided a automatic id will be generated for this element.
*
* @param {string} [id]
* @returns {this}
* @memberof Element
*/
public setUID(id?: string): this {
const me = this;
me.setData(Component.KEY_UID, id || Blend.ID());
return me;
}
/**
* Gets the UID (unique component id) value that was previously set
* on this element
*
* @returns {string}
* @memberof Element
*/
public getUID(): string {
const me = this;
return me.getData(Component.KEY_UID);
}
}
| {
return DOMElement.getElement(me.el.parentElement).getReferencedParent();
} | conditional_block |
Dom.ts | import { Blend, Component } from "@blendsdk/core";
import { ICreateElementConfig, ICssClassDictionary, IHTMLElementProvider } from "./Types";
// tslint:disable-next-line:no-namespace
export namespace Dom {
/**
* Finds an element using a CSS selector. This method internally uses querySelector.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {(T | null)}
*/
export function findElement<T extends HTMLElement>(selector: string, fromRoot?: HTMLElement | Document): T | null {
return (fromRoot || document).querySelector(selector) as T;
}
/**
* Clears the contents of an element
*
* @export
* @param {HTMLElement} el
*/
export function clearElement(el: HTMLElement) {
if (el) {
while (el.childNodes.length !== 0) {
el.children[0].parentElement.removeChild(el.children[0]);
}
// perhaps overkill!
el.textContent = "";
el.innerHTML = "";
}
}
/**
* Finds a list of elements using a CSS selector. This method internally uses querySelectorAll.
*
* @export
* @template T
* @param {string} selector
* @param {HTMLElement} [fromRoot]
* @returns {NodeListOf<T>}
*/
export function findElements<T extends HTMLElement & Node>(
selector: string,
fromRoot?: HTMLElement | Document
): NodeListOf<T> {
return (fromRoot || document).querySelectorAll(selector) as NodeListOf<T>;
}
/**
* Utility function for creating en `HTMLElement`.
* The reference callback function `refCallback` can be used
* to assign child elements which have a `reference` to class
* properties.
*
* The windows parameters `win` can be used to create the element from
* a specific `Window` context
*
* @export
* @template T
* @param {(ICreateElementConfig | ICreateElementConfigFunction)} [conf]
* @param {(reference: string, element: HTMLElement) => any} [refCallback]
* @param {Window} [win]
* @returns {T}
*/
export function createElement<T extends HTMLElement>(
conf?: HTMLElement | ICreateElementConfig,
refCallback?: (reference: string, element: HTMLElement) => any,
defaultEventTarget?: EventListenerObject
): T {
if (conf instanceof HTMLElement || conf instanceof Node) {
// TODO:1110 Check if there is a better way!
// Node to skip(fix for) SVGElement
return conf as T;
} else {
let config: ICreateElementConfig = conf as any;
/**
* Normalize the config for processing
*/
config = config || {};
config.tag = config.tag || "DIV";
refCallback = refCallback || null;
let el: HTMLElement;
if (config.tag.toLowerCase() === "svg" || config.isSVG === true) {
el = window.document.createElementNS("http://www.w3.org/2000/svg", config.tag) as any;
config.isSVG = true;
} else {
el = window.document.createElement(config.tag);
}
/**
* Internal function to parse the data-* values
*/
const parseData = (value: any) => {
if (Blend.isNullOrUndef(value)) {
value = "null";
}
if (Blend.isObject(value) || Blend.isArray(value)) {
return JSON.stringify(value);
} else {
return value;
}
};
if (config.id) {
el.id = config.id;
}
if (config.textContent) {
if (config.isSVG) {
el.textContent = config.textContent;
} else {
el.innerText = config.textContent;
}
}
if (config.htmlContent) {
el.innerHTML = config.htmlContent;
}
if (config.data) {
Blend.forEach(config.data, (item: any, key: string) => {
el.setAttribute("data-" + key, parseData(item));
});
}
if (config.attrs) {
Blend.forEach(config.attrs, (item: any, key: string) => {
if (item !== undefined) {
el.setAttribute(key, parseData(item));
}
});
}
if (config.listeners) {
Blend.forEach(config.listeners, (item: EventListenerOrEventListenerObject, key: string) => {
if (!Blend.isNullOrUndef(item)) {
item = (((item as any) === true ? defaultEventTarget : item) ||
new Function(item as any)) as any;
el.addEventListener(key, item, false);
}
});
}
if (config.css) {
el.setAttribute(
"class",
Blend.wrapInArray(config.css)
.join(" ")
.replace(/\s\s+/g, " ")
);
}
if (config.style) {
const styles: string[] = [];
Blend.forEach(config.style, (rule: string, key: string) => {
if (rule) {
styles.push(`${Blend.dashedCase(key)}:${rule}`);
}
});
const t = styles.join(";");
if (t.length !== 0) {
el.setAttribute("style", t);
}
}
/**
* The children accepts either a function or string/item/items[]
*/
if (config.children) {
// if (Blend.isInstanceOf(config.children, Blend.ui.Collection)) {
// (<Blend.ui.Collection<Blend.dom.Component>>(<any>config).children).renderTo(el);
// } else {
Blend.wrapInArray(config.children).forEach((item: any) => {
if (Blend.isString(item)) {
el.appendChild(window.document.createTextNode(item));
} else if (Blend.isInstanceOf(item, HTMLElement) || Blend.isInstanceOf(item, SVGElement)) {
el.appendChild(item as HTMLElement);
const $el = DOMElement.getElement(item as HTMLElement);
if ($el.getReference() && refCallback) {
refCallback($el.getReference(), item);
}
} else if (!Blend.isNullOrUndef(item)) {
if ((item as IHTMLElementProvider).getElement) {
el.appendChild(item.getElement());
} else {
(item as ICreateElementConfig).isSVG = config.isSVG || false;
el.appendChild(
Dom.createElement(item as ICreateElementConfig, refCallback, defaultEventTarget)
);
}
}
});
// }
}
if (config.reference) {
if (!el.$blend) {
el.$blend = {};
}
el.$blend.reference = config.reference;
if (refCallback) {
refCallback(config.reference, el);
}
}
return el as T;
}
}
}
/**
* Utility class providing various functions to manipulate or
* get information from an HTMLElement|SVGElement this class
* also can be used to create lightweight "components"
*
* @usage
* Use $e() for convenience
*
* @export
* @class DOMElement
* @implements {IHTMLElementProvider}
*/
export class DOMElement implements IHTMLElementProvider {
/**
* Wraps an HTMLElement within a Blend.dom.Element for easy manipulation
*
* @export
* @param {HTMLElement} el
* @returns {Blend.dom.Element}
*/
public static getElement(el: HTMLElement): DOMElement {
return new DOMElement(el);
}
/**
* Internal reference to the HTMLElement
*
* @protected
* @type {HTMLElement}
* @memberof Element
*/
protected el: HTMLElement;
/**
* Creates an instance of Element.
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @memberof Element
*/
public | (el?: HTMLElement | string | ICreateElementConfig) {
const me = this;
me.el = me.renderElement(el);
}
/**
* Internal method that is used to parse and render the HTMLElement.
*
* @protected
* @param {(HTMLElement | string | ICreateElementConfig)} [el]
* @returns {HTMLElement}
* @memberof DOMElement
*/
protected renderElement(el?: HTMLElement | string | ICreateElementConfig): HTMLElement {
const me = this;
return Dom.createElement(
Blend.isString(el) ? { tag: el as any } : Blend.isNullOrUndef(el) ? {} : (el as any),
(ref: string, elem: HTMLElement) => {
if (ref !== "..") {
(me as any)[ref] = elem;
}
}
);
}
/**
* Checks if the element is of a given type.
*
* @param {string} tag
* @returns {boolean}
* @memberof Element
*/
public isTypeOf(tag: string): boolean {
const me = this;
return tag.toLowerCase() === me.el.tagName.toLowerCase();
}
/**
* Checks if the element contains a given css class.
*
* @param {string} className
* @returns {boolean}
* @memberof Element
*/
public hasClass(className: string): boolean {
const me = this;
if (me.el) {
return me.el.classList.contains(className);
} else {
return false;
}
}
/**
* Renders this Element into a container HTMLElement.
*
* @param {HTMLElement} container
* @memberof Element
*/
public renderTo(container: HTMLElement) {
if (container) {
container.appendChild(this.el);
}
}
/**
* Sets one or more css classes to this Element
* This function also accepts a dictionary.
*
* If the dictionary keys are camel/pascal case, they will be
* converted to dashes and optionally prefixed with the `prefix`
* parameter.
*
* The dictionary values can be:
* - `true` which adds the css rule
* - `false` which removes the css rule
* - `null` or `undefined` which toggles the css rule
* ```
* For example:
* var rules = {
* fooBar:true,
* barBaz:false,
* nunChuck:null
* }
*
* element.setCssClass(rules,'b')
* ```
*
* Will result:
* before:
* `class="b-bar-baz"`
* after:
* `class="b-foo-bar b-nun-chuck"`
*
* @param {(string | Array<string>)} css
* @memberof Element
*/
public setCssClass(css: string | string[] | ICssClassDictionary, prefix?: string) {
const me = this,
selector = (key: string): string => {
const parts = [prefix || "b"].concat((key.replace(/([A-Z])/g, " $1") || "").split(" "));
parts.forEach((part: string, index: number) => {
parts[index] = part.trim().toLocaleLowerCase();
});
return parts.join("-").trim();
};
if (Blend.isObject(css)) {
const rules: ICssClassDictionary = css as any;
Blend.forEach(rules, (value: true | false | null | undefined, key: string) => {
const sel = selector(key);
if (value === true && !me.el.classList.contains(sel)) {
me.el.classList.add(sel);
} else if (value === false) {
me.el.classList.remove(sel);
} else if (value === null || value === undefined) {
if (me.el.classList.contains(sel)) {
me.el.classList.remove(sel);
} else {
me.el.classList.add(sel);
}
}
});
} else {
Blend.wrapInArray(css).forEach((item: string) => {
if (!me.el.classList.contains(item)) {
me.el.classList.add(item);
}
});
}
}
/**
* Gets the size and the window location of this element.
*
* @returns {ClientRect}
* @memberof Element
*/
public getBounds(): ClientRect {
return this.el.getBoundingClientRect();
}
/**
* Returns a reference to the internal HTMLElement
*
* @returns {(HTMLElement | null)}
* @memberof Element
*/
public getElement<T extends HTMLElement>(): T | null {
return this.el as T;
}
/**
* Sets a reference key to be used internally for resolving event event targets
*
* @param {string} value
* @returns {this}
* @memberof Element
*/
public setReference(value: string): this {
this.setData("reference", value);
return this;
}
/**
* Utility method to check whether the element
* has/is of a certain reference
*
* @param {string} value
* @returns {boolean}
* @memberof Element
*/
public isReference(value: string): boolean {
return this.getReference() === value;
}
/**
* Utility method that is used for getting a parent element
* should the current element's $reference have the value of '..'
* This function is used in the event handling system.
*
* @returns {(Blend.dom.Element | null)}
* @memberof Element
*/
public getReferencedParent(): DOMElement | null {
const me = this,
ref = me.getReference();
if (ref) {
if (ref === ".." && me.el.parentElement) {
return DOMElement.getElement(me.el.parentElement).getReferencedParent();
} else {
return DOMElement.getElement(me.el);
}
} else {
return null;
}
}
/**
* Finds the first parent element containing the given class name
* or the element itself with the class name.
*
* @param {string} cssClass
* @param {HTMLElement} element
* @returns {HTMLElement}
* @memberof Element
*/
public findParentByClass(cssClass: string): HTMLElement {
const me = this;
let result: HTMLElement = null,
search = me.el;
while (search !== null) {
if (search.classList.contains(cssClass)) {
result = search;
search = null;
} else {
search = search.parentElement;
}
}
return result;
}
/**
* Gets the event target reference key
*
* @returns {string}
* @memberof Element
*/
public getReference(): string {
return this.getData<string>("reference");
}
/**
* Gets an arbitrary data from the HTMLElement
*
* @template T
* @param {string} key
* @param {T} [defaultValue]
* @returns {T}
* @memberof Element
*/
public getData<T>(key: string, defaultValue?: T): T {
const me = this;
if (me.el && me.el.$blend) {
return me.el.$blend[key] || defaultValue;
} else {
return defaultValue;
}
}
/**
* Sets an arbitrary data to the HTMLElement
*
* @param {string} key
* @param {*} value
* @returns {this}
* @memberof Element
*/
public setData(key: string, value: any): this {
const me = this;
if (me.el) {
if (!me.el.$blend) {
me.el.$blend = {};
}
me.el.$blend[key] = value;
}
return this;
}
/**
* Set a UID (unique component id) value for this element
* which can be used to identify this element to a {{Blend.core.Component}}
*
* if no id is provided a automatic id will be generated for this element.
*
* @param {string} [id]
* @returns {this}
* @memberof Element
*/
public setUID(id?: string): this {
const me = this;
me.setData(Component.KEY_UID, id || Blend.ID());
return me;
}
/**
* Gets the UID (unique component id) value that was previously set
* on this element
*
* @returns {string}
* @memberof Element
*/
public getUID(): string {
const me = this;
return me.getData(Component.KEY_UID);
}
}
| constructor | identifier_name |
Career and Job Agent.py | ##Author: Rashmi Varma
##Created: September 28, 2017
##Career and Job Center Agent
##Agent accepts a list of free-form keywords from the command line and outputs Career
##and job opportunities that closely match the input keywords. Agent uses
##k-nearest neighbor algorithm to find matches for any given value of k.
libnames = ['math', 'os', 'operator','matplotlib', 'matplotlib.pyplot']
for libname in libnames:
try:
lib = __import__(libname)
except:
print ("One of the required libraries has not been installed. Please install %s" ,lib)
try:
from bs4 import BeautifulSoup
except:
print("\nCould not import Beautiful Soup library")
try:
import urllib2
except:
print("\nCould not import UrlLib2")
try:
import os.path
except:
print("\nCould not import os path")
try:
import csv
except:
print("\nCould not import csv")
##Checks if the input keyword is present in the dictionary, if not it scrapes
##from websites.
##If input present, it fetches the links associated with the keyword.
def clusterFunc(key):
clusterData = {}
try:
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows[1]==key):
print("Keyword present in lookup file")
for rows in reader:
clusterData['title'] = rows[1]
clusterData['content'] = rows[4]
clusterData['link'] = rows[2]
agent(key, '0')
infile.close()
except:
print("The lookup table has no data. Please perform a search on Step 2 to populate the table")
##Compares the index of the sliced down Jaccard values according to k,
##to their position in the link's list. This makes finding the
##appropriate link to display easier
def list_duplicates_of(seq,item):
start_at = -1
locs = []
while True:
try:
loc = seq.index(item,start_at+1)
except ValueError:
break
else:
locs.append(loc)
start_at = loc
return locs
##def sendToScreen(d,knn_k,link):
## for i in range(0,knn_k):
## index = d[i]
## print(link[index])
## links = link[index]
## print('<a href="{0}">{0}</a>'.format(link))
##Actuator computes which documents are closest to our keyword and retrieves them
def actuator(k1,link, k):
## l = len(k1)
d=[]
d1=[]
knn_k = int(k)
orderedJacc = sorted(k1)
takingK = []
## for x in range(0,k):
takingK = (orderedJacc[:k])
for x in range(0,len(link)):
for k in takingK:
d.append(list_duplicates_of(k1,k))
count=0
for everyd in range(0,len(d)):
if count==knn_k:
break;
else:
dnd=d[everyd]
for nn in dnd:
d1.append(nn)
if len(d1)==knn_k:
break
else:
links = link[nn]
print ("\n",links)
count=count+1
#Here, we calculate Jaccard's distance and send it back to the Analysis
##function of the agent
## http://journocode.com/2016/03/10/similarity-and-distance-part-1/
def JacCal(str1, str2, lenFreq, lenLink, lenKey):
num = float(str1)
den = float(lenKey + lenLink)
deno = den - num
j = num/den
j = 1 - j
j = j*100
j = round(j,2)
return j
##This function sends data from agent about frequency to our
##function which computes Jaccard's distance
def frequencyAnalysis(freq, link, freqLength, lenLink,key,k):
k1 = []
for x in range(0,freqLength):
str1 = freq[x]
str2 = link[x]
jacc = JacCal(str1, str2, freqLength, lenLink, len(key))
k1.append(jacc)
actuator(k1, link,k)
##Agent computes all the details. Agent reads details from our table and computes
##frequency of the keyword's occurence in our retreieved links
def agent(key,k):
filename = 'Dictionary.csv'
title = []
content = []
link = []
freq = []
index = 0
ind = []
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
for rows in reader:
title.append(rows[1])
content.append(rows[4])
link.append(rows[2])
lenTitle = len(title)
lenContent = len(content)
lenLink = len(link)
infile.close()
kk = len(key)
for x in range(0,lenTitle):
countC = 0
for y in range(0,kk):
countC = countC + content[x].count(key[y])
freq.append(countC)
freqLength = len(freq)
frequencyAnalysis(freq, link, freqLength, lenLink,key,k)
##The function used to write to the file
def writeFile(key,title,link,src,content):
filename = 'Dictionary.csv'
lists = [key, title, link, src, content]
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==lists):
print("\n\nAlready present")
infile.close()
else:
with open(filename,'a') as outfile:
try:
writer = csv.writer(outfile,dialect='excel')
writer.writerow(lists)
outfile.close()
except UnicodeError:
pass
##This function is used to retrieve the data from the URL's scrapped.
##Every job search opens to an individual page which contains more details about it.
##This function retrieves those details
def findContent(source, page,source_page):
co = []
urlPage1 = urllib2.urlopen(page)
soup1 = BeautifulSoup(urlPage1, 'html.parser')
urlPageIndeed = urllib2.urlopen(source_page)
soup2 = BeautifulSoup(urlPageIndeed, 'html.parser')
if source=='acm':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
if source=='ieee':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
|
return co
if source == 'indeed':
for everyline in soup2.find_all('span',{'class':'summary','itemprop':'description'}):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
##The scrapper is a web scrapping function which uses BeautifulSoup library
##The scrapper scraps data and saves it to the lookup table for future use
def scrapper(source, page,key,k):
urlPage = urllib2.urlopen(page)
soup = BeautifulSoup(urlPage, 'html.parser')
if source=='acm' or 'ieee':
for row in soup.find_all('h3'):
if hasattr(row, "text"):
title = row.text
for a in row.find_all('a', href=True):
links = page + a['href']
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
if source=='indeed':
for row in soup.find_all('a', {'target' : '_blank', 'data-tn-element' : 'jobTitle'}):
if hasattr(row, "text"):
title = row.text
l = row.get('href')
links = page + l
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
##The sensor is responsible for getting input data to the agent.
## Here, the sensor readies the URL and calls the web scrapping function
##We have currently restricted the reading to 15 values per page. This can be increased but it also increases the execution time of the program
##The program currently takes 3-4 minutes for scrapping a new keyword sequence
def sensor(acm_page, ieee_page, indeed_page, keywords, k,key1):
print("\nGathering data...")
for everyKeyword in keywords:
acm_page = acm_page + everyKeyword
ieee_page = ieee_page + everyKeyword
indeed_page = indeed_page + everyKeyword
if len(keywords) > 1:
acm_page = acm_page + '+'
ieee_page = ieee_page + '+'
indeed_page = indeed_page + '+'
if len(keywords) > 1:
acm_page = acm_page[:-1]
ieee_page = ieee_page[:-1]
indeed_page = indeed_page[:-1]
acm_page = acm_page + '?rows=15'
ieee_page = ieee_page + '?rows=15'
scrapper('acm', acm_page,key1,k)
scrapper('ieee',ieee_page,key1,k)
scrapper('indeed',indeed_page,key1,k)
#The environment creates the url for scrapping data and sends these Url's to the sensor.
#The environment also checks if entered keyword is present in the look up table. Ideally if it is present, it won't send data to the sensor but simply read from look up table
def environment(keywords, k,key1):
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==key1):
print("\n\nAlready present")
infile.close()
acm_page = 'http://jobs.acm.org/jobs/results/keyword/'
ieee_page = 'http://jobs.ieee.org/jobs/results/keyword/'
indeed_page = 'https://www.indeed.com/jobs?q='
sensor(acm_page, ieee_page, indeed_page, keywords, k,key1)
agent(key1, k)
## The code runs continuously till 0 is pressed to quit it.
##On opening, the look up table gets created. If it already exists then we do nothing,
##otherwise we create and write headers to it
## Program can take multiple keywords as input from the user.
##User also takes the value of k here.These values are passed to the environment
def main():
quitFlag=False
filename = 'Dictionary.csv'
file_exists = os.path.isfile(filename)
headers = ['Keyword','Title','Link','Source','Content' ]
with open (filename, 'wb') as csvfile:
dw = csv.DictWriter(csvfile, headers)
dw.writeheader()
if not file_exists:
writer.writeheader()
while quitFlag==False:
keywords = []
key1 = []
keyCounter = 0
try:
x = int(raw_input("\nPlease select one of the options given below: \n0. Quit \n1. Find job ads \n2. Cluster\nYour choice:"))
except:
print("\nChoice entered is not an integer.")
try:
if x==0:
quitFlag==True
break
if x==1:
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
try:
k = int(raw_input("\nPlease enter how many job searches you want to see at a time(k):"))
except:
print("Value of number of job searches needs to be an integer only. Please run the program again and try search again")
break
environment(keywords, k,key1)
quitFlag=False
if x==2:
print("Clustering")
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
clusterFunc(key1)
else:
print("\nPlease input choices again")
quitFlag=False
except:
print("Please enter appropriate values only")
main()
| co.append(everyline.text) | conditional_block |
Career and Job Agent.py | ##Author: Rashmi Varma
##Created: September 28, 2017
##Career and Job Center Agent
##Agent accepts a list of free-form keywords from the command line and outputs Career
##and job opportunities that closely match the input keywords. Agent uses
##k-nearest neighbor algorithm to find matches for any given value of k.
libnames = ['math', 'os', 'operator','matplotlib', 'matplotlib.pyplot']
for libname in libnames:
try:
lib = __import__(libname)
except:
print ("One of the required libraries has not been installed. Please install %s" ,lib)
try:
from bs4 import BeautifulSoup
except:
print("\nCould not import Beautiful Soup library")
try:
import urllib2
except:
print("\nCould not import UrlLib2")
try:
import os.path
except:
print("\nCould not import os path")
try:
import csv
except:
print("\nCould not import csv")
##Checks if the input keyword is present in the dictionary, if not it scrapes
##from websites.
##If input present, it fetches the links associated with the keyword.
def clusterFunc(key):
clusterData = {}
try:
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows[1]==key):
print("Keyword present in lookup file")
for rows in reader:
clusterData['title'] = rows[1]
clusterData['content'] = rows[4]
clusterData['link'] = rows[2]
agent(key, '0')
infile.close()
except:
print("The lookup table has no data. Please perform a search on Step 2 to populate the table")
##Compares the index of the sliced down Jaccard values according to k,
##to their position in the link's list. This makes finding the
##appropriate link to display easier
def list_duplicates_of(seq,item):
start_at = -1
locs = []
while True:
try:
loc = seq.index(item,start_at+1)
except ValueError:
break
else:
locs.append(loc)
start_at = loc
return locs
##def sendToScreen(d,knn_k,link):
## for i in range(0,knn_k):
## index = d[i]
## print(link[index])
## links = link[index]
## print('<a href="{0}">{0}</a>'.format(link))
##Actuator computes which documents are closest to our keyword and retrieves them
def actuator(k1,link, k):
## l = len(k1)
|
#Here, we calculate Jaccard's distance and send it back to the Analysis
##function of the agent
## http://journocode.com/2016/03/10/similarity-and-distance-part-1/
def JacCal(str1, str2, lenFreq, lenLink, lenKey):
num = float(str1)
den = float(lenKey + lenLink)
deno = den - num
j = num/den
j = 1 - j
j = j*100
j = round(j,2)
return j
##This function sends data from agent about frequency to our
##function which computes Jaccard's distance
def frequencyAnalysis(freq, link, freqLength, lenLink,key,k):
k1 = []
for x in range(0,freqLength):
str1 = freq[x]
str2 = link[x]
jacc = JacCal(str1, str2, freqLength, lenLink, len(key))
k1.append(jacc)
actuator(k1, link,k)
##Agent computes all the details. Agent reads details from our table and computes
##frequency of the keyword's occurence in our retreieved links
def agent(key,k):
filename = 'Dictionary.csv'
title = []
content = []
link = []
freq = []
index = 0
ind = []
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
for rows in reader:
title.append(rows[1])
content.append(rows[4])
link.append(rows[2])
lenTitle = len(title)
lenContent = len(content)
lenLink = len(link)
infile.close()
kk = len(key)
for x in range(0,lenTitle):
countC = 0
for y in range(0,kk):
countC = countC + content[x].count(key[y])
freq.append(countC)
freqLength = len(freq)
frequencyAnalysis(freq, link, freqLength, lenLink,key,k)
##The function used to write to the file
def writeFile(key,title,link,src,content):
filename = 'Dictionary.csv'
lists = [key, title, link, src, content]
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==lists):
print("\n\nAlready present")
infile.close()
else:
with open(filename,'a') as outfile:
try:
writer = csv.writer(outfile,dialect='excel')
writer.writerow(lists)
outfile.close()
except UnicodeError:
pass
##This function is used to retrieve the data from the URL's scrapped.
##Every job search opens to an individual page which contains more details about it.
##This function retrieves those details
def findContent(source, page,source_page):
co = []
urlPage1 = urllib2.urlopen(page)
soup1 = BeautifulSoup(urlPage1, 'html.parser')
urlPageIndeed = urllib2.urlopen(source_page)
soup2 = BeautifulSoup(urlPageIndeed, 'html.parser')
if source=='acm':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
if source=='ieee':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
if source == 'indeed':
for everyline in soup2.find_all('span',{'class':'summary','itemprop':'description'}):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
##The scrapper is a web scrapping function which uses BeautifulSoup library
##The scrapper scraps data and saves it to the lookup table for future use
def scrapper(source, page,key,k):
urlPage = urllib2.urlopen(page)
soup = BeautifulSoup(urlPage, 'html.parser')
if source=='acm' or 'ieee':
for row in soup.find_all('h3'):
if hasattr(row, "text"):
title = row.text
for a in row.find_all('a', href=True):
links = page + a['href']
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
if source=='indeed':
for row in soup.find_all('a', {'target' : '_blank', 'data-tn-element' : 'jobTitle'}):
if hasattr(row, "text"):
title = row.text
l = row.get('href')
links = page + l
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
##The sensor is responsible for getting input data to the agent.
## Here, the sensor readies the URL and calls the web scrapping function
##We have currently restricted the reading to 15 values per page. This can be increased but it also increases the execution time of the program
##The program currently takes 3-4 minutes for scrapping a new keyword sequence
def sensor(acm_page, ieee_page, indeed_page, keywords, k,key1):
print("\nGathering data...")
for everyKeyword in keywords:
acm_page = acm_page + everyKeyword
ieee_page = ieee_page + everyKeyword
indeed_page = indeed_page + everyKeyword
if len(keywords) > 1:
acm_page = acm_page + '+'
ieee_page = ieee_page + '+'
indeed_page = indeed_page + '+'
if len(keywords) > 1:
acm_page = acm_page[:-1]
ieee_page = ieee_page[:-1]
indeed_page = indeed_page[:-1]
acm_page = acm_page + '?rows=15'
ieee_page = ieee_page + '?rows=15'
scrapper('acm', acm_page,key1,k)
scrapper('ieee',ieee_page,key1,k)
scrapper('indeed',indeed_page,key1,k)
#The environment creates the url for scrapping data and sends these Url's to the sensor.
#The environment also checks if entered keyword is present in the look up table. Ideally if it is present, it won't send data to the sensor but simply read from look up table
def environment(keywords, k,key1):
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==key1):
print("\n\nAlready present")
infile.close()
acm_page = 'http://jobs.acm.org/jobs/results/keyword/'
ieee_page = 'http://jobs.ieee.org/jobs/results/keyword/'
indeed_page = 'https://www.indeed.com/jobs?q='
sensor(acm_page, ieee_page, indeed_page, keywords, k,key1)
agent(key1, k)
## The code runs continuously till 0 is pressed to quit it.
##On opening, the look up table gets created. If it already exists then we do nothing,
##otherwise we create and write headers to it
## Program can take multiple keywords as input from the user.
##User also takes the value of k here.These values are passed to the environment
def main():
quitFlag=False
filename = 'Dictionary.csv'
file_exists = os.path.isfile(filename)
headers = ['Keyword','Title','Link','Source','Content' ]
with open (filename, 'wb') as csvfile:
dw = csv.DictWriter(csvfile, headers)
dw.writeheader()
if not file_exists:
writer.writeheader()
while quitFlag==False:
keywords = []
key1 = []
keyCounter = 0
try:
x = int(raw_input("\nPlease select one of the options given below: \n0. Quit \n1. Find job ads \n2. Cluster\nYour choice:"))
except:
print("\nChoice entered is not an integer.")
try:
if x==0:
quitFlag==True
break
if x==1:
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
try:
k = int(raw_input("\nPlease enter how many job searches you want to see at a time(k):"))
except:
print("Value of number of job searches needs to be an integer only. Please run the program again and try search again")
break
environment(keywords, k,key1)
quitFlag=False
if x==2:
print("Clustering")
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
clusterFunc(key1)
else:
print("\nPlease input choices again")
quitFlag=False
except:
print("Please enter appropriate values only")
main()
| d=[]
d1=[]
knn_k = int(k)
orderedJacc = sorted(k1)
takingK = []
## for x in range(0,k):
takingK = (orderedJacc[:k])
for x in range(0,len(link)):
for k in takingK:
d.append(list_duplicates_of(k1,k))
count=0
for everyd in range(0,len(d)):
if count==knn_k:
break;
else:
dnd=d[everyd]
for nn in dnd:
d1.append(nn)
if len(d1)==knn_k:
break
else:
links = link[nn]
print ("\n",links)
count=count+1 | identifier_body |
Career and Job Agent.py | ##Author: Rashmi Varma
##Created: September 28, 2017
##Career and Job Center Agent
##Agent accepts a list of free-form keywords from the command line and outputs Career
##and job opportunities that closely match the input keywords. Agent uses
##k-nearest neighbor algorithm to find matches for any given value of k.
libnames = ['math', 'os', 'operator','matplotlib', 'matplotlib.pyplot']
for libname in libnames:
try:
lib = __import__(libname)
except:
print ("One of the required libraries has not been installed. Please install %s" ,lib)
try:
from bs4 import BeautifulSoup
except:
print("\nCould not import Beautiful Soup library")
try:
import urllib2
except:
print("\nCould not import UrlLib2")
try:
import os.path
except:
print("\nCould not import os path")
try:
import csv
except:
print("\nCould not import csv")
##Checks if the input keyword is present in the dictionary, if not it scrapes
##from websites.
##If input present, it fetches the links associated with the keyword.
def clusterFunc(key):
clusterData = {}
try:
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows[1]==key):
print("Keyword present in lookup file")
for rows in reader:
clusterData['title'] = rows[1]
clusterData['content'] = rows[4]
clusterData['link'] = rows[2]
agent(key, '0')
infile.close()
except:
print("The lookup table has no data. Please perform a search on Step 2 to populate the table")
##Compares the index of the sliced down Jaccard values according to k,
##to their position in the link's list. This makes finding the
##appropriate link to display easier
def list_duplicates_of(seq,item):
start_at = -1
locs = []
while True:
try:
loc = seq.index(item,start_at+1)
except ValueError:
break
else:
locs.append(loc)
start_at = loc
return locs
##def sendToScreen(d,knn_k,link):
## for i in range(0,knn_k):
## index = d[i]
## print(link[index])
## links = link[index]
## print('<a href="{0}">{0}</a>'.format(link))
##Actuator computes which documents are closest to our keyword and retrieves them
def actuator(k1,link, k):
## l = len(k1)
d=[]
d1=[]
knn_k = int(k)
orderedJacc = sorted(k1)
takingK = []
## for x in range(0,k):
takingK = (orderedJacc[:k])
for x in range(0,len(link)):
for k in takingK:
d.append(list_duplicates_of(k1,k))
count=0
for everyd in range(0,len(d)):
if count==knn_k:
break;
else:
dnd=d[everyd]
for nn in dnd:
d1.append(nn)
if len(d1)==knn_k:
break
else:
links = link[nn]
print ("\n",links)
count=count+1
#Here, we calculate Jaccard's distance and send it back to the Analysis
##function of the agent
## http://journocode.com/2016/03/10/similarity-and-distance-part-1/
def JacCal(str1, str2, lenFreq, lenLink, lenKey):
num = float(str1)
den = float(lenKey + lenLink)
deno = den - num
j = num/den
j = 1 - j
j = j*100
j = round(j,2)
return j
##This function sends data from agent about frequency to our
##function which computes Jaccard's distance
def frequencyAnalysis(freq, link, freqLength, lenLink,key,k):
k1 = []
for x in range(0,freqLength):
str1 = freq[x]
str2 = link[x]
jacc = JacCal(str1, str2, freqLength, lenLink, len(key))
k1.append(jacc)
actuator(k1, link,k)
##Agent computes all the details. Agent reads details from our table and computes
##frequency of the keyword's occurence in our retreieved links
def | (key,k):
filename = 'Dictionary.csv'
title = []
content = []
link = []
freq = []
index = 0
ind = []
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
for rows in reader:
title.append(rows[1])
content.append(rows[4])
link.append(rows[2])
lenTitle = len(title)
lenContent = len(content)
lenLink = len(link)
infile.close()
kk = len(key)
for x in range(0,lenTitle):
countC = 0
for y in range(0,kk):
countC = countC + content[x].count(key[y])
freq.append(countC)
freqLength = len(freq)
frequencyAnalysis(freq, link, freqLength, lenLink,key,k)
##The function used to write to the file
def writeFile(key,title,link,src,content):
filename = 'Dictionary.csv'
lists = [key, title, link, src, content]
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==lists):
print("\n\nAlready present")
infile.close()
else:
with open(filename,'a') as outfile:
try:
writer = csv.writer(outfile,dialect='excel')
writer.writerow(lists)
outfile.close()
except UnicodeError:
pass
##This function is used to retrieve the data from the URL's scrapped.
##Every job search opens to an individual page which contains more details about it.
##This function retrieves those details
def findContent(source, page,source_page):
co = []
urlPage1 = urllib2.urlopen(page)
soup1 = BeautifulSoup(urlPage1, 'html.parser')
urlPageIndeed = urllib2.urlopen(source_page)
soup2 = BeautifulSoup(urlPageIndeed, 'html.parser')
if source=='acm':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
if source=='ieee':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
if source == 'indeed':
for everyline in soup2.find_all('span',{'class':'summary','itemprop':'description'}):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
##The scrapper is a web scrapping function which uses BeautifulSoup library
##The scrapper scraps data and saves it to the lookup table for future use
def scrapper(source, page,key,k):
urlPage = urllib2.urlopen(page)
soup = BeautifulSoup(urlPage, 'html.parser')
if source=='acm' or 'ieee':
for row in soup.find_all('h3'):
if hasattr(row, "text"):
title = row.text
for a in row.find_all('a', href=True):
links = page + a['href']
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
if source=='indeed':
for row in soup.find_all('a', {'target' : '_blank', 'data-tn-element' : 'jobTitle'}):
if hasattr(row, "text"):
title = row.text
l = row.get('href')
links = page + l
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
##The sensor is responsible for getting input data to the agent.
## Here, the sensor readies the URL and calls the web scrapping function
##We have currently restricted the reading to 15 values per page. This can be increased but it also increases the execution time of the program
##The program currently takes 3-4 minutes for scrapping a new keyword sequence
def sensor(acm_page, ieee_page, indeed_page, keywords, k,key1):
print("\nGathering data...")
for everyKeyword in keywords:
acm_page = acm_page + everyKeyword
ieee_page = ieee_page + everyKeyword
indeed_page = indeed_page + everyKeyword
if len(keywords) > 1:
acm_page = acm_page + '+'
ieee_page = ieee_page + '+'
indeed_page = indeed_page + '+'
if len(keywords) > 1:
acm_page = acm_page[:-1]
ieee_page = ieee_page[:-1]
indeed_page = indeed_page[:-1]
acm_page = acm_page + '?rows=15'
ieee_page = ieee_page + '?rows=15'
scrapper('acm', acm_page,key1,k)
scrapper('ieee',ieee_page,key1,k)
scrapper('indeed',indeed_page,key1,k)
#The environment creates the url for scrapping data and sends these Url's to the sensor.
#The environment also checks if entered keyword is present in the look up table. Ideally if it is present, it won't send data to the sensor but simply read from look up table
def environment(keywords, k,key1):
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==key1):
print("\n\nAlready present")
infile.close()
acm_page = 'http://jobs.acm.org/jobs/results/keyword/'
ieee_page = 'http://jobs.ieee.org/jobs/results/keyword/'
indeed_page = 'https://www.indeed.com/jobs?q='
sensor(acm_page, ieee_page, indeed_page, keywords, k,key1)
agent(key1, k)
## The code runs continuously till 0 is pressed to quit it.
##On opening, the look up table gets created. If it already exists then we do nothing,
##otherwise we create and write headers to it
## Program can take multiple keywords as input from the user.
##User also takes the value of k here.These values are passed to the environment
def main():
quitFlag=False
filename = 'Dictionary.csv'
file_exists = os.path.isfile(filename)
headers = ['Keyword','Title','Link','Source','Content' ]
with open (filename, 'wb') as csvfile:
dw = csv.DictWriter(csvfile, headers)
dw.writeheader()
if not file_exists:
writer.writeheader()
while quitFlag==False:
keywords = []
key1 = []
keyCounter = 0
try:
x = int(raw_input("\nPlease select one of the options given below: \n0. Quit \n1. Find job ads \n2. Cluster\nYour choice:"))
except:
print("\nChoice entered is not an integer.")
try:
if x==0:
quitFlag==True
break
if x==1:
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
try:
k = int(raw_input("\nPlease enter how many job searches you want to see at a time(k):"))
except:
print("Value of number of job searches needs to be an integer only. Please run the program again and try search again")
break
environment(keywords, k,key1)
quitFlag=False
if x==2:
print("Clustering")
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
clusterFunc(key1)
else:
print("\nPlease input choices again")
quitFlag=False
except:
print("Please enter appropriate values only")
main()
| agent | identifier_name |
Career and Job Agent.py | ##Author: Rashmi Varma
##Created: September 28, 2017
##Career and Job Center Agent
##Agent accepts a list of free-form keywords from the command line and outputs Career
##and job opportunities that closely match the input keywords. Agent uses
##k-nearest neighbor algorithm to find matches for any given value of k.
libnames = ['math', 'os', 'operator','matplotlib', 'matplotlib.pyplot']
for libname in libnames:
try:
lib = __import__(libname)
except:
print ("One of the required libraries has not been installed. Please install %s" ,lib)
try:
from bs4 import BeautifulSoup
except:
print("\nCould not import Beautiful Soup library")
try:
import urllib2
except:
print("\nCould not import UrlLib2")
try:
import os.path
except:
print("\nCould not import os path")
try:
import csv
except:
print("\nCould not import csv")
##Checks if the input keyword is present in the dictionary, if not it scrapes
##from websites.
##If input present, it fetches the links associated with the keyword.
def clusterFunc(key):
clusterData = {}
try:
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows[1]==key):
print("Keyword present in lookup file")
for rows in reader:
clusterData['title'] = rows[1]
clusterData['content'] = rows[4]
clusterData['link'] = rows[2]
agent(key, '0')
infile.close()
except:
print("The lookup table has no data. Please perform a search on Step 2 to populate the table")
##Compares the index of the sliced down Jaccard values according to k,
##to their position in the link's list. This makes finding the
##appropriate link to display easier
def list_duplicates_of(seq,item):
start_at = -1
locs = []
while True:
try:
loc = seq.index(item,start_at+1)
except ValueError:
break
else:
locs.append(loc)
start_at = loc
return locs
##def sendToScreen(d,knn_k,link):
## for i in range(0,knn_k):
## index = d[i]
## print(link[index])
## links = link[index]
## print('<a href="{0}">{0}</a>'.format(link))
##Actuator computes which documents are closest to our keyword and retrieves them
def actuator(k1,link, k):
## l = len(k1)
d=[]
d1=[]
knn_k = int(k)
orderedJacc = sorted(k1)
takingK = []
## for x in range(0,k):
takingK = (orderedJacc[:k])
for x in range(0,len(link)):
for k in takingK:
d.append(list_duplicates_of(k1,k))
count=0
for everyd in range(0,len(d)):
if count==knn_k:
break;
else:
dnd=d[everyd]
for nn in dnd:
d1.append(nn)
if len(d1)==knn_k:
break
else:
links = link[nn]
print ("\n",links)
count=count+1
#Here, we calculate Jaccard's distance and send it back to the Analysis
##function of the agent
## http://journocode.com/2016/03/10/similarity-and-distance-part-1/
def JacCal(str1, str2, lenFreq, lenLink, lenKey):
num = float(str1)
den = float(lenKey + lenLink)
deno = den - num
j = num/den
j = 1 - j
j = j*100
j = round(j,2)
return j
##This function sends data from agent about frequency to our
##function which computes Jaccard's distance
def frequencyAnalysis(freq, link, freqLength, lenLink,key,k):
k1 = []
for x in range(0,freqLength):
str1 = freq[x]
str2 = link[x]
jacc = JacCal(str1, str2, freqLength, lenLink, len(key)) | ##frequency of the keyword's occurence in our retreieved links
def agent(key,k):
filename = 'Dictionary.csv'
title = []
content = []
link = []
freq = []
index = 0
ind = []
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
for rows in reader:
title.append(rows[1])
content.append(rows[4])
link.append(rows[2])
lenTitle = len(title)
lenContent = len(content)
lenLink = len(link)
infile.close()
kk = len(key)
for x in range(0,lenTitle):
countC = 0
for y in range(0,kk):
countC = countC + content[x].count(key[y])
freq.append(countC)
freqLength = len(freq)
frequencyAnalysis(freq, link, freqLength, lenLink,key,k)
##The function used to write to the file
def writeFile(key,title,link,src,content):
filename = 'Dictionary.csv'
lists = [key, title, link, src, content]
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==lists):
print("\n\nAlready present")
infile.close()
else:
with open(filename,'a') as outfile:
try:
writer = csv.writer(outfile,dialect='excel')
writer.writerow(lists)
outfile.close()
except UnicodeError:
pass
##This function is used to retrieve the data from the URL's scrapped.
##Every job search opens to an individual page which contains more details about it.
##This function retrieves those details
def findContent(source, page,source_page):
co = []
urlPage1 = urllib2.urlopen(page)
soup1 = BeautifulSoup(urlPage1, 'html.parser')
urlPageIndeed = urllib2.urlopen(source_page)
soup2 = BeautifulSoup(urlPageIndeed, 'html.parser')
if source=='acm':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
if source=='ieee':
for everyline in soup1.find_all('span'):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
if source == 'indeed':
for everyline in soup2.find_all('span',{'class':'summary','itemprop':'description'}):
if hasattr(everyline, "text"):
co.append(everyline.text)
return co
##The scrapper is a web scrapping function which uses BeautifulSoup library
##The scrapper scraps data and saves it to the lookup table for future use
def scrapper(source, page,key,k):
urlPage = urllib2.urlopen(page)
soup = BeautifulSoup(urlPage, 'html.parser')
if source=='acm' or 'ieee':
for row in soup.find_all('h3'):
if hasattr(row, "text"):
title = row.text
for a in row.find_all('a', href=True):
links = page + a['href']
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
if source=='indeed':
for row in soup.find_all('a', {'target' : '_blank', 'data-tn-element' : 'jobTitle'}):
if hasattr(row, "text"):
title = row.text
l = row.get('href')
links = page + l
src = source
content = findContent(source, links,page)
writeFile(key,title,links,src,content)
##The sensor is responsible for getting input data to the agent.
## Here, the sensor readies the URL and calls the web scrapping function
##We have currently restricted the reading to 15 values per page. This can be increased but it also increases the execution time of the program
##The program currently takes 3-4 minutes for scrapping a new keyword sequence
def sensor(acm_page, ieee_page, indeed_page, keywords, k,key1):
print("\nGathering data...")
for everyKeyword in keywords:
acm_page = acm_page + everyKeyword
ieee_page = ieee_page + everyKeyword
indeed_page = indeed_page + everyKeyword
if len(keywords) > 1:
acm_page = acm_page + '+'
ieee_page = ieee_page + '+'
indeed_page = indeed_page + '+'
if len(keywords) > 1:
acm_page = acm_page[:-1]
ieee_page = ieee_page[:-1]
indeed_page = indeed_page[:-1]
acm_page = acm_page + '?rows=15'
ieee_page = ieee_page + '?rows=15'
scrapper('acm', acm_page,key1,k)
scrapper('ieee',ieee_page,key1,k)
scrapper('indeed',indeed_page,key1,k)
#The environment creates the url for scrapping data and sends these Url's to the sensor.
#The environment also checks if entered keyword is present in the look up table. Ideally if it is present, it won't send data to the sensor but simply read from look up table
def environment(keywords, k,key1):
filename = 'Dictionary.csv'
with open(filename,'rb') as infile:
reader = csv.reader(infile,dialect='excel')
rows = reader.next()
if(rows==key1):
print("\n\nAlready present")
infile.close()
acm_page = 'http://jobs.acm.org/jobs/results/keyword/'
ieee_page = 'http://jobs.ieee.org/jobs/results/keyword/'
indeed_page = 'https://www.indeed.com/jobs?q='
sensor(acm_page, ieee_page, indeed_page, keywords, k,key1)
agent(key1, k)
## The code runs continuously till 0 is pressed to quit it.
##On opening, the look up table gets created. If it already exists then we do nothing,
##otherwise we create and write headers to it
## Program can take multiple keywords as input from the user.
##User also takes the value of k here.These values are passed to the environment
def main():
quitFlag=False
filename = 'Dictionary.csv'
file_exists = os.path.isfile(filename)
headers = ['Keyword','Title','Link','Source','Content' ]
with open (filename, 'wb') as csvfile:
dw = csv.DictWriter(csvfile, headers)
dw.writeheader()
if not file_exists:
writer.writeheader()
while quitFlag==False:
keywords = []
key1 = []
keyCounter = 0
try:
x = int(raw_input("\nPlease select one of the options given below: \n0. Quit \n1. Find job ads \n2. Cluster\nYour choice:"))
except:
print("\nChoice entered is not an integer.")
try:
if x==0:
quitFlag==True
break
if x==1:
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
try:
k = int(raw_input("\nPlease enter how many job searches you want to see at a time(k):"))
except:
print("Value of number of job searches needs to be an integer only. Please run the program again and try search again")
break
environment(keywords, k,key1)
quitFlag=False
if x==2:
print("Clustering")
while keyCounter==0:
key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
if len(key) == 0:
print("\nPlease enter atleast one keyword to proceed")
keyCounter = 0
else:
keyCounter = 1
temp_keywords=key.split(',')
for everyKey in temp_keywords:
everyKey = everyKey.strip()
key1.append(everyKey)
temp = everyKey.replace(" ","+")
keywords.append(temp)
clusterFunc(key1)
else:
print("\nPlease input choices again")
quitFlag=False
except:
print("Please enter appropriate values only")
main() | k1.append(jacc)
actuator(k1, link,k)
##Agent computes all the details. Agent reads details from our table and computes | random_line_split |
implicit.go | package teams
import (
"fmt"
"sort"
"strings"
lru "github.com/hashicorp/golang-lru"
"github.com/keybase/client/go/kbfs/tlf"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"golang.org/x/net/context"
)
type implicitTeamConflict struct {
// Note this TeamID is not validated by LookupImplicitTeam. Be aware of server trust.
TeamID keybase1.TeamID `json:"team_id"`
Generation int `json:"generation"`
ConflictDate string `json:"conflict_date"`
}
func (i *implicitTeamConflict) parse() (*keybase1.ImplicitTeamConflictInfo, error) {
return libkb.ParseImplicitTeamDisplayNameSuffix(fmt.Sprintf("(conflicted copy %s #%d)", i.ConflictDate, i.Generation))
}
type implicitTeam struct {
TeamID keybase1.TeamID `json:"team_id"`
DisplayName string `json:"display_name"`
Private bool `json:"is_private"`
Conflicts []implicitTeamConflict `json:"conflicts,omitempty"`
Status libkb.AppStatus `json:"status"`
}
func (i *implicitTeam) GetAppStatus() *libkb.AppStatus {
return &i.Status
}
type ImplicitTeamOptions struct {
NoForceRepoll bool
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
team, teamName, impTeamName, _, err = LookupImplicitTeamAndConflicts(ctx, g, displayName, public, opts)
return team, teamName, impTeamName, err
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
impName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
return lookupImplicitTeamAndConflicts(ctx, g, displayName, impName, opts)
}
func LookupImplicitTeamIDUntrusted(ctx context.Context, g *libkb.GlobalContext, displayName string,
public bool) (res keybase1.TeamID, err error) {
imp, _, err := loadImpteam(ctx, g, displayName, public, false /* skipCache */)
if err != nil {
return res, err
}
return imp.TeamID, nil
}
func loadImpteam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, skipCache bool) (imp implicitTeam, hitCache bool, err error) {
cacheKey := impTeamCacheKey(displayName, public)
cacher := g.GetImplicitTeamCacher()
if !skipCache && cacher != nil {
if cv, ok := cacher.Get(cacheKey); ok {
if imp, ok := cv.(implicitTeam); ok {
g.Log.CDebugf(ctx, "using cached iteam")
return imp, true, nil
}
g.Log.CDebugf(ctx, "Bad element of wrong type from cache: %T", cv)
}
}
imp, err = loadImpteamFromServer(ctx, g, displayName, public)
if err != nil {
return imp, false, err
}
// If the team has any assertions skip caching.
if cacher != nil && !strings.Contains(imp.DisplayName, "@") {
cacher.Put(cacheKey, imp)
}
return imp, false, nil
}
func loadImpteamFromServer(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (imp implicitTeam, err error) {
mctx := libkb.NewMetaContext(ctx, g)
arg := libkb.NewAPIArg("team/implicit")
arg.SessionType = libkb.APISessionTypeOPTIONAL
arg.Args = libkb.HTTPArgs{
"display_name": libkb.S{Val: displayName},
"public": libkb.B{Val: public},
}
if err = mctx.G().API.GetDecode(mctx, arg, &imp); err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamReadError:
return imp, NewTeamDoesNotExistError(public, displayName)
case keybase1.StatusCode_SCTeamProvisionalCanKey, keybase1.StatusCode_SCTeamProvisionalCannotKey:
return imp, libkb.NewTeamProvisionalError(
(code == keybase1.StatusCode_SCTeamProvisionalCanKey), public, displayName)
}
}
return imp, err
}
return imp, nil
}
// attemptLoadImpteamAndConflits attempts to lead the implicit team with
// conflict, but it might find the team but not the specific conflict if the
// conflict was not in cache. This can be detected with `hitCache` return
// value, and mitigated by passing skipCache=false argument.
func | (ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName,
nameWithoutConflict string, preResolveDisplayName string, skipCache bool) (conflicts []keybase1.ImplicitTeamConflictInfo, teamID keybase1.TeamID, hitCache bool, err error) {
defer g.CTrace(ctx,
fmt.Sprintf("attemptLoadImpteamAndConflict(impName=%q,woConflict=%q,preResolve=%q,skipCache=%t)", impTeamName, nameWithoutConflict, preResolveDisplayName, skipCache),
&err)()
imp, hitCache, err := loadImpteam(ctx, g, nameWithoutConflict, impTeamName.IsPublic, skipCache)
if err != nil {
return conflicts, teamID, hitCache, err
}
if len(imp.Conflicts) > 0 {
g.Log.CDebugf(ctx, "LookupImplicitTeam found %v conflicts", len(imp.Conflicts))
}
// We will use this team. Changed later if we selected a conflict.
var foundSelectedConflict bool
teamID = imp.TeamID
// We still need to iterate over Conflicts because we are returning parsed
// conflict list. So even if caller is not requesting a conflict team, go
// through this loop.
for i, conflict := range imp.Conflicts {
g.Log.CDebugf(ctx, "| checking conflict: %+v (iter %d)", conflict, i)
conflictInfo, err := conflict.parse()
if err != nil {
// warn, don't fail
g.Log.CDebugf(ctx, "LookupImplicitTeam got conflict suffix: %v", err)
continue
}
if conflictInfo == nil {
g.Log.CDebugf(ctx, "| got unexpected nil conflictInfo (iter %d)", i)
continue
}
conflicts = append(conflicts, *conflictInfo)
g.Log.CDebugf(ctx, "| parsed conflict into conflictInfo: %+v", *conflictInfo)
if impTeamName.ConflictInfo != nil {
match := libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo) == libkb.FormatImplicitTeamDisplayNameSuffix(*conflictInfo)
if match {
teamID = conflict.TeamID
foundSelectedConflict = true
g.Log.CDebugf(ctx, "| found conflict suffix match: %v", teamID)
} else {
g.Log.CDebugf(ctx, "| conflict suffix didn't match (teamID %v)", conflict.TeamID)
}
}
}
if impTeamName.ConflictInfo != nil && !foundSelectedConflict {
// We got the team but didn't find the specific conflict requested.
return conflicts, teamID, hitCache, NewTeamDoesNotExistError(
impTeamName.IsPublic, "could not find team with suffix: %v", preResolveDisplayName)
}
return conflicts, teamID, hitCache, nil
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Does not resolve social assertions.
// preResolveDisplayName is used for logging and errors
func lookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext,
preResolveDisplayName string, impTeamNameInput keybase1.ImplicitTeamDisplayName, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
defer g.CTrace(ctx, fmt.Sprintf("lookupImplicitTeamAndConflicts(%v,opts=%+v)", preResolveDisplayName, opts), &err)()
impTeamName = impTeamNameInput
// Use a copy without the conflict info to hit the api endpoint
impTeamNameWithoutConflict := impTeamName
impTeamNameWithoutConflict.ConflictInfo = nil
lookupNameWithoutConflict, err := FormatImplicitTeamDisplayName(ctx, g, impTeamNameWithoutConflict)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Try the load first -- once with a cache, and once nameWithoutConflict.
var teamID keybase1.TeamID
var hitCache bool
conflicts, teamID, hitCache, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, false /* skipCache */)
if _, dne := err.(TeamDoesNotExistError); dne && hitCache {
// We are looking for conflict team that we didn't find. Maybe we have the team
// cached from before another team was resolved and this team became conflicted.
// Try again skipping cache.
g.Log.CDebugf(ctx, "attemptLoadImpteamAndConflict failed to load team %q from cache, trying again skipping cache", preResolveDisplayName)
conflicts, teamID, _, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, true /* skipCache */)
}
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
team, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: !opts.NoForceRepoll,
})
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Check the display names. This is how we make sure the server returned a team with the right members.
teamDisplayName, err := team.ImplicitTeamDisplayNameString(ctx)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
referenceImpName, err := FormatImplicitTeamDisplayName(ctx, g, impTeamName)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
if teamDisplayName != referenceImpName {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team name mismatch: %s != %s",
teamDisplayName, referenceImpName)
}
if team.IsPublic() != impTeamName.IsPublic {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team public-ness mismatch: %v != %v", team.IsPublic(), impTeamName.IsPublic)
}
return team, team.Name(), impTeamName, conflicts, nil
}
func isDupImplicitTeamError(err error) bool {
if err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamImplicitDuplicate:
return true
default:
// Nothing to do for other codes.
}
}
}
return false
}
func assertIsDisplayNameNormalized(displayName keybase1.ImplicitTeamDisplayName) error {
var errs []error
for _, userSet := range []keybase1.ImplicitTeamUserSet{displayName.Writers, displayName.Readers} {
for _, username := range userSet.KeybaseUsers {
if !libkb.IsLowercase(username) {
errs = append(errs, fmt.Errorf("Keybase username %q has mixed case", username))
}
}
for _, assertion := range userSet.UnresolvedUsers {
if !libkb.IsLowercase(assertion.User) {
errs = append(errs, fmt.Errorf("User %q in assertion %q has mixed case", assertion.User, assertion.String()))
}
}
}
return libkb.CombineErrors(errs...)
}
// LookupOrCreateImplicitTeam by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupOrCreateImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (res *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
ctx = libkb.WithLogTag(ctx, "LOCIT")
defer g.CTrace(ctx, fmt.Sprintf("LookupOrCreateImplicitTeam(%v)", displayName),
&err)()
lookupName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return res, teamName, impTeamName, err
}
if err := assertIsDisplayNameNormalized(lookupName); err != nil {
// Do not allow display names with mixed letter case - while it's legal
// to create them, it will not be possible to load them because API
// server always downcases during normalization.
return res, teamName, impTeamName, fmt.Errorf("Display name is not normalized: %s", err)
}
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName, lookupName, ImplicitTeamOptions{})
if err != nil {
if _, ok := err.(TeamDoesNotExistError); ok {
if lookupName.ConflictInfo != nil {
// Don't create it if a conflict is specified.
// Unlikely a caller would know the conflict info if it didn't exist.
return res, teamName, impTeamName, err
}
// If the team does not exist, then let's create it
impTeamName = lookupName
var teamID keybase1.TeamID
teamID, teamName, err = CreateImplicitTeam(ctx, g, impTeamName)
if err != nil {
if isDupImplicitTeamError(err) {
g.Log.CDebugf(ctx, "LookupOrCreateImplicitTeam: duplicate team, trying to lookup again: err: %s", err)
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName,
lookupName, ImplicitTeamOptions{})
}
return res, teamName, impTeamName, err
}
res, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: true,
AuditMode: keybase1.AuditMode_JUST_CREATED,
})
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, nil
}
func FormatImplicitTeamDisplayName(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, nil)
}
// Format an implicit display name, but order the specified username first in each of the writer and reader lists if it appears.
func FormatImplicitTeamDisplayNameWithUserFront(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, frontName libkb.NormalizedUsername) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, &frontName)
}
func formatImplicitTeamDisplayNameCommon(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, optionalFrontName *libkb.NormalizedUsername) (string, error) {
writerNames := make([]string, 0, len(impTeamName.Writers.KeybaseUsers)+len(impTeamName.Writers.UnresolvedUsers))
writerNames = append(writerNames, impTeamName.Writers.KeybaseUsers...)
for _, u := range impTeamName.Writers.UnresolvedUsers {
writerNames = append(writerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(writerNames)
} else {
sortStringsFront(writerNames, optionalFrontName.String())
}
readerNames := make([]string, 0, len(impTeamName.Readers.KeybaseUsers)+len(impTeamName.Readers.UnresolvedUsers))
readerNames = append(readerNames, impTeamName.Readers.KeybaseUsers...)
for _, u := range impTeamName.Readers.UnresolvedUsers {
readerNames = append(readerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(readerNames)
} else {
sortStringsFront(readerNames, optionalFrontName.String())
}
var suffix string
if impTeamName.ConflictInfo.IsConflict() {
suffix = libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo)
}
if len(writerNames) == 0 {
return "", fmt.Errorf("invalid implicit team name: no writers")
}
return tlf.NormalizeNamesInTLF(libkb.NewMetaContext(ctx, g), writerNames, readerNames, suffix)
}
// Sort a list of strings but order `front` in front IF it appears.
func sortStringsFront(ss []string, front string) {
sort.Slice(ss, func(i, j int) bool {
a := ss[i]
b := ss[j]
if a == front {
return true
}
if b == front {
return false
}
return a < b
})
}
func impTeamCacheKey(displayName string, public bool) string {
return fmt.Sprintf("%s-%v", displayName, public)
}
type implicitTeamCache struct {
cache *lru.Cache
}
func newImplicitTeamCache(g *libkb.GlobalContext) *implicitTeamCache {
cache, err := lru.New(libkb.ImplicitTeamCacheSize)
if err != nil {
panic(err)
}
return &implicitTeamCache{
cache: cache,
}
}
func (i *implicitTeamCache) Get(key interface{}) (interface{}, bool) {
return i.cache.Get(key)
}
func (i *implicitTeamCache) Put(key, value interface{}) bool {
return i.cache.Add(key, value)
}
func (i *implicitTeamCache) OnLogout(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
func (i *implicitTeamCache) OnDbNuke(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
var _ libkb.MemLRUer = &implicitTeamCache{}
func NewImplicitTeamCacheAndInstall(g *libkb.GlobalContext) {
cache := newImplicitTeamCache(g)
g.SetImplicitTeamCacher(cache)
g.AddLogoutHook(cache, "implicitTeamCache")
g.AddDbNukeHook(cache, "implicitTeamCache")
}
| attemptLoadImpteamAndConflict | identifier_name |
implicit.go | package teams
import (
"fmt"
"sort"
"strings"
lru "github.com/hashicorp/golang-lru"
"github.com/keybase/client/go/kbfs/tlf"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"golang.org/x/net/context"
)
type implicitTeamConflict struct {
// Note this TeamID is not validated by LookupImplicitTeam. Be aware of server trust.
TeamID keybase1.TeamID `json:"team_id"`
Generation int `json:"generation"`
ConflictDate string `json:"conflict_date"`
}
func (i *implicitTeamConflict) parse() (*keybase1.ImplicitTeamConflictInfo, error) {
return libkb.ParseImplicitTeamDisplayNameSuffix(fmt.Sprintf("(conflicted copy %s #%d)", i.ConflictDate, i.Generation))
}
type implicitTeam struct {
TeamID keybase1.TeamID `json:"team_id"`
DisplayName string `json:"display_name"`
Private bool `json:"is_private"`
Conflicts []implicitTeamConflict `json:"conflicts,omitempty"`
Status libkb.AppStatus `json:"status"`
}
func (i *implicitTeam) GetAppStatus() *libkb.AppStatus {
return &i.Status
}
type ImplicitTeamOptions struct {
NoForceRepoll bool
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
team, teamName, impTeamName, _, err = LookupImplicitTeamAndConflicts(ctx, g, displayName, public, opts)
return team, teamName, impTeamName, err
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
impName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
return lookupImplicitTeamAndConflicts(ctx, g, displayName, impName, opts)
}
func LookupImplicitTeamIDUntrusted(ctx context.Context, g *libkb.GlobalContext, displayName string,
public bool) (res keybase1.TeamID, err error) {
imp, _, err := loadImpteam(ctx, g, displayName, public, false /* skipCache */)
if err != nil {
return res, err
}
return imp.TeamID, nil
}
func loadImpteam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, skipCache bool) (imp implicitTeam, hitCache bool, err error) {
cacheKey := impTeamCacheKey(displayName, public)
cacher := g.GetImplicitTeamCacher()
if !skipCache && cacher != nil {
if cv, ok := cacher.Get(cacheKey); ok {
if imp, ok := cv.(implicitTeam); ok {
g.Log.CDebugf(ctx, "using cached iteam")
return imp, true, nil
}
g.Log.CDebugf(ctx, "Bad element of wrong type from cache: %T", cv)
}
}
imp, err = loadImpteamFromServer(ctx, g, displayName, public)
if err != nil {
return imp, false, err
}
// If the team has any assertions skip caching.
if cacher != nil && !strings.Contains(imp.DisplayName, "@") {
cacher.Put(cacheKey, imp)
}
return imp, false, nil
}
func loadImpteamFromServer(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (imp implicitTeam, err error) {
mctx := libkb.NewMetaContext(ctx, g)
arg := libkb.NewAPIArg("team/implicit")
arg.SessionType = libkb.APISessionTypeOPTIONAL
arg.Args = libkb.HTTPArgs{
"display_name": libkb.S{Val: displayName},
"public": libkb.B{Val: public},
}
if err = mctx.G().API.GetDecode(mctx, arg, &imp); err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok |
return imp, err
}
return imp, nil
}
// attemptLoadImpteamAndConflits attempts to lead the implicit team with
// conflict, but it might find the team but not the specific conflict if the
// conflict was not in cache. This can be detected with `hitCache` return
// value, and mitigated by passing skipCache=false argument.
func attemptLoadImpteamAndConflict(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName,
nameWithoutConflict string, preResolveDisplayName string, skipCache bool) (conflicts []keybase1.ImplicitTeamConflictInfo, teamID keybase1.TeamID, hitCache bool, err error) {
defer g.CTrace(ctx,
fmt.Sprintf("attemptLoadImpteamAndConflict(impName=%q,woConflict=%q,preResolve=%q,skipCache=%t)", impTeamName, nameWithoutConflict, preResolveDisplayName, skipCache),
&err)()
imp, hitCache, err := loadImpteam(ctx, g, nameWithoutConflict, impTeamName.IsPublic, skipCache)
if err != nil {
return conflicts, teamID, hitCache, err
}
if len(imp.Conflicts) > 0 {
g.Log.CDebugf(ctx, "LookupImplicitTeam found %v conflicts", len(imp.Conflicts))
}
// We will use this team. Changed later if we selected a conflict.
var foundSelectedConflict bool
teamID = imp.TeamID
// We still need to iterate over Conflicts because we are returning parsed
// conflict list. So even if caller is not requesting a conflict team, go
// through this loop.
for i, conflict := range imp.Conflicts {
g.Log.CDebugf(ctx, "| checking conflict: %+v (iter %d)", conflict, i)
conflictInfo, err := conflict.parse()
if err != nil {
// warn, don't fail
g.Log.CDebugf(ctx, "LookupImplicitTeam got conflict suffix: %v", err)
continue
}
if conflictInfo == nil {
g.Log.CDebugf(ctx, "| got unexpected nil conflictInfo (iter %d)", i)
continue
}
conflicts = append(conflicts, *conflictInfo)
g.Log.CDebugf(ctx, "| parsed conflict into conflictInfo: %+v", *conflictInfo)
if impTeamName.ConflictInfo != nil {
match := libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo) == libkb.FormatImplicitTeamDisplayNameSuffix(*conflictInfo)
if match {
teamID = conflict.TeamID
foundSelectedConflict = true
g.Log.CDebugf(ctx, "| found conflict suffix match: %v", teamID)
} else {
g.Log.CDebugf(ctx, "| conflict suffix didn't match (teamID %v)", conflict.TeamID)
}
}
}
if impTeamName.ConflictInfo != nil && !foundSelectedConflict {
// We got the team but didn't find the specific conflict requested.
return conflicts, teamID, hitCache, NewTeamDoesNotExistError(
impTeamName.IsPublic, "could not find team with suffix: %v", preResolveDisplayName)
}
return conflicts, teamID, hitCache, nil
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Does not resolve social assertions.
// preResolveDisplayName is used for logging and errors
func lookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext,
preResolveDisplayName string, impTeamNameInput keybase1.ImplicitTeamDisplayName, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
defer g.CTrace(ctx, fmt.Sprintf("lookupImplicitTeamAndConflicts(%v,opts=%+v)", preResolveDisplayName, opts), &err)()
impTeamName = impTeamNameInput
// Use a copy without the conflict info to hit the api endpoint
impTeamNameWithoutConflict := impTeamName
impTeamNameWithoutConflict.ConflictInfo = nil
lookupNameWithoutConflict, err := FormatImplicitTeamDisplayName(ctx, g, impTeamNameWithoutConflict)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Try the load first -- once with a cache, and once nameWithoutConflict.
var teamID keybase1.TeamID
var hitCache bool
conflicts, teamID, hitCache, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, false /* skipCache */)
if _, dne := err.(TeamDoesNotExistError); dne && hitCache {
// We are looking for conflict team that we didn't find. Maybe we have the team
// cached from before another team was resolved and this team became conflicted.
// Try again skipping cache.
g.Log.CDebugf(ctx, "attemptLoadImpteamAndConflict failed to load team %q from cache, trying again skipping cache", preResolveDisplayName)
conflicts, teamID, _, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, true /* skipCache */)
}
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
team, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: !opts.NoForceRepoll,
})
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Check the display names. This is how we make sure the server returned a team with the right members.
teamDisplayName, err := team.ImplicitTeamDisplayNameString(ctx)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
referenceImpName, err := FormatImplicitTeamDisplayName(ctx, g, impTeamName)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
if teamDisplayName != referenceImpName {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team name mismatch: %s != %s",
teamDisplayName, referenceImpName)
}
if team.IsPublic() != impTeamName.IsPublic {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team public-ness mismatch: %v != %v", team.IsPublic(), impTeamName.IsPublic)
}
return team, team.Name(), impTeamName, conflicts, nil
}
func isDupImplicitTeamError(err error) bool {
if err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamImplicitDuplicate:
return true
default:
// Nothing to do for other codes.
}
}
}
return false
}
func assertIsDisplayNameNormalized(displayName keybase1.ImplicitTeamDisplayName) error {
var errs []error
for _, userSet := range []keybase1.ImplicitTeamUserSet{displayName.Writers, displayName.Readers} {
for _, username := range userSet.KeybaseUsers {
if !libkb.IsLowercase(username) {
errs = append(errs, fmt.Errorf("Keybase username %q has mixed case", username))
}
}
for _, assertion := range userSet.UnresolvedUsers {
if !libkb.IsLowercase(assertion.User) {
errs = append(errs, fmt.Errorf("User %q in assertion %q has mixed case", assertion.User, assertion.String()))
}
}
}
return libkb.CombineErrors(errs...)
}
// LookupOrCreateImplicitTeam by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupOrCreateImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (res *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
ctx = libkb.WithLogTag(ctx, "LOCIT")
defer g.CTrace(ctx, fmt.Sprintf("LookupOrCreateImplicitTeam(%v)", displayName),
&err)()
lookupName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return res, teamName, impTeamName, err
}
if err := assertIsDisplayNameNormalized(lookupName); err != nil {
// Do not allow display names with mixed letter case - while it's legal
// to create them, it will not be possible to load them because API
// server always downcases during normalization.
return res, teamName, impTeamName, fmt.Errorf("Display name is not normalized: %s", err)
}
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName, lookupName, ImplicitTeamOptions{})
if err != nil {
if _, ok := err.(TeamDoesNotExistError); ok {
if lookupName.ConflictInfo != nil {
// Don't create it if a conflict is specified.
// Unlikely a caller would know the conflict info if it didn't exist.
return res, teamName, impTeamName, err
}
// If the team does not exist, then let's create it
impTeamName = lookupName
var teamID keybase1.TeamID
teamID, teamName, err = CreateImplicitTeam(ctx, g, impTeamName)
if err != nil {
if isDupImplicitTeamError(err) {
g.Log.CDebugf(ctx, "LookupOrCreateImplicitTeam: duplicate team, trying to lookup again: err: %s", err)
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName,
lookupName, ImplicitTeamOptions{})
}
return res, teamName, impTeamName, err
}
res, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: true,
AuditMode: keybase1.AuditMode_JUST_CREATED,
})
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, nil
}
func FormatImplicitTeamDisplayName(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, nil)
}
// Format an implicit display name, but order the specified username first in each of the writer and reader lists if it appears.
func FormatImplicitTeamDisplayNameWithUserFront(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, frontName libkb.NormalizedUsername) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, &frontName)
}
func formatImplicitTeamDisplayNameCommon(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, optionalFrontName *libkb.NormalizedUsername) (string, error) {
writerNames := make([]string, 0, len(impTeamName.Writers.KeybaseUsers)+len(impTeamName.Writers.UnresolvedUsers))
writerNames = append(writerNames, impTeamName.Writers.KeybaseUsers...)
for _, u := range impTeamName.Writers.UnresolvedUsers {
writerNames = append(writerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(writerNames)
} else {
sortStringsFront(writerNames, optionalFrontName.String())
}
readerNames := make([]string, 0, len(impTeamName.Readers.KeybaseUsers)+len(impTeamName.Readers.UnresolvedUsers))
readerNames = append(readerNames, impTeamName.Readers.KeybaseUsers...)
for _, u := range impTeamName.Readers.UnresolvedUsers {
readerNames = append(readerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(readerNames)
} else {
sortStringsFront(readerNames, optionalFrontName.String())
}
var suffix string
if impTeamName.ConflictInfo.IsConflict() {
suffix = libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo)
}
if len(writerNames) == 0 {
return "", fmt.Errorf("invalid implicit team name: no writers")
}
return tlf.NormalizeNamesInTLF(libkb.NewMetaContext(ctx, g), writerNames, readerNames, suffix)
}
// Sort a list of strings but order `front` in front IF it appears.
func sortStringsFront(ss []string, front string) {
sort.Slice(ss, func(i, j int) bool {
a := ss[i]
b := ss[j]
if a == front {
return true
}
if b == front {
return false
}
return a < b
})
}
func impTeamCacheKey(displayName string, public bool) string {
return fmt.Sprintf("%s-%v", displayName, public)
}
type implicitTeamCache struct {
cache *lru.Cache
}
func newImplicitTeamCache(g *libkb.GlobalContext) *implicitTeamCache {
cache, err := lru.New(libkb.ImplicitTeamCacheSize)
if err != nil {
panic(err)
}
return &implicitTeamCache{
cache: cache,
}
}
func (i *implicitTeamCache) Get(key interface{}) (interface{}, bool) {
return i.cache.Get(key)
}
func (i *implicitTeamCache) Put(key, value interface{}) bool {
return i.cache.Add(key, value)
}
func (i *implicitTeamCache) OnLogout(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
func (i *implicitTeamCache) OnDbNuke(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
var _ libkb.MemLRUer = &implicitTeamCache{}
func NewImplicitTeamCacheAndInstall(g *libkb.GlobalContext) {
cache := newImplicitTeamCache(g)
g.SetImplicitTeamCacher(cache)
g.AddLogoutHook(cache, "implicitTeamCache")
g.AddDbNukeHook(cache, "implicitTeamCache")
}
| {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamReadError:
return imp, NewTeamDoesNotExistError(public, displayName)
case keybase1.StatusCode_SCTeamProvisionalCanKey, keybase1.StatusCode_SCTeamProvisionalCannotKey:
return imp, libkb.NewTeamProvisionalError(
(code == keybase1.StatusCode_SCTeamProvisionalCanKey), public, displayName)
}
} | conditional_block |
implicit.go | package teams
import (
"fmt"
"sort"
"strings"
lru "github.com/hashicorp/golang-lru"
"github.com/keybase/client/go/kbfs/tlf"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"golang.org/x/net/context"
)
type implicitTeamConflict struct {
// Note this TeamID is not validated by LookupImplicitTeam. Be aware of server trust.
TeamID keybase1.TeamID `json:"team_id"`
Generation int `json:"generation"`
ConflictDate string `json:"conflict_date"`
}
func (i *implicitTeamConflict) parse() (*keybase1.ImplicitTeamConflictInfo, error) {
return libkb.ParseImplicitTeamDisplayNameSuffix(fmt.Sprintf("(conflicted copy %s #%d)", i.ConflictDate, i.Generation))
}
type implicitTeam struct {
TeamID keybase1.TeamID `json:"team_id"`
DisplayName string `json:"display_name"`
Private bool `json:"is_private"`
Conflicts []implicitTeamConflict `json:"conflicts,omitempty"`
Status libkb.AppStatus `json:"status"`
}
func (i *implicitTeam) GetAppStatus() *libkb.AppStatus {
return &i.Status
}
type ImplicitTeamOptions struct {
NoForceRepoll bool
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
team, teamName, impTeamName, _, err = LookupImplicitTeamAndConflicts(ctx, g, displayName, public, opts)
return team, teamName, impTeamName, err
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
impName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
return lookupImplicitTeamAndConflicts(ctx, g, displayName, impName, opts)
}
func LookupImplicitTeamIDUntrusted(ctx context.Context, g *libkb.GlobalContext, displayName string,
public bool) (res keybase1.TeamID, err error) {
imp, _, err := loadImpteam(ctx, g, displayName, public, false /* skipCache */)
if err != nil {
return res, err
}
return imp.TeamID, nil
}
func loadImpteam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, skipCache bool) (imp implicitTeam, hitCache bool, err error) {
cacheKey := impTeamCacheKey(displayName, public)
cacher := g.GetImplicitTeamCacher()
if !skipCache && cacher != nil {
if cv, ok := cacher.Get(cacheKey); ok {
if imp, ok := cv.(implicitTeam); ok {
g.Log.CDebugf(ctx, "using cached iteam")
return imp, true, nil
}
g.Log.CDebugf(ctx, "Bad element of wrong type from cache: %T", cv)
}
}
imp, err = loadImpteamFromServer(ctx, g, displayName, public)
if err != nil {
return imp, false, err
}
// If the team has any assertions skip caching.
if cacher != nil && !strings.Contains(imp.DisplayName, "@") {
cacher.Put(cacheKey, imp)
}
return imp, false, nil
}
func loadImpteamFromServer(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (imp implicitTeam, err error) |
// attemptLoadImpteamAndConflits attempts to lead the implicit team with
// conflict, but it might find the team but not the specific conflict if the
// conflict was not in cache. This can be detected with `hitCache` return
// value, and mitigated by passing skipCache=false argument.
func attemptLoadImpteamAndConflict(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName,
nameWithoutConflict string, preResolveDisplayName string, skipCache bool) (conflicts []keybase1.ImplicitTeamConflictInfo, teamID keybase1.TeamID, hitCache bool, err error) {
defer g.CTrace(ctx,
fmt.Sprintf("attemptLoadImpteamAndConflict(impName=%q,woConflict=%q,preResolve=%q,skipCache=%t)", impTeamName, nameWithoutConflict, preResolveDisplayName, skipCache),
&err)()
imp, hitCache, err := loadImpteam(ctx, g, nameWithoutConflict, impTeamName.IsPublic, skipCache)
if err != nil {
return conflicts, teamID, hitCache, err
}
if len(imp.Conflicts) > 0 {
g.Log.CDebugf(ctx, "LookupImplicitTeam found %v conflicts", len(imp.Conflicts))
}
// We will use this team. Changed later if we selected a conflict.
var foundSelectedConflict bool
teamID = imp.TeamID
// We still need to iterate over Conflicts because we are returning parsed
// conflict list. So even if caller is not requesting a conflict team, go
// through this loop.
for i, conflict := range imp.Conflicts {
g.Log.CDebugf(ctx, "| checking conflict: %+v (iter %d)", conflict, i)
conflictInfo, err := conflict.parse()
if err != nil {
// warn, don't fail
g.Log.CDebugf(ctx, "LookupImplicitTeam got conflict suffix: %v", err)
continue
}
if conflictInfo == nil {
g.Log.CDebugf(ctx, "| got unexpected nil conflictInfo (iter %d)", i)
continue
}
conflicts = append(conflicts, *conflictInfo)
g.Log.CDebugf(ctx, "| parsed conflict into conflictInfo: %+v", *conflictInfo)
if impTeamName.ConflictInfo != nil {
match := libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo) == libkb.FormatImplicitTeamDisplayNameSuffix(*conflictInfo)
if match {
teamID = conflict.TeamID
foundSelectedConflict = true
g.Log.CDebugf(ctx, "| found conflict suffix match: %v", teamID)
} else {
g.Log.CDebugf(ctx, "| conflict suffix didn't match (teamID %v)", conflict.TeamID)
}
}
}
if impTeamName.ConflictInfo != nil && !foundSelectedConflict {
// We got the team but didn't find the specific conflict requested.
return conflicts, teamID, hitCache, NewTeamDoesNotExistError(
impTeamName.IsPublic, "could not find team with suffix: %v", preResolveDisplayName)
}
return conflicts, teamID, hitCache, nil
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Does not resolve social assertions.
// preResolveDisplayName is used for logging and errors
func lookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext,
preResolveDisplayName string, impTeamNameInput keybase1.ImplicitTeamDisplayName, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
defer g.CTrace(ctx, fmt.Sprintf("lookupImplicitTeamAndConflicts(%v,opts=%+v)", preResolveDisplayName, opts), &err)()
impTeamName = impTeamNameInput
// Use a copy without the conflict info to hit the api endpoint
impTeamNameWithoutConflict := impTeamName
impTeamNameWithoutConflict.ConflictInfo = nil
lookupNameWithoutConflict, err := FormatImplicitTeamDisplayName(ctx, g, impTeamNameWithoutConflict)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Try the load first -- once with a cache, and once nameWithoutConflict.
var teamID keybase1.TeamID
var hitCache bool
conflicts, teamID, hitCache, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, false /* skipCache */)
if _, dne := err.(TeamDoesNotExistError); dne && hitCache {
// We are looking for conflict team that we didn't find. Maybe we have the team
// cached from before another team was resolved and this team became conflicted.
// Try again skipping cache.
g.Log.CDebugf(ctx, "attemptLoadImpteamAndConflict failed to load team %q from cache, trying again skipping cache", preResolveDisplayName)
conflicts, teamID, _, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, true /* skipCache */)
}
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
team, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: !opts.NoForceRepoll,
})
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Check the display names. This is how we make sure the server returned a team with the right members.
teamDisplayName, err := team.ImplicitTeamDisplayNameString(ctx)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
referenceImpName, err := FormatImplicitTeamDisplayName(ctx, g, impTeamName)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
if teamDisplayName != referenceImpName {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team name mismatch: %s != %s",
teamDisplayName, referenceImpName)
}
if team.IsPublic() != impTeamName.IsPublic {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team public-ness mismatch: %v != %v", team.IsPublic(), impTeamName.IsPublic)
}
return team, team.Name(), impTeamName, conflicts, nil
}
func isDupImplicitTeamError(err error) bool {
if err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamImplicitDuplicate:
return true
default:
// Nothing to do for other codes.
}
}
}
return false
}
func assertIsDisplayNameNormalized(displayName keybase1.ImplicitTeamDisplayName) error {
var errs []error
for _, userSet := range []keybase1.ImplicitTeamUserSet{displayName.Writers, displayName.Readers} {
for _, username := range userSet.KeybaseUsers {
if !libkb.IsLowercase(username) {
errs = append(errs, fmt.Errorf("Keybase username %q has mixed case", username))
}
}
for _, assertion := range userSet.UnresolvedUsers {
if !libkb.IsLowercase(assertion.User) {
errs = append(errs, fmt.Errorf("User %q in assertion %q has mixed case", assertion.User, assertion.String()))
}
}
}
return libkb.CombineErrors(errs...)
}
// LookupOrCreateImplicitTeam by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupOrCreateImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (res *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
ctx = libkb.WithLogTag(ctx, "LOCIT")
defer g.CTrace(ctx, fmt.Sprintf("LookupOrCreateImplicitTeam(%v)", displayName),
&err)()
lookupName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return res, teamName, impTeamName, err
}
if err := assertIsDisplayNameNormalized(lookupName); err != nil {
// Do not allow display names with mixed letter case - while it's legal
// to create them, it will not be possible to load them because API
// server always downcases during normalization.
return res, teamName, impTeamName, fmt.Errorf("Display name is not normalized: %s", err)
}
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName, lookupName, ImplicitTeamOptions{})
if err != nil {
if _, ok := err.(TeamDoesNotExistError); ok {
if lookupName.ConflictInfo != nil {
// Don't create it if a conflict is specified.
// Unlikely a caller would know the conflict info if it didn't exist.
return res, teamName, impTeamName, err
}
// If the team does not exist, then let's create it
impTeamName = lookupName
var teamID keybase1.TeamID
teamID, teamName, err = CreateImplicitTeam(ctx, g, impTeamName)
if err != nil {
if isDupImplicitTeamError(err) {
g.Log.CDebugf(ctx, "LookupOrCreateImplicitTeam: duplicate team, trying to lookup again: err: %s", err)
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName,
lookupName, ImplicitTeamOptions{})
}
return res, teamName, impTeamName, err
}
res, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: true,
AuditMode: keybase1.AuditMode_JUST_CREATED,
})
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, nil
}
func FormatImplicitTeamDisplayName(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, nil)
}
// Format an implicit display name, but order the specified username first in each of the writer and reader lists if it appears.
func FormatImplicitTeamDisplayNameWithUserFront(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, frontName libkb.NormalizedUsername) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, &frontName)
}
func formatImplicitTeamDisplayNameCommon(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, optionalFrontName *libkb.NormalizedUsername) (string, error) {
writerNames := make([]string, 0, len(impTeamName.Writers.KeybaseUsers)+len(impTeamName.Writers.UnresolvedUsers))
writerNames = append(writerNames, impTeamName.Writers.KeybaseUsers...)
for _, u := range impTeamName.Writers.UnresolvedUsers {
writerNames = append(writerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(writerNames)
} else {
sortStringsFront(writerNames, optionalFrontName.String())
}
readerNames := make([]string, 0, len(impTeamName.Readers.KeybaseUsers)+len(impTeamName.Readers.UnresolvedUsers))
readerNames = append(readerNames, impTeamName.Readers.KeybaseUsers...)
for _, u := range impTeamName.Readers.UnresolvedUsers {
readerNames = append(readerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(readerNames)
} else {
sortStringsFront(readerNames, optionalFrontName.String())
}
var suffix string
if impTeamName.ConflictInfo.IsConflict() {
suffix = libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo)
}
if len(writerNames) == 0 {
return "", fmt.Errorf("invalid implicit team name: no writers")
}
return tlf.NormalizeNamesInTLF(libkb.NewMetaContext(ctx, g), writerNames, readerNames, suffix)
}
// Sort a list of strings but order `front` in front IF it appears.
func sortStringsFront(ss []string, front string) {
sort.Slice(ss, func(i, j int) bool {
a := ss[i]
b := ss[j]
if a == front {
return true
}
if b == front {
return false
}
return a < b
})
}
func impTeamCacheKey(displayName string, public bool) string {
return fmt.Sprintf("%s-%v", displayName, public)
}
type implicitTeamCache struct {
cache *lru.Cache
}
func newImplicitTeamCache(g *libkb.GlobalContext) *implicitTeamCache {
cache, err := lru.New(libkb.ImplicitTeamCacheSize)
if err != nil {
panic(err)
}
return &implicitTeamCache{
cache: cache,
}
}
func (i *implicitTeamCache) Get(key interface{}) (interface{}, bool) {
return i.cache.Get(key)
}
func (i *implicitTeamCache) Put(key, value interface{}) bool {
return i.cache.Add(key, value)
}
func (i *implicitTeamCache) OnLogout(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
func (i *implicitTeamCache) OnDbNuke(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
var _ libkb.MemLRUer = &implicitTeamCache{}
func NewImplicitTeamCacheAndInstall(g *libkb.GlobalContext) {
cache := newImplicitTeamCache(g)
g.SetImplicitTeamCacher(cache)
g.AddLogoutHook(cache, "implicitTeamCache")
g.AddDbNukeHook(cache, "implicitTeamCache")
}
| {
mctx := libkb.NewMetaContext(ctx, g)
arg := libkb.NewAPIArg("team/implicit")
arg.SessionType = libkb.APISessionTypeOPTIONAL
arg.Args = libkb.HTTPArgs{
"display_name": libkb.S{Val: displayName},
"public": libkb.B{Val: public},
}
if err = mctx.G().API.GetDecode(mctx, arg, &imp); err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamReadError:
return imp, NewTeamDoesNotExistError(public, displayName)
case keybase1.StatusCode_SCTeamProvisionalCanKey, keybase1.StatusCode_SCTeamProvisionalCannotKey:
return imp, libkb.NewTeamProvisionalError(
(code == keybase1.StatusCode_SCTeamProvisionalCanKey), public, displayName)
}
}
return imp, err
}
return imp, nil
} | identifier_body |
implicit.go | package teams
import (
"fmt"
"sort"
"strings"
lru "github.com/hashicorp/golang-lru"
"github.com/keybase/client/go/kbfs/tlf"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"golang.org/x/net/context"
)
type implicitTeamConflict struct {
// Note this TeamID is not validated by LookupImplicitTeam. Be aware of server trust.
TeamID keybase1.TeamID `json:"team_id"`
Generation int `json:"generation"`
ConflictDate string `json:"conflict_date"`
}
func (i *implicitTeamConflict) parse() (*keybase1.ImplicitTeamConflictInfo, error) {
return libkb.ParseImplicitTeamDisplayNameSuffix(fmt.Sprintf("(conflicted copy %s #%d)", i.ConflictDate, i.Generation))
}
type implicitTeam struct {
TeamID keybase1.TeamID `json:"team_id"`
DisplayName string `json:"display_name"`
Private bool `json:"is_private"`
Conflicts []implicitTeamConflict `json:"conflicts,omitempty"`
Status libkb.AppStatus `json:"status"`
}
func (i *implicitTeam) GetAppStatus() *libkb.AppStatus {
return &i.Status
}
type ImplicitTeamOptions struct {
NoForceRepoll bool
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
team, teamName, impTeamName, _, err = LookupImplicitTeamAndConflicts(ctx, g, displayName, public, opts)
return team, teamName, impTeamName, err
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
impName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
return lookupImplicitTeamAndConflicts(ctx, g, displayName, impName, opts)
}
func LookupImplicitTeamIDUntrusted(ctx context.Context, g *libkb.GlobalContext, displayName string,
public bool) (res keybase1.TeamID, err error) {
imp, _, err := loadImpteam(ctx, g, displayName, public, false /* skipCache */)
if err != nil {
return res, err
}
return imp.TeamID, nil
}
func loadImpteam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool, skipCache bool) (imp implicitTeam, hitCache bool, err error) {
cacheKey := impTeamCacheKey(displayName, public)
cacher := g.GetImplicitTeamCacher()
if !skipCache && cacher != nil {
if cv, ok := cacher.Get(cacheKey); ok {
if imp, ok := cv.(implicitTeam); ok {
g.Log.CDebugf(ctx, "using cached iteam")
return imp, true, nil
}
g.Log.CDebugf(ctx, "Bad element of wrong type from cache: %T", cv)
}
}
imp, err = loadImpteamFromServer(ctx, g, displayName, public)
if err != nil {
return imp, false, err
}
// If the team has any assertions skip caching.
if cacher != nil && !strings.Contains(imp.DisplayName, "@") {
cacher.Put(cacheKey, imp)
}
return imp, false, nil
}
func loadImpteamFromServer(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (imp implicitTeam, err error) {
mctx := libkb.NewMetaContext(ctx, g)
arg := libkb.NewAPIArg("team/implicit")
arg.SessionType = libkb.APISessionTypeOPTIONAL
arg.Args = libkb.HTTPArgs{
"display_name": libkb.S{Val: displayName},
"public": libkb.B{Val: public},
}
if err = mctx.G().API.GetDecode(mctx, arg, &imp); err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamReadError:
return imp, NewTeamDoesNotExistError(public, displayName)
case keybase1.StatusCode_SCTeamProvisionalCanKey, keybase1.StatusCode_SCTeamProvisionalCannotKey:
return imp, libkb.NewTeamProvisionalError(
(code == keybase1.StatusCode_SCTeamProvisionalCanKey), public, displayName)
}
}
return imp, err
}
return imp, nil
}
// attemptLoadImpteamAndConflits attempts to lead the implicit team with
// conflict, but it might find the team but not the specific conflict if the
// conflict was not in cache. This can be detected with `hitCache` return
// value, and mitigated by passing skipCache=false argument.
func attemptLoadImpteamAndConflict(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName,
nameWithoutConflict string, preResolveDisplayName string, skipCache bool) (conflicts []keybase1.ImplicitTeamConflictInfo, teamID keybase1.TeamID, hitCache bool, err error) {
defer g.CTrace(ctx,
fmt.Sprintf("attemptLoadImpteamAndConflict(impName=%q,woConflict=%q,preResolve=%q,skipCache=%t)", impTeamName, nameWithoutConflict, preResolveDisplayName, skipCache),
&err)()
imp, hitCache, err := loadImpteam(ctx, g, nameWithoutConflict, impTeamName.IsPublic, skipCache)
if err != nil {
return conflicts, teamID, hitCache, err
}
if len(imp.Conflicts) > 0 {
g.Log.CDebugf(ctx, "LookupImplicitTeam found %v conflicts", len(imp.Conflicts))
}
// We will use this team. Changed later if we selected a conflict.
var foundSelectedConflict bool
teamID = imp.TeamID
// We still need to iterate over Conflicts because we are returning parsed
// conflict list. So even if caller is not requesting a conflict team, go
// through this loop.
for i, conflict := range imp.Conflicts {
g.Log.CDebugf(ctx, "| checking conflict: %+v (iter %d)", conflict, i)
conflictInfo, err := conflict.parse()
if err != nil {
// warn, don't fail
g.Log.CDebugf(ctx, "LookupImplicitTeam got conflict suffix: %v", err)
continue
}
if conflictInfo == nil {
g.Log.CDebugf(ctx, "| got unexpected nil conflictInfo (iter %d)", i)
continue
}
conflicts = append(conflicts, *conflictInfo)
g.Log.CDebugf(ctx, "| parsed conflict into conflictInfo: %+v", *conflictInfo)
if impTeamName.ConflictInfo != nil {
match := libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo) == libkb.FormatImplicitTeamDisplayNameSuffix(*conflictInfo)
if match {
teamID = conflict.TeamID
foundSelectedConflict = true
g.Log.CDebugf(ctx, "| found conflict suffix match: %v", teamID)
} else {
g.Log.CDebugf(ctx, "| conflict suffix didn't match (teamID %v)", conflict.TeamID)
}
}
}
if impTeamName.ConflictInfo != nil && !foundSelectedConflict {
// We got the team but didn't find the specific conflict requested.
return conflicts, teamID, hitCache, NewTeamDoesNotExistError(
impTeamName.IsPublic, "could not find team with suffix: %v", preResolveDisplayName)
}
return conflicts, teamID, hitCache, nil
}
// Lookup an implicit team by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Does not resolve social assertions.
// preResolveDisplayName is used for logging and errors
func lookupImplicitTeamAndConflicts(ctx context.Context, g *libkb.GlobalContext,
preResolveDisplayName string, impTeamNameInput keybase1.ImplicitTeamDisplayName, opts ImplicitTeamOptions) (
team *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, conflicts []keybase1.ImplicitTeamConflictInfo, err error) {
defer g.CTrace(ctx, fmt.Sprintf("lookupImplicitTeamAndConflicts(%v,opts=%+v)", preResolveDisplayName, opts), &err)()
impTeamName = impTeamNameInput
// Use a copy without the conflict info to hit the api endpoint
impTeamNameWithoutConflict := impTeamName
impTeamNameWithoutConflict.ConflictInfo = nil
lookupNameWithoutConflict, err := FormatImplicitTeamDisplayName(ctx, g, impTeamNameWithoutConflict)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Try the load first -- once with a cache, and once nameWithoutConflict.
var teamID keybase1.TeamID
var hitCache bool
conflicts, teamID, hitCache, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, false /* skipCache */)
if _, dne := err.(TeamDoesNotExistError); dne && hitCache {
// We are looking for conflict team that we didn't find. Maybe we have the team
// cached from before another team was resolved and this team became conflicted.
// Try again skipping cache.
g.Log.CDebugf(ctx, "attemptLoadImpteamAndConflict failed to load team %q from cache, trying again skipping cache", preResolveDisplayName)
conflicts, teamID, _, err = attemptLoadImpteamAndConflict(ctx, g, impTeamName, lookupNameWithoutConflict, preResolveDisplayName, true /* skipCache */)
}
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
team, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: !opts.NoForceRepoll,
})
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
// Check the display names. This is how we make sure the server returned a team with the right members.
teamDisplayName, err := team.ImplicitTeamDisplayNameString(ctx)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
referenceImpName, err := FormatImplicitTeamDisplayName(ctx, g, impTeamName)
if err != nil {
return team, teamName, impTeamName, conflicts, err
}
if teamDisplayName != referenceImpName {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team name mismatch: %s != %s",
teamDisplayName, referenceImpName)
}
if team.IsPublic() != impTeamName.IsPublic {
return team, teamName, impTeamName, conflicts, fmt.Errorf("implicit team public-ness mismatch: %v != %v", team.IsPublic(), impTeamName.IsPublic)
}
return team, team.Name(), impTeamName, conflicts, nil
}
func isDupImplicitTeamError(err error) bool {
if err != nil {
if aerr, ok := err.(libkb.AppStatusError); ok {
code := keybase1.StatusCode(aerr.Code)
switch code {
case keybase1.StatusCode_SCTeamImplicitDuplicate:
return true
default:
// Nothing to do for other codes.
}
}
}
return false
}
func assertIsDisplayNameNormalized(displayName keybase1.ImplicitTeamDisplayName) error {
var errs []error
for _, userSet := range []keybase1.ImplicitTeamUserSet{displayName.Writers, displayName.Readers} {
for _, username := range userSet.KeybaseUsers {
if !libkb.IsLowercase(username) {
errs = append(errs, fmt.Errorf("Keybase username %q has mixed case", username))
}
}
for _, assertion := range userSet.UnresolvedUsers {
if !libkb.IsLowercase(assertion.User) {
errs = append(errs, fmt.Errorf("User %q in assertion %q has mixed case", assertion.User, assertion.String()))
}
}
}
return libkb.CombineErrors(errs...)
}
// LookupOrCreateImplicitTeam by name like "alice,bob+bob@twitter (conflicted copy 2017-03-04 #1)"
// Resolves social assertions.
func LookupOrCreateImplicitTeam(ctx context.Context, g *libkb.GlobalContext, displayName string, public bool) (res *Team, teamName keybase1.TeamName, impTeamName keybase1.ImplicitTeamDisplayName, err error) {
ctx = libkb.WithLogTag(ctx, "LOCIT")
defer g.CTrace(ctx, fmt.Sprintf("LookupOrCreateImplicitTeam(%v)", displayName),
&err)()
lookupName, err := ResolveImplicitTeamDisplayName(ctx, g, displayName, public)
if err != nil {
return res, teamName, impTeamName, err
}
if err := assertIsDisplayNameNormalized(lookupName); err != nil {
// Do not allow display names with mixed letter case - while it's legal
// to create them, it will not be possible to load them because API
// server always downcases during normalization.
return res, teamName, impTeamName, fmt.Errorf("Display name is not normalized: %s", err)
}
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName, lookupName, ImplicitTeamOptions{})
if err != nil {
if _, ok := err.(TeamDoesNotExistError); ok {
if lookupName.ConflictInfo != nil {
// Don't create it if a conflict is specified.
// Unlikely a caller would know the conflict info if it didn't exist.
return res, teamName, impTeamName, err
}
// If the team does not exist, then let's create it
impTeamName = lookupName
var teamID keybase1.TeamID
teamID, teamName, err = CreateImplicitTeam(ctx, g, impTeamName)
if err != nil {
if isDupImplicitTeamError(err) {
g.Log.CDebugf(ctx, "LookupOrCreateImplicitTeam: duplicate team, trying to lookup again: err: %s", err)
res, teamName, impTeamName, _, err = lookupImplicitTeamAndConflicts(ctx, g, displayName,
lookupName, ImplicitTeamOptions{})
}
return res, teamName, impTeamName, err
}
res, err = Load(ctx, g, keybase1.LoadTeamArg{
ID: teamID,
Public: impTeamName.IsPublic,
ForceRepoll: true,
AuditMode: keybase1.AuditMode_JUST_CREATED,
})
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, err
}
return res, teamName, impTeamName, nil
}
func FormatImplicitTeamDisplayName(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, nil)
}
// Format an implicit display name, but order the specified username first in each of the writer and reader lists if it appears.
func FormatImplicitTeamDisplayNameWithUserFront(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, frontName libkb.NormalizedUsername) (string, error) {
return formatImplicitTeamDisplayNameCommon(ctx, g, impTeamName, &frontName)
} | for _, u := range impTeamName.Writers.UnresolvedUsers {
writerNames = append(writerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(writerNames)
} else {
sortStringsFront(writerNames, optionalFrontName.String())
}
readerNames := make([]string, 0, len(impTeamName.Readers.KeybaseUsers)+len(impTeamName.Readers.UnresolvedUsers))
readerNames = append(readerNames, impTeamName.Readers.KeybaseUsers...)
for _, u := range impTeamName.Readers.UnresolvedUsers {
readerNames = append(readerNames, u.String())
}
if optionalFrontName == nil {
sort.Strings(readerNames)
} else {
sortStringsFront(readerNames, optionalFrontName.String())
}
var suffix string
if impTeamName.ConflictInfo.IsConflict() {
suffix = libkb.FormatImplicitTeamDisplayNameSuffix(*impTeamName.ConflictInfo)
}
if len(writerNames) == 0 {
return "", fmt.Errorf("invalid implicit team name: no writers")
}
return tlf.NormalizeNamesInTLF(libkb.NewMetaContext(ctx, g), writerNames, readerNames, suffix)
}
// Sort a list of strings but order `front` in front IF it appears.
func sortStringsFront(ss []string, front string) {
sort.Slice(ss, func(i, j int) bool {
a := ss[i]
b := ss[j]
if a == front {
return true
}
if b == front {
return false
}
return a < b
})
}
func impTeamCacheKey(displayName string, public bool) string {
return fmt.Sprintf("%s-%v", displayName, public)
}
type implicitTeamCache struct {
cache *lru.Cache
}
func newImplicitTeamCache(g *libkb.GlobalContext) *implicitTeamCache {
cache, err := lru.New(libkb.ImplicitTeamCacheSize)
if err != nil {
panic(err)
}
return &implicitTeamCache{
cache: cache,
}
}
func (i *implicitTeamCache) Get(key interface{}) (interface{}, bool) {
return i.cache.Get(key)
}
func (i *implicitTeamCache) Put(key, value interface{}) bool {
return i.cache.Add(key, value)
}
func (i *implicitTeamCache) OnLogout(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
func (i *implicitTeamCache) OnDbNuke(m libkb.MetaContext) error {
i.cache.Purge()
return nil
}
var _ libkb.MemLRUer = &implicitTeamCache{}
func NewImplicitTeamCacheAndInstall(g *libkb.GlobalContext) {
cache := newImplicitTeamCache(g)
g.SetImplicitTeamCacher(cache)
g.AddLogoutHook(cache, "implicitTeamCache")
g.AddDbNukeHook(cache, "implicitTeamCache")
} |
func formatImplicitTeamDisplayNameCommon(ctx context.Context, g *libkb.GlobalContext, impTeamName keybase1.ImplicitTeamDisplayName, optionalFrontName *libkb.NormalizedUsername) (string, error) {
writerNames := make([]string, 0, len(impTeamName.Writers.KeybaseUsers)+len(impTeamName.Writers.UnresolvedUsers))
writerNames = append(writerNames, impTeamName.Writers.KeybaseUsers...) | random_line_split |
keyframe.go | // keyframe: take a snapshot (as a raw raster) of th specified slide
package main
import (
"code.google.com/p/go-charset/charset"
_ "code.google.com/p/go-charset/data"
"flag"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/ajstarks/deck"
"github.com/ajstarks/openvg"
)
var wintrans, _ = charset.TranslatorTo("windows-1252")
var codemap = strings.NewReplacer("\t", " ")
// dodeck sets up the graphics environment and kicks off the interaction
func dodeck(filename string, slidenum int) {
w, h := openvg.Init()
defer openvg.Finish()
d, err := deck.Read(filename, w, h)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
openvg.FillRGB(200, 200, 200, 1)
openvg.Rect(0, 0, openvg.VGfloat(w), openvg.VGfloat(h))
showslide(d, slidenum)
openvg.SaveEnd(fmt.Sprintf("%s-slide-%04d", filename, slidenum))
}
// pct computes percentages
func pct(p float64, m openvg.VGfloat) openvg.VGfloat {
return openvg.VGfloat((p / 100.0)) * m
}
func pctwidth(p float64, p1, p2 openvg.VGfloat) openvg.VGfloat |
func fromUTF8(s string) string {
_, b, err := wintrans.Translate([]byte(s), true)
if err != nil {
return s
}
return string(b)
}
//showtext displays text
func showtext(x, y openvg.VGfloat, s, align, font string, fs openvg.VGfloat) {
t := fromUTF8(s)
fontsize := int(fs)
switch align {
case "center", "middle", "mid", "c":
openvg.TextMid(x, y, t, font, fontsize)
case "right", "end", "e":
openvg.TextEnd(x, y, t, font, fontsize)
default:
openvg.Text(x, y, t, font, fontsize)
}
}
// dimen returns device dimemsion from percentages
func dimen(d deck.Deck, x, y, s float64) (xo, yo, so openvg.VGfloat) {
xf, yf, sf := deck.Dimen(d.Canvas, x, y, s)
xo, yo, so = openvg.VGfloat(xf), openvg.VGfloat(yf), openvg.VGfloat(sf)*0.8
return
}
// showlide displays slides
func showslide(d deck.Deck, n int) {
if n < 0 || n > len(d.Slide)-1 {
return
}
slide := d.Slide[n]
if slide.Bg == "" {
slide.Bg = "white"
}
if slide.Fg == "" {
slide.Fg = "black"
}
openvg.Start(d.Canvas.Width, d.Canvas.Height)
cw := openvg.VGfloat(d.Canvas.Width)
ch := openvg.VGfloat(d.Canvas.Height)
openvg.FillColor(slide.Bg)
openvg.Rect(0, 0, cw, ch)
var x, y, fs openvg.VGfloat
// every image in the slide
for _, im := range slide.Image {
x = pct(im.Xp, cw)
y = pct(im.Yp, ch)
midx := openvg.VGfloat(im.Width / 2)
midy := openvg.VGfloat(im.Height / 2)
openvg.Image(x-midx, y-midy, im.Width, im.Height, im.Name)
if len(im.Caption) > 0 {
capfs := pctwidth(im.Sp, cw, cw/100)
if im.Font == "" {
im.Font = "sans"
}
if im.Color == "" {
openvg.FillColor(slide.Fg)
} else {
openvg.FillColor(im.Color)
}
if im.Align == "" {
im.Align = "center"
}
switch im.Align {
case "left", "start":
x -= midx
case "right", "end":
x += midx
}
showtext(x, y-((midy)+(capfs*2.0)), im.Caption, im.Align, im.Font, capfs)
}
}
// every graphic on the slide
const defaultColor = "rgb(127,127,127)"
const defaultSw = 1.5
var strokeopacity float64
// line
for _, line := range slide.Line {
if line.Color == "" {
line.Color = slide.Fg // defaultColor
}
if line.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = line.Opacity / 100.0
}
x1, y1, sw := dimen(d, line.Xp1, line.Yp1, line.Sp)
x2, y2, _ := dimen(d, line.Xp2, line.Yp2, 0)
openvg.StrokeColor(line.Color, openvg.VGfloat(strokeopacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(openvg.VGfloat(sw))
openvg.StrokeColor(line.Color)
openvg.Line(x1, y1, x2, y2)
openvg.StrokeWidth(0)
}
// ellipse
for _, ellipse := range slide.Ellipse {
x, y, _ = dimen(d, ellipse.Xp, ellipse.Yp, 0)
var w, h openvg.VGfloat
w = pct(ellipse.Wp, cw)
if ellipse.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(ellipse.Hp, ch)
} else {
h = pct(ellipse.Hr, w)
}
if ellipse.Color == "" {
ellipse.Color = defaultColor
}
if ellipse.Opacity == 0 {
ellipse.Opacity = 1
} else {
ellipse.Opacity /= 100
}
openvg.FillColor(ellipse.Color, openvg.VGfloat(ellipse.Opacity))
openvg.Ellipse(x, y, w, h)
}
// rect
for _, rect := range slide.Rect {
x, y, _ = dimen(d, rect.Xp, rect.Yp, 0)
var w, h openvg.VGfloat
w = pct(rect.Wp, cw)
if rect.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(rect.Hp, ch)
} else {
h = pct(rect.Hr, w)
}
if rect.Color == "" {
rect.Color = defaultColor
}
if rect.Opacity == 0 {
rect.Opacity = 1
} else {
rect.Opacity /= 100
}
openvg.FillColor(rect.Color, openvg.VGfloat(rect.Opacity))
openvg.Rect(x-(w/2), y-(h/2), w, h)
}
// curve
for _, curve := range slide.Curve {
if curve.Color == "" {
curve.Color = defaultColor
}
if curve.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = curve.Opacity / 100.0
}
x1, y1, sw := dimen(d, curve.Xp1, curve.Yp1, curve.Sp)
x2, y2, _ := dimen(d, curve.Xp2, curve.Yp2, 0)
x3, y3, _ := dimen(d, curve.Xp3, curve.Yp3, 0)
openvg.StrokeColor(curve.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(curve.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Qbezier(x1, y1, x2, y2, x3, y3)
openvg.StrokeWidth(0)
}
// arc
for _, arc := range slide.Arc {
if arc.Color == "" {
arc.Color = defaultColor
}
if arc.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = arc.Opacity / 100.0
}
ax, ay, sw := dimen(d, arc.Xp, arc.Yp, arc.Sp)
w := pct(arc.Wp, cw)
h := pct(arc.Hp, cw)
openvg.StrokeColor(arc.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(arc.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Arc(ax, ay, w, h, openvg.VGfloat(arc.A1), openvg.VGfloat(arc.A2))
openvg.StrokeWidth(0)
}
// polygon
for _, poly := range slide.Polygon {
if poly.Color == "" {
poly.Color = defaultColor
}
if poly.Opacity == 0 {
poly.Opacity = 1
} else {
poly.Opacity /= 100
}
xs := strings.Split(poly.XC, " ")
ys := strings.Split(poly.YC, " ")
if len(xs) != len(ys) {
continue
}
if len(xs) < 3 || len(ys) < 3 {
continue
}
px := make([]openvg.VGfloat, len(xs))
py := make([]openvg.VGfloat, len(ys))
for i := 0; i < len(xs); i++ {
x, err := strconv.ParseFloat(xs[i], 32)
if err != nil {
px[i] = 0
} else {
px[i] = pct(x, cw)
}
y, err := strconv.ParseFloat(ys[i], 32)
if err != nil {
py[i] = 0
} else {
py[i] = pct(y, ch)
}
}
openvg.FillColor(poly.Color, openvg.VGfloat(poly.Opacity))
openvg.Polygon(px, py)
}
openvg.FillColor(slide.Fg)
// every list in the slide
var offset, textopacity openvg.VGfloat
const blinespacing = 2.4
for _, l := range slide.List {
if l.Font == "" {
l.Font = "sans"
}
x, y, fs = dimen(d, l.Xp, l.Yp, l.Sp)
if l.Type == "bullet" {
offset = 1.2 * fs
} else {
offset = 0
}
if l.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(l.Opacity / 100)
}
// every list item
var li, lifont string
for ln, tl := range l.Li {
if len(l.Color) > 0 {
openvg.FillColor(l.Color, textopacity)
} else {
openvg.FillColor(slide.Fg)
}
if l.Type == "bullet" {
boffset := fs / 2
openvg.Ellipse(x, y+boffset, boffset, boffset)
//openvg.Rect(x, y+boffset/2, boffset, boffset)
}
if l.Type == "number" {
li = fmt.Sprintf("%d. ", ln+1) + tl.ListText
} else {
li = tl.ListText
}
if len(tl.Color) > 0 {
openvg.FillColor(tl.Color, textopacity)
}
if len(tl.Font) > 0 {
lifont = tl.Font
} else {
lifont = l.Font
}
showtext(x+offset, y, li, l.Align, lifont, fs)
y -= fs * blinespacing
}
}
openvg.FillColor(slide.Fg)
// every text in the slide
const linespacing = 1.8
var tdata string
for _, t := range slide.Text {
if t.File != "" {
tdata = includefile(t.File)
} else {
tdata = t.Tdata
}
if t.Font == "" {
t.Font = "sans"
}
if t.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(t.Opacity / 100)
}
x, y, fs = dimen(d, t.Xp, t.Yp, t.Sp)
td := strings.Split(tdata, "\n")
if t.Type == "code" {
t.Font = "mono"
tdepth := ((fs * linespacing) * openvg.VGfloat(len(td))) + fs
openvg.FillColor("rgb(240,240,240)")
openvg.Rect(x-20, y-tdepth+(fs*linespacing), pctwidth(t.Wp, cw, cw-x-20), tdepth)
}
if t.Color == "" {
openvg.FillColor(slide.Fg, textopacity)
} else {
openvg.FillColor(t.Color, textopacity)
}
if t.Type == "block" {
textwrap(x, y, pctwidth(t.Wp, cw, cw/2), tdata, t.Font, fs, fs*linespacing, 0.3)
} else {
// every text line
for _, txt := range td {
showtext(x, y, txt, t.Align, t.Font, fs)
y -= (fs * linespacing)
}
}
}
openvg.FillColor(slide.Fg)
openvg.End()
}
// whitespace determines if a rune is whitespace
func whitespace(r rune) bool {
return r == ' ' || r == '\n' || r == '\t' || r == '-'
}
// textwrap draws text at location, wrapping at the specified width
func textwrap(x, y, w openvg.VGfloat, s string, font string, fs, leading, factor openvg.VGfloat) {
size := int(fs)
if font == "mono" {
factor = 1.0
}
wordspacing := openvg.TextWidth("m", font, size)
words := strings.FieldsFunc(s, whitespace)
xp := x
yp := y
edge := x + w
for _, s := range words {
tw := openvg.TextWidth(s, font, size)
openvg.Text(xp, yp, s, font, size)
xp += tw + (wordspacing * factor)
if xp > edge {
xp = x
yp -= leading
}
}
}
// includefile returns the contents of a file as string
func includefile(filename string) string {
data, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return ""
}
return codemap.Replace(string(data))
}
// for every file, make a snapshot (<file>-slide-nnnn)
func main() {
var slidenum = flag.Int("slide", 0, "initial slide")
flag.Parse()
for _, f := range flag.Args() {
dodeck(f, *slidenum)
}
}
| {
pw := deck.Pwidth(p, float64(p1), float64(p2))
return openvg.VGfloat(pw)
} | identifier_body |
keyframe.go | // keyframe: take a snapshot (as a raw raster) of th specified slide
package main
import (
"code.google.com/p/go-charset/charset"
_ "code.google.com/p/go-charset/data"
"flag"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/ajstarks/deck"
"github.com/ajstarks/openvg"
)
var wintrans, _ = charset.TranslatorTo("windows-1252")
var codemap = strings.NewReplacer("\t", " ")
// dodeck sets up the graphics environment and kicks off the interaction
func dodeck(filename string, slidenum int) {
w, h := openvg.Init()
defer openvg.Finish()
d, err := deck.Read(filename, w, h)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
openvg.FillRGB(200, 200, 200, 1)
openvg.Rect(0, 0, openvg.VGfloat(w), openvg.VGfloat(h))
showslide(d, slidenum)
openvg.SaveEnd(fmt.Sprintf("%s-slide-%04d", filename, slidenum))
}
// pct computes percentages
func pct(p float64, m openvg.VGfloat) openvg.VGfloat {
return openvg.VGfloat((p / 100.0)) * m
}
func pctwidth(p float64, p1, p2 openvg.VGfloat) openvg.VGfloat {
pw := deck.Pwidth(p, float64(p1), float64(p2))
return openvg.VGfloat(pw)
}
func fromUTF8(s string) string {
_, b, err := wintrans.Translate([]byte(s), true)
if err != nil {
return s
}
return string(b)
}
//showtext displays text
func showtext(x, y openvg.VGfloat, s, align, font string, fs openvg.VGfloat) {
t := fromUTF8(s)
fontsize := int(fs)
switch align {
case "center", "middle", "mid", "c":
openvg.TextMid(x, y, t, font, fontsize)
case "right", "end", "e":
openvg.TextEnd(x, y, t, font, fontsize)
default:
openvg.Text(x, y, t, font, fontsize)
}
}
// dimen returns device dimemsion from percentages
func dimen(d deck.Deck, x, y, s float64) (xo, yo, so openvg.VGfloat) {
xf, yf, sf := deck.Dimen(d.Canvas, x, y, s)
xo, yo, so = openvg.VGfloat(xf), openvg.VGfloat(yf), openvg.VGfloat(sf)*0.8
return
}
// showlide displays slides
func showslide(d deck.Deck, n int) {
if n < 0 || n > len(d.Slide)-1 {
return
}
slide := d.Slide[n]
if slide.Bg == "" {
slide.Bg = "white"
}
if slide.Fg == "" {
slide.Fg = "black"
}
openvg.Start(d.Canvas.Width, d.Canvas.Height)
cw := openvg.VGfloat(d.Canvas.Width)
ch := openvg.VGfloat(d.Canvas.Height)
openvg.FillColor(slide.Bg)
openvg.Rect(0, 0, cw, ch)
var x, y, fs openvg.VGfloat
// every image in the slide
for _, im := range slide.Image {
x = pct(im.Xp, cw)
y = pct(im.Yp, ch)
midx := openvg.VGfloat(im.Width / 2)
midy := openvg.VGfloat(im.Height / 2)
openvg.Image(x-midx, y-midy, im.Width, im.Height, im.Name)
if len(im.Caption) > 0 {
capfs := pctwidth(im.Sp, cw, cw/100)
if im.Font == "" {
im.Font = "sans"
}
if im.Color == "" {
openvg.FillColor(slide.Fg)
} else {
openvg.FillColor(im.Color)
}
if im.Align == "" {
im.Align = "center"
}
switch im.Align {
case "left", "start":
x -= midx
case "right", "end":
x += midx
}
showtext(x, y-((midy)+(capfs*2.0)), im.Caption, im.Align, im.Font, capfs)
}
}
// every graphic on the slide
const defaultColor = "rgb(127,127,127)"
const defaultSw = 1.5
var strokeopacity float64
// line
for _, line := range slide.Line {
if line.Color == "" {
line.Color = slide.Fg // defaultColor
}
if line.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = line.Opacity / 100.0
}
x1, y1, sw := dimen(d, line.Xp1, line.Yp1, line.Sp)
x2, y2, _ := dimen(d, line.Xp2, line.Yp2, 0)
openvg.StrokeColor(line.Color, openvg.VGfloat(strokeopacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(openvg.VGfloat(sw))
openvg.StrokeColor(line.Color)
openvg.Line(x1, y1, x2, y2)
openvg.StrokeWidth(0)
}
// ellipse
for _, ellipse := range slide.Ellipse {
x, y, _ = dimen(d, ellipse.Xp, ellipse.Yp, 0)
var w, h openvg.VGfloat
w = pct(ellipse.Wp, cw)
if ellipse.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(ellipse.Hp, ch)
} else {
h = pct(ellipse.Hr, w)
}
if ellipse.Color == "" {
ellipse.Color = defaultColor
}
if ellipse.Opacity == 0 {
ellipse.Opacity = 1
} else {
ellipse.Opacity /= 100
}
openvg.FillColor(ellipse.Color, openvg.VGfloat(ellipse.Opacity))
openvg.Ellipse(x, y, w, h)
}
// rect
for _, rect := range slide.Rect {
x, y, _ = dimen(d, rect.Xp, rect.Yp, 0)
var w, h openvg.VGfloat
w = pct(rect.Wp, cw)
if rect.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(rect.Hp, ch)
} else {
h = pct(rect.Hr, w)
}
if rect.Color == "" {
rect.Color = defaultColor
}
if rect.Opacity == 0 {
rect.Opacity = 1
} else {
rect.Opacity /= 100
}
openvg.FillColor(rect.Color, openvg.VGfloat(rect.Opacity))
openvg.Rect(x-(w/2), y-(h/2), w, h)
}
// curve
for _, curve := range slide.Curve {
if curve.Color == "" {
curve.Color = defaultColor
}
if curve.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = curve.Opacity / 100.0
}
x1, y1, sw := dimen(d, curve.Xp1, curve.Yp1, curve.Sp)
x2, y2, _ := dimen(d, curve.Xp2, curve.Yp2, 0)
x3, y3, _ := dimen(d, curve.Xp3, curve.Yp3, 0)
openvg.StrokeColor(curve.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(curve.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Qbezier(x1, y1, x2, y2, x3, y3)
openvg.StrokeWidth(0)
}
// arc
for _, arc := range slide.Arc {
if arc.Color == "" {
arc.Color = defaultColor
}
if arc.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = arc.Opacity / 100.0
}
ax, ay, sw := dimen(d, arc.Xp, arc.Yp, arc.Sp)
w := pct(arc.Wp, cw)
h := pct(arc.Hp, cw)
openvg.StrokeColor(arc.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(arc.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Arc(ax, ay, w, h, openvg.VGfloat(arc.A1), openvg.VGfloat(arc.A2))
openvg.StrokeWidth(0)
}
// polygon
for _, poly := range slide.Polygon {
if poly.Color == "" {
poly.Color = defaultColor
}
if poly.Opacity == 0 {
poly.Opacity = 1
} else {
poly.Opacity /= 100
}
xs := strings.Split(poly.XC, " ")
ys := strings.Split(poly.YC, " ")
if len(xs) != len(ys) {
continue
}
if len(xs) < 3 || len(ys) < 3 {
continue
}
px := make([]openvg.VGfloat, len(xs))
py := make([]openvg.VGfloat, len(ys))
for i := 0; i < len(xs); i++ {
x, err := strconv.ParseFloat(xs[i], 32)
if err != nil {
px[i] = 0
} else {
px[i] = pct(x, cw)
}
y, err := strconv.ParseFloat(ys[i], 32)
if err != nil {
py[i] = 0
} else {
py[i] = pct(y, ch)
}
}
openvg.FillColor(poly.Color, openvg.VGfloat(poly.Opacity))
openvg.Polygon(px, py)
}
openvg.FillColor(slide.Fg)
// every list in the slide
var offset, textopacity openvg.VGfloat
const blinespacing = 2.4
for _, l := range slide.List {
if l.Font == "" {
l.Font = "sans"
}
x, y, fs = dimen(d, l.Xp, l.Yp, l.Sp)
if l.Type == "bullet" {
offset = 1.2 * fs
} else {
offset = 0
}
if l.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(l.Opacity / 100)
}
// every list item
var li, lifont string
for ln, tl := range l.Li {
if len(l.Color) > 0 {
openvg.FillColor(l.Color, textopacity)
} else {
openvg.FillColor(slide.Fg)
}
if l.Type == "bullet" {
boffset := fs / 2
openvg.Ellipse(x, y+boffset, boffset, boffset)
//openvg.Rect(x, y+boffset/2, boffset, boffset)
}
if l.Type == "number" {
li = fmt.Sprintf("%d. ", ln+1) + tl.ListText
} else {
li = tl.ListText
}
if len(tl.Color) > 0 {
openvg.FillColor(tl.Color, textopacity)
}
if len(tl.Font) > 0 {
lifont = tl.Font
} else {
lifont = l.Font
}
showtext(x+offset, y, li, l.Align, lifont, fs)
y -= fs * blinespacing
}
}
openvg.FillColor(slide.Fg)
// every text in the slide
const linespacing = 1.8
var tdata string
for _, t := range slide.Text {
if t.File != "" {
tdata = includefile(t.File)
} else {
tdata = t.Tdata
}
if t.Font == "" {
t.Font = "sans"
}
if t.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(t.Opacity / 100)
}
x, y, fs = dimen(d, t.Xp, t.Yp, t.Sp)
td := strings.Split(tdata, "\n")
if t.Type == "code" {
t.Font = "mono"
tdepth := ((fs * linespacing) * openvg.VGfloat(len(td))) + fs
openvg.FillColor("rgb(240,240,240)")
openvg.Rect(x-20, y-tdepth+(fs*linespacing), pctwidth(t.Wp, cw, cw-x-20), tdepth)
}
if t.Color == "" {
openvg.FillColor(slide.Fg, textopacity)
} else {
openvg.FillColor(t.Color, textopacity)
}
if t.Type == "block" {
textwrap(x, y, pctwidth(t.Wp, cw, cw/2), tdata, t.Font, fs, fs*linespacing, 0.3)
} else {
// every text line
for _, txt := range td {
showtext(x, y, txt, t.Align, t.Font, fs)
y -= (fs * linespacing)
}
}
}
openvg.FillColor(slide.Fg)
openvg.End()
}
// whitespace determines if a rune is whitespace
func whitespace(r rune) bool {
return r == ' ' || r == '\n' || r == '\t' || r == '-'
}
// textwrap draws text at location, wrapping at the specified width
func textwrap(x, y, w openvg.VGfloat, s string, font string, fs, leading, factor openvg.VGfloat) {
size := int(fs)
if font == "mono" {
factor = 1.0 | wordspacing := openvg.TextWidth("m", font, size)
words := strings.FieldsFunc(s, whitespace)
xp := x
yp := y
edge := x + w
for _, s := range words {
tw := openvg.TextWidth(s, font, size)
openvg.Text(xp, yp, s, font, size)
xp += tw + (wordspacing * factor)
if xp > edge {
xp = x
yp -= leading
}
}
}
// includefile returns the contents of a file as string
func includefile(filename string) string {
data, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return ""
}
return codemap.Replace(string(data))
}
// for every file, make a snapshot (<file>-slide-nnnn)
func main() {
var slidenum = flag.Int("slide", 0, "initial slide")
flag.Parse()
for _, f := range flag.Args() {
dodeck(f, *slidenum)
}
} | } | random_line_split |
keyframe.go | // keyframe: take a snapshot (as a raw raster) of th specified slide
package main
import (
"code.google.com/p/go-charset/charset"
_ "code.google.com/p/go-charset/data"
"flag"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/ajstarks/deck"
"github.com/ajstarks/openvg"
)
var wintrans, _ = charset.TranslatorTo("windows-1252")
var codemap = strings.NewReplacer("\t", " ")
// dodeck sets up the graphics environment and kicks off the interaction
func dodeck(filename string, slidenum int) {
w, h := openvg.Init()
defer openvg.Finish()
d, err := deck.Read(filename, w, h)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
openvg.FillRGB(200, 200, 200, 1)
openvg.Rect(0, 0, openvg.VGfloat(w), openvg.VGfloat(h))
showslide(d, slidenum)
openvg.SaveEnd(fmt.Sprintf("%s-slide-%04d", filename, slidenum))
}
// pct computes percentages
func pct(p float64, m openvg.VGfloat) openvg.VGfloat {
return openvg.VGfloat((p / 100.0)) * m
}
func pctwidth(p float64, p1, p2 openvg.VGfloat) openvg.VGfloat {
pw := deck.Pwidth(p, float64(p1), float64(p2))
return openvg.VGfloat(pw)
}
func fromUTF8(s string) string {
_, b, err := wintrans.Translate([]byte(s), true)
if err != nil {
return s
}
return string(b)
}
//showtext displays text
func showtext(x, y openvg.VGfloat, s, align, font string, fs openvg.VGfloat) {
t := fromUTF8(s)
fontsize := int(fs)
switch align {
case "center", "middle", "mid", "c":
openvg.TextMid(x, y, t, font, fontsize)
case "right", "end", "e":
openvg.TextEnd(x, y, t, font, fontsize)
default:
openvg.Text(x, y, t, font, fontsize)
}
}
// dimen returns device dimemsion from percentages
func dimen(d deck.Deck, x, y, s float64) (xo, yo, so openvg.VGfloat) {
xf, yf, sf := deck.Dimen(d.Canvas, x, y, s)
xo, yo, so = openvg.VGfloat(xf), openvg.VGfloat(yf), openvg.VGfloat(sf)*0.8
return
}
// showlide displays slides
func showslide(d deck.Deck, n int) {
if n < 0 || n > len(d.Slide)-1 {
return
}
slide := d.Slide[n]
if slide.Bg == "" {
slide.Bg = "white"
}
if slide.Fg == "" {
slide.Fg = "black"
}
openvg.Start(d.Canvas.Width, d.Canvas.Height)
cw := openvg.VGfloat(d.Canvas.Width)
ch := openvg.VGfloat(d.Canvas.Height)
openvg.FillColor(slide.Bg)
openvg.Rect(0, 0, cw, ch)
var x, y, fs openvg.VGfloat
// every image in the slide
for _, im := range slide.Image {
x = pct(im.Xp, cw)
y = pct(im.Yp, ch)
midx := openvg.VGfloat(im.Width / 2)
midy := openvg.VGfloat(im.Height / 2)
openvg.Image(x-midx, y-midy, im.Width, im.Height, im.Name)
if len(im.Caption) > 0 {
capfs := pctwidth(im.Sp, cw, cw/100)
if im.Font == "" {
im.Font = "sans"
}
if im.Color == "" {
openvg.FillColor(slide.Fg)
} else {
openvg.FillColor(im.Color)
}
if im.Align == "" {
im.Align = "center"
}
switch im.Align {
case "left", "start":
x -= midx
case "right", "end":
x += midx
}
showtext(x, y-((midy)+(capfs*2.0)), im.Caption, im.Align, im.Font, capfs)
}
}
// every graphic on the slide
const defaultColor = "rgb(127,127,127)"
const defaultSw = 1.5
var strokeopacity float64
// line
for _, line := range slide.Line {
if line.Color == "" {
line.Color = slide.Fg // defaultColor
}
if line.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = line.Opacity / 100.0
}
x1, y1, sw := dimen(d, line.Xp1, line.Yp1, line.Sp)
x2, y2, _ := dimen(d, line.Xp2, line.Yp2, 0)
openvg.StrokeColor(line.Color, openvg.VGfloat(strokeopacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(openvg.VGfloat(sw))
openvg.StrokeColor(line.Color)
openvg.Line(x1, y1, x2, y2)
openvg.StrokeWidth(0)
}
// ellipse
for _, ellipse := range slide.Ellipse {
x, y, _ = dimen(d, ellipse.Xp, ellipse.Yp, 0)
var w, h openvg.VGfloat
w = pct(ellipse.Wp, cw)
if ellipse.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(ellipse.Hp, ch)
} else {
h = pct(ellipse.Hr, w)
}
if ellipse.Color == "" {
ellipse.Color = defaultColor
}
if ellipse.Opacity == 0 {
ellipse.Opacity = 1
} else {
ellipse.Opacity /= 100
}
openvg.FillColor(ellipse.Color, openvg.VGfloat(ellipse.Opacity))
openvg.Ellipse(x, y, w, h)
}
// rect
for _, rect := range slide.Rect {
x, y, _ = dimen(d, rect.Xp, rect.Yp, 0)
var w, h openvg.VGfloat
w = pct(rect.Wp, cw)
if rect.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(rect.Hp, ch)
} else {
h = pct(rect.Hr, w)
}
if rect.Color == "" {
rect.Color = defaultColor
}
if rect.Opacity == 0 {
rect.Opacity = 1
} else {
rect.Opacity /= 100
}
openvg.FillColor(rect.Color, openvg.VGfloat(rect.Opacity))
openvg.Rect(x-(w/2), y-(h/2), w, h)
}
// curve
for _, curve := range slide.Curve {
if curve.Color == "" {
curve.Color = defaultColor
}
if curve.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = curve.Opacity / 100.0
}
x1, y1, sw := dimen(d, curve.Xp1, curve.Yp1, curve.Sp)
x2, y2, _ := dimen(d, curve.Xp2, curve.Yp2, 0)
x3, y3, _ := dimen(d, curve.Xp3, curve.Yp3, 0)
openvg.StrokeColor(curve.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(curve.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Qbezier(x1, y1, x2, y2, x3, y3)
openvg.StrokeWidth(0)
}
// arc
for _, arc := range slide.Arc {
if arc.Color == "" {
arc.Color = defaultColor
}
if arc.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = arc.Opacity / 100.0
}
ax, ay, sw := dimen(d, arc.Xp, arc.Yp, arc.Sp)
w := pct(arc.Wp, cw)
h := pct(arc.Hp, cw)
openvg.StrokeColor(arc.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(arc.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Arc(ax, ay, w, h, openvg.VGfloat(arc.A1), openvg.VGfloat(arc.A2))
openvg.StrokeWidth(0)
}
// polygon
for _, poly := range slide.Polygon {
if poly.Color == "" {
poly.Color = defaultColor
}
if poly.Opacity == 0 {
poly.Opacity = 1
} else {
poly.Opacity /= 100
}
xs := strings.Split(poly.XC, " ")
ys := strings.Split(poly.YC, " ")
if len(xs) != len(ys) {
continue
}
if len(xs) < 3 || len(ys) < 3 {
continue
}
px := make([]openvg.VGfloat, len(xs))
py := make([]openvg.VGfloat, len(ys))
for i := 0; i < len(xs); i++ {
x, err := strconv.ParseFloat(xs[i], 32)
if err != nil | else {
px[i] = pct(x, cw)
}
y, err := strconv.ParseFloat(ys[i], 32)
if err != nil {
py[i] = 0
} else {
py[i] = pct(y, ch)
}
}
openvg.FillColor(poly.Color, openvg.VGfloat(poly.Opacity))
openvg.Polygon(px, py)
}
openvg.FillColor(slide.Fg)
// every list in the slide
var offset, textopacity openvg.VGfloat
const blinespacing = 2.4
for _, l := range slide.List {
if l.Font == "" {
l.Font = "sans"
}
x, y, fs = dimen(d, l.Xp, l.Yp, l.Sp)
if l.Type == "bullet" {
offset = 1.2 * fs
} else {
offset = 0
}
if l.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(l.Opacity / 100)
}
// every list item
var li, lifont string
for ln, tl := range l.Li {
if len(l.Color) > 0 {
openvg.FillColor(l.Color, textopacity)
} else {
openvg.FillColor(slide.Fg)
}
if l.Type == "bullet" {
boffset := fs / 2
openvg.Ellipse(x, y+boffset, boffset, boffset)
//openvg.Rect(x, y+boffset/2, boffset, boffset)
}
if l.Type == "number" {
li = fmt.Sprintf("%d. ", ln+1) + tl.ListText
} else {
li = tl.ListText
}
if len(tl.Color) > 0 {
openvg.FillColor(tl.Color, textopacity)
}
if len(tl.Font) > 0 {
lifont = tl.Font
} else {
lifont = l.Font
}
showtext(x+offset, y, li, l.Align, lifont, fs)
y -= fs * blinespacing
}
}
openvg.FillColor(slide.Fg)
// every text in the slide
const linespacing = 1.8
var tdata string
for _, t := range slide.Text {
if t.File != "" {
tdata = includefile(t.File)
} else {
tdata = t.Tdata
}
if t.Font == "" {
t.Font = "sans"
}
if t.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(t.Opacity / 100)
}
x, y, fs = dimen(d, t.Xp, t.Yp, t.Sp)
td := strings.Split(tdata, "\n")
if t.Type == "code" {
t.Font = "mono"
tdepth := ((fs * linespacing) * openvg.VGfloat(len(td))) + fs
openvg.FillColor("rgb(240,240,240)")
openvg.Rect(x-20, y-tdepth+(fs*linespacing), pctwidth(t.Wp, cw, cw-x-20), tdepth)
}
if t.Color == "" {
openvg.FillColor(slide.Fg, textopacity)
} else {
openvg.FillColor(t.Color, textopacity)
}
if t.Type == "block" {
textwrap(x, y, pctwidth(t.Wp, cw, cw/2), tdata, t.Font, fs, fs*linespacing, 0.3)
} else {
// every text line
for _, txt := range td {
showtext(x, y, txt, t.Align, t.Font, fs)
y -= (fs * linespacing)
}
}
}
openvg.FillColor(slide.Fg)
openvg.End()
}
// whitespace determines if a rune is whitespace
func whitespace(r rune) bool {
return r == ' ' || r == '\n' || r == '\t' || r == '-'
}
// textwrap draws text at location, wrapping at the specified width
func textwrap(x, y, w openvg.VGfloat, s string, font string, fs, leading, factor openvg.VGfloat) {
size := int(fs)
if font == "mono" {
factor = 1.0
}
wordspacing := openvg.TextWidth("m", font, size)
words := strings.FieldsFunc(s, whitespace)
xp := x
yp := y
edge := x + w
for _, s := range words {
tw := openvg.TextWidth(s, font, size)
openvg.Text(xp, yp, s, font, size)
xp += tw + (wordspacing * factor)
if xp > edge {
xp = x
yp -= leading
}
}
}
// includefile returns the contents of a file as string
func includefile(filename string) string {
data, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return ""
}
return codemap.Replace(string(data))
}
// for every file, make a snapshot (<file>-slide-nnnn)
func main() {
var slidenum = flag.Int("slide", 0, "initial slide")
flag.Parse()
for _, f := range flag.Args() {
dodeck(f, *slidenum)
}
}
| {
px[i] = 0
} | conditional_block |
keyframe.go | // keyframe: take a snapshot (as a raw raster) of th specified slide
package main
import (
"code.google.com/p/go-charset/charset"
_ "code.google.com/p/go-charset/data"
"flag"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/ajstarks/deck"
"github.com/ajstarks/openvg"
)
var wintrans, _ = charset.TranslatorTo("windows-1252")
var codemap = strings.NewReplacer("\t", " ")
// dodeck sets up the graphics environment and kicks off the interaction
func dodeck(filename string, slidenum int) {
w, h := openvg.Init()
defer openvg.Finish()
d, err := deck.Read(filename, w, h)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
openvg.FillRGB(200, 200, 200, 1)
openvg.Rect(0, 0, openvg.VGfloat(w), openvg.VGfloat(h))
showslide(d, slidenum)
openvg.SaveEnd(fmt.Sprintf("%s-slide-%04d", filename, slidenum))
}
// pct computes percentages
func pct(p float64, m openvg.VGfloat) openvg.VGfloat {
return openvg.VGfloat((p / 100.0)) * m
}
func pctwidth(p float64, p1, p2 openvg.VGfloat) openvg.VGfloat {
pw := deck.Pwidth(p, float64(p1), float64(p2))
return openvg.VGfloat(pw)
}
func fromUTF8(s string) string {
_, b, err := wintrans.Translate([]byte(s), true)
if err != nil {
return s
}
return string(b)
}
//showtext displays text
func showtext(x, y openvg.VGfloat, s, align, font string, fs openvg.VGfloat) {
t := fromUTF8(s)
fontsize := int(fs)
switch align {
case "center", "middle", "mid", "c":
openvg.TextMid(x, y, t, font, fontsize)
case "right", "end", "e":
openvg.TextEnd(x, y, t, font, fontsize)
default:
openvg.Text(x, y, t, font, fontsize)
}
}
// dimen returns device dimemsion from percentages
func dimen(d deck.Deck, x, y, s float64) (xo, yo, so openvg.VGfloat) {
xf, yf, sf := deck.Dimen(d.Canvas, x, y, s)
xo, yo, so = openvg.VGfloat(xf), openvg.VGfloat(yf), openvg.VGfloat(sf)*0.8
return
}
// showlide displays slides
func showslide(d deck.Deck, n int) {
if n < 0 || n > len(d.Slide)-1 {
return
}
slide := d.Slide[n]
if slide.Bg == "" {
slide.Bg = "white"
}
if slide.Fg == "" {
slide.Fg = "black"
}
openvg.Start(d.Canvas.Width, d.Canvas.Height)
cw := openvg.VGfloat(d.Canvas.Width)
ch := openvg.VGfloat(d.Canvas.Height)
openvg.FillColor(slide.Bg)
openvg.Rect(0, 0, cw, ch)
var x, y, fs openvg.VGfloat
// every image in the slide
for _, im := range slide.Image {
x = pct(im.Xp, cw)
y = pct(im.Yp, ch)
midx := openvg.VGfloat(im.Width / 2)
midy := openvg.VGfloat(im.Height / 2)
openvg.Image(x-midx, y-midy, im.Width, im.Height, im.Name)
if len(im.Caption) > 0 {
capfs := pctwidth(im.Sp, cw, cw/100)
if im.Font == "" {
im.Font = "sans"
}
if im.Color == "" {
openvg.FillColor(slide.Fg)
} else {
openvg.FillColor(im.Color)
}
if im.Align == "" {
im.Align = "center"
}
switch im.Align {
case "left", "start":
x -= midx
case "right", "end":
x += midx
}
showtext(x, y-((midy)+(capfs*2.0)), im.Caption, im.Align, im.Font, capfs)
}
}
// every graphic on the slide
const defaultColor = "rgb(127,127,127)"
const defaultSw = 1.5
var strokeopacity float64
// line
for _, line := range slide.Line {
if line.Color == "" {
line.Color = slide.Fg // defaultColor
}
if line.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = line.Opacity / 100.0
}
x1, y1, sw := dimen(d, line.Xp1, line.Yp1, line.Sp)
x2, y2, _ := dimen(d, line.Xp2, line.Yp2, 0)
openvg.StrokeColor(line.Color, openvg.VGfloat(strokeopacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(openvg.VGfloat(sw))
openvg.StrokeColor(line.Color)
openvg.Line(x1, y1, x2, y2)
openvg.StrokeWidth(0)
}
// ellipse
for _, ellipse := range slide.Ellipse {
x, y, _ = dimen(d, ellipse.Xp, ellipse.Yp, 0)
var w, h openvg.VGfloat
w = pct(ellipse.Wp, cw)
if ellipse.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(ellipse.Hp, ch)
} else {
h = pct(ellipse.Hr, w)
}
if ellipse.Color == "" {
ellipse.Color = defaultColor
}
if ellipse.Opacity == 0 {
ellipse.Opacity = 1
} else {
ellipse.Opacity /= 100
}
openvg.FillColor(ellipse.Color, openvg.VGfloat(ellipse.Opacity))
openvg.Ellipse(x, y, w, h)
}
// rect
for _, rect := range slide.Rect {
x, y, _ = dimen(d, rect.Xp, rect.Yp, 0)
var w, h openvg.VGfloat
w = pct(rect.Wp, cw)
if rect.Hr == 0 { // if relative height not specified, base height on overall height
h = pct(rect.Hp, ch)
} else {
h = pct(rect.Hr, w)
}
if rect.Color == "" {
rect.Color = defaultColor
}
if rect.Opacity == 0 {
rect.Opacity = 1
} else {
rect.Opacity /= 100
}
openvg.FillColor(rect.Color, openvg.VGfloat(rect.Opacity))
openvg.Rect(x-(w/2), y-(h/2), w, h)
}
// curve
for _, curve := range slide.Curve {
if curve.Color == "" {
curve.Color = defaultColor
}
if curve.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = curve.Opacity / 100.0
}
x1, y1, sw := dimen(d, curve.Xp1, curve.Yp1, curve.Sp)
x2, y2, _ := dimen(d, curve.Xp2, curve.Yp2, 0)
x3, y3, _ := dimen(d, curve.Xp3, curve.Yp3, 0)
openvg.StrokeColor(curve.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(curve.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Qbezier(x1, y1, x2, y2, x3, y3)
openvg.StrokeWidth(0)
}
// arc
for _, arc := range slide.Arc {
if arc.Color == "" {
arc.Color = defaultColor
}
if arc.Opacity == 0 {
strokeopacity = 1
} else {
strokeopacity = arc.Opacity / 100.0
}
ax, ay, sw := dimen(d, arc.Xp, arc.Yp, arc.Sp)
w := pct(arc.Wp, cw)
h := pct(arc.Hp, cw)
openvg.StrokeColor(arc.Color, openvg.VGfloat(strokeopacity))
openvg.FillColor(slide.Bg, openvg.VGfloat(arc.Opacity))
if sw == 0 {
sw = defaultSw
}
openvg.StrokeWidth(sw)
openvg.Arc(ax, ay, w, h, openvg.VGfloat(arc.A1), openvg.VGfloat(arc.A2))
openvg.StrokeWidth(0)
}
// polygon
for _, poly := range slide.Polygon {
if poly.Color == "" {
poly.Color = defaultColor
}
if poly.Opacity == 0 {
poly.Opacity = 1
} else {
poly.Opacity /= 100
}
xs := strings.Split(poly.XC, " ")
ys := strings.Split(poly.YC, " ")
if len(xs) != len(ys) {
continue
}
if len(xs) < 3 || len(ys) < 3 {
continue
}
px := make([]openvg.VGfloat, len(xs))
py := make([]openvg.VGfloat, len(ys))
for i := 0; i < len(xs); i++ {
x, err := strconv.ParseFloat(xs[i], 32)
if err != nil {
px[i] = 0
} else {
px[i] = pct(x, cw)
}
y, err := strconv.ParseFloat(ys[i], 32)
if err != nil {
py[i] = 0
} else {
py[i] = pct(y, ch)
}
}
openvg.FillColor(poly.Color, openvg.VGfloat(poly.Opacity))
openvg.Polygon(px, py)
}
openvg.FillColor(slide.Fg)
// every list in the slide
var offset, textopacity openvg.VGfloat
const blinespacing = 2.4
for _, l := range slide.List {
if l.Font == "" {
l.Font = "sans"
}
x, y, fs = dimen(d, l.Xp, l.Yp, l.Sp)
if l.Type == "bullet" {
offset = 1.2 * fs
} else {
offset = 0
}
if l.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(l.Opacity / 100)
}
// every list item
var li, lifont string
for ln, tl := range l.Li {
if len(l.Color) > 0 {
openvg.FillColor(l.Color, textopacity)
} else {
openvg.FillColor(slide.Fg)
}
if l.Type == "bullet" {
boffset := fs / 2
openvg.Ellipse(x, y+boffset, boffset, boffset)
//openvg.Rect(x, y+boffset/2, boffset, boffset)
}
if l.Type == "number" {
li = fmt.Sprintf("%d. ", ln+1) + tl.ListText
} else {
li = tl.ListText
}
if len(tl.Color) > 0 {
openvg.FillColor(tl.Color, textopacity)
}
if len(tl.Font) > 0 {
lifont = tl.Font
} else {
lifont = l.Font
}
showtext(x+offset, y, li, l.Align, lifont, fs)
y -= fs * blinespacing
}
}
openvg.FillColor(slide.Fg)
// every text in the slide
const linespacing = 1.8
var tdata string
for _, t := range slide.Text {
if t.File != "" {
tdata = includefile(t.File)
} else {
tdata = t.Tdata
}
if t.Font == "" {
t.Font = "sans"
}
if t.Opacity == 0 {
textopacity = 1
} else {
textopacity = openvg.VGfloat(t.Opacity / 100)
}
x, y, fs = dimen(d, t.Xp, t.Yp, t.Sp)
td := strings.Split(tdata, "\n")
if t.Type == "code" {
t.Font = "mono"
tdepth := ((fs * linespacing) * openvg.VGfloat(len(td))) + fs
openvg.FillColor("rgb(240,240,240)")
openvg.Rect(x-20, y-tdepth+(fs*linespacing), pctwidth(t.Wp, cw, cw-x-20), tdepth)
}
if t.Color == "" {
openvg.FillColor(slide.Fg, textopacity)
} else {
openvg.FillColor(t.Color, textopacity)
}
if t.Type == "block" {
textwrap(x, y, pctwidth(t.Wp, cw, cw/2), tdata, t.Font, fs, fs*linespacing, 0.3)
} else {
// every text line
for _, txt := range td {
showtext(x, y, txt, t.Align, t.Font, fs)
y -= (fs * linespacing)
}
}
}
openvg.FillColor(slide.Fg)
openvg.End()
}
// whitespace determines if a rune is whitespace
func | (r rune) bool {
return r == ' ' || r == '\n' || r == '\t' || r == '-'
}
// textwrap draws text at location, wrapping at the specified width
func textwrap(x, y, w openvg.VGfloat, s string, font string, fs, leading, factor openvg.VGfloat) {
size := int(fs)
if font == "mono" {
factor = 1.0
}
wordspacing := openvg.TextWidth("m", font, size)
words := strings.FieldsFunc(s, whitespace)
xp := x
yp := y
edge := x + w
for _, s := range words {
tw := openvg.TextWidth(s, font, size)
openvg.Text(xp, yp, s, font, size)
xp += tw + (wordspacing * factor)
if xp > edge {
xp = x
yp -= leading
}
}
}
// includefile returns the contents of a file as string
func includefile(filename string) string {
data, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return ""
}
return codemap.Replace(string(data))
}
// for every file, make a snapshot (<file>-slide-nnnn)
func main() {
var slidenum = flag.Int("slide", 0, "initial slide")
flag.Parse()
for _, f := range flag.Args() {
dodeck(f, *slidenum)
}
}
| whitespace | identifier_name |
Mash.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 3 23:35:49 2016
@author: Beiwen Liu
"""
from music21 import *
c = converter.parse('./ChopinNocturneOp9No2.xml') # Get Info from Original Sheet
sc = stream.Score(id="MainScore") # New Stream
def switch():
c = converter.parse('./KL_Rains_of_Castamere.xml')
c2 = converter.parse('./FurElise.xml')
number = len(c2.parts[1]) #number of measures
number2 = len(c2.parts[1].measure(4))
c = c.parts[0].flat
print c2.parts[1].measure(4).notes[1].duration.type
print number2
#s = stream.Score()
#s.insert(0,c)
#s.insert(1,c2)
#s.show()
def determineChord(measure):
#measure.notes[x].offset for offset
for x in range(0,len(measure.notes)):
print "chord" + str(measure.notes[x].offset)
def determineMelody(measure):
for x in range(0,len(measure.notes)):
|
def main():
c = converter.parse('./ChopinNocturneOp9No2.xml')
melody = c.parts[0] #Melody part
chord = c.parts[1] #Chord part
measureLength = len(melody) #Number of measures
print "length: " + str(measureLength)
for x in range(1,measureLength): #For all measures
print "index" + str(x)
if (len(chord.measure(x).notes) == 0 and len(melody.measure(x).notes) == 0):
break
determineChord(chord.measure(x))
determineMelody(melody.measure(x))
def streamCreate():
c = chord.Chord(['C4','E4','G4'])
n = note.Note('F#4')
m = stream.Measure()
m.append(n)
m.append(c)
n = stream.Measure()
n.append(n)
n.append(c)
"""
p1 = stream.Part()
n2 = note.Note('C6')
p1.append(n2)
p2 = stream.Part()
n3 = note.Note('G6')
p2.append(n3)"""
s = stream.Stream()
s.insert(0,m)
s.show()
def createNewStream():
c = converter.parse('./bumblebee.xml') # Get Info from Original Sheet
c5 = converter.parse('./waitingontheworld.xml')
#sc = stream.Score(id="MainScore") # New Stream
melody = stream.Part(id="part0") # Melody part
chord1 = stream.Part(id="part1") # Chord part
findAllMeasuresWithinParts(c.parts[0],c5.parts[1],chord1,melody)
"""
timeSignature = c.parts[0].measure(1).getContextByClass('TimeSignature') #Get Time Signature
keySignature = c.parts[1].measure(1).getContextByClass('KeySignature') #Get Key Signature
#melody.timeSignature = timeSignature
#melody.keySignature = keySignature
#chord1.keySignature = keySignature
#chord1.timeSignature = timeSignature
#sc.timeSignature = timeSignature
#sc.keySignature = keySignature
m1 = stream.Measure(number=1)
m1.keySignature = keySignature
m1.timeSignature = timeSignature
m1.append(note.Note('C'))
m2 = stream.Measure(number=2)
m2.append(note.Note('D'))
melody.append([m1,m2])
m11 = stream.Measure(number=1)
m11.keySignature = keySignature
m11.timeSignature = timeSignature
m11.append(note.Note('E'))
m12 = stream.Measure(number=2)
m12.append(note.Note('F'))
chord1.append([m11,m12])
"""
sc.insert(0,melody)
sc.insert(0,chord1)
sc.show()
def noteattributes():
c = converter.parse('./ChopinNocturneOp9No2.xml')
pitch = c.parts[0].measure(1).notes[0].pitch
duration = c.parts[0].measure(1).notes[0].duration
offset = c.parts[0].measure(1).notes[0].offset
print pitch,duration,offset
def noteCreation(pitch, duration, offset):
n = note.Note(pitch)
n.duration = duration
n.offset = offset
return n
def findAllMeasuresWithinParts(melody,chords,newChord,newMelody):
chordMeasures = chords.measure(0)
if chordMeasures is None:
chordMeasures = chords.measure(1)
c1 = chordMeasures
melodyMeasures = melody.measure(0)
if melodyMeasures is None:
melodyMeasures = melody.measure(1)
m1 = melodyMeasures
end = False
counter = 0
melodyList = []
chordList = []
while end == False:
if c1 is None or m1 is None:
end = True
else:
c2 = stream.Measure(number = counter)
c2.offset = c1.offset
c2.timeSignature = c1.timeSignature
m2 = stream.Measure(number = counter)
m2.offset = m1.offset
m2.timeSignature = m1.timeSignature
chordArray, singleNoteChord = findAllNotesWithinMeasureChord(c1)
melodyArray = findAllNotesWithinMeasureMelody(m1)
c2,m2 = createMashForMeasure(chordArray, melodyArray, singleNoteChord, c2, m2)
chordList.append(c2)
melodyList.append(m2)
c1 = c1.next('Measure')
m1 = m1.next('Measure')
print counter
counter = counter + 1
newChord.append(chordList)
newMelody.append(melodyList)
def findAllNotesWithinMeasureChord(measure):
totalList = []
totalList2 = []
for x in measure.flat.recurse():
print x
if type(x) == chord.Chord:
totalList.append([x,x.duration,x.offset])
#print x,x.duration,x.offset
elif type(x) == note.Note:
totalList2.append([x,x.duration,x.offset])
return totalList, totalList2
def findAllNotesWithinMeasureMelody(measure):
totalList = []
for x in measure.flat.recurse():
if type(x) == note.Note:
totalList.append([x.pitch,x.duration,x.offset,x.pitchClass,x])
#print x.pitch,x.duration,x.offset
return totalList
def createMashForMeasure(chordArray, melodyArray, singleNoteChord, chordM, melodyM):
enter = False
newMelodyArray = []
if (len(chordArray) > 0 and len(melodyArray) > 0):
index = 0
for x in range(0,len(chordArray)): #For each chord in this measure, find affected melody, and change them
enter = True
start,end = findWindow(chordArray[x][2],chordArray[x][1]) #Find the window size of specific chord
index, melodyAffected, indexHighest, indexLowest, melodyUnaffected = findMelodiesAffected(start,end,melodyArray,index) #find melodies that are within chord offset + duration
genScale = findScale(chordArray[x][0], melodyAffected, indexHighest, indexLowest) #find scale according to the highest and lowest pitches of melody within chord window
newTempMelodyArray = createNewMelody(chordArray[x], genScale, melodyAffected, melodyUnaffected)
for z in range(0,len(newTempMelodyArray)):
newMelodyArray.append(newTempMelodyArray[z])
if enter:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, newMelodyArray)
enter = False
else:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, melodyArray)
def createNewMelody(genChord, genScale, melodyAffected, melodyUnaffected): #This will generate a new melody array using the scales from the chord
print "---"
print genChord
newMelody = []
if len(melodyAffected) > 0: #If there is any melody affected
for y in range(0,len(melodyAffected)):
minIndex = -1
minValue = 10000
actualValue = 0
value = int(str(melodyAffected[y][0])[-1]) * 12 + melodyAffected[y][3]
for x in range(0,len(genScale)):
tempNum = int(str(genScale[x])[-1]) * 12 + genScale[x].pitchClass
if abs(tempNum - value) < minValue:
minValue = abs(tempNum - value) #used to compare closest
minIndex = x #used to find which index in scale
actualValue = tempNum - value #used to tranpose the target note
if (minValue != 0):
tempNote = melodyAffected[y][-1].transpose(actualValue)
melodyAffected[y][-1] = tempNote
newMelody.append(melodyAffected[y])
for z in range(0,len(melodyUnaffected)):
newMelody.append(melodyUnaffected[z])
#print melodyUnaffected
print newMelody
return newMelody
def createNewMeasure(chordArray,chordM,melodyM,singleNoteChord, melodyArray): #Generate measure here
numberofSingle = len(singleNoteChord)
for x in range(0,len(chordArray)):
if x < numberofSingle:
chordM.insert(singleNoteChord[x][0])
chordM.insert(chordArray[x][2],chordArray[x][0])
for x in range(0,len(melodyArray)):
melodyM.insert(melodyArray[x][-1])
return chordM, melodyM
def findScale(chord1, melodyArray, indexH, indexL):
rootNote = str(chord1.findRoot())[:-1] #Beginning to end - 1 to take out the number
default = False
if indexH == -1 or indexL == -1:
default = True
if chord1.isMajorTriad():
sc1 = scale.MajorScale(str(rootNote))
else:
sc1 = scale.MinorScale(str(rootNote))
if default:
genScale = [p for p in sc1.getPitches("{}5".format(rootNote),"{}6".format(rootNote))]
else:
genScale = [p for p in sc1.getPitches("{}".format(melodyArray[indexL][0].transpose(-11)),"{}".format(melodyArray[indexH][0].transpose(11)))]
#print default,chord1,genScale
#genScale will default to the root scale if no melodies are associated with it
return genScale
def findWindow(offset,duration):
start = offset
end = duration.quarterLength + offset
return start,end
def findMelodiesAffected(start,end,melody,index):
counter = index
melodyAffected = []
melodyUnaffected = []
highestPitch = 0
indexHighest = -1
lowestPitch = 10000
indexLowest = -1
tempIndex = 0
print "start: " + str(start)
print "end: " + str(end)
print "index: " + str(index)
for x in range(index,len(melody)):
print "considering: " + str(melody[x][2])
counter = x
if melody[x][2] >= end: #stop if the offset is past the end offset of chord
break
if melody[x][2] >= start and melody[x][2] < end:
melodyAffected.append(melody[x])
weight = int(str(melody[x][0])[-1]) * 12 + melody[x][3] #Octave * 12 + offset for unique number identifying key
if weight < lowestPitch:
lowestPitch = weight
indexLowest = tempIndex
if weight > highestPitch:
highestPitch = weight
indexHighest = tempIndex
tempIndex = tempIndex + 1
counter = counter + 1
elif melody[x][2] < start or melody[x][2] >= end:
melodyUnaffected.append(melody[x])
counter = counter + 1
print "unaffected" + str(melody[x])
return counter, melodyAffected, indexHighest, indexLowest, melodyUnaffected #return the array here with the counter
#need to also keep track of pitch so that we can give the range of pitches to findScale
def practice1():
sc = stream.Score(id="MainScore")
n1 = note.Note('G')
n1.offset = 5
m1 = stream.Measure()
#m1.timeSignature = meter.TimeSignature('2/4')
m1.insert(n1)
m1.offset = 5
print n1.offset
print m1.offset
#m1.insert(0,n1)
#print m1.activeSite
#print n1.activeSite
sc.insert(m1)
sc.flat.show()
def pr2(m):
noa = note.Note('G')
noa.offset = 5
noa.duration.type="half"
m.insert(100,noa)
def pitchPractice():
ne = note.Note('C4')
ne1 = note.Note('G5')
ads= note.pitch
print ne.pitchClass
print ne1.pitchClass
print abs(-1)
a = ads.transpose(1)
print a
def streamP():
c5 = converter.parse('dreamchaser.xml')
print c5.parts[1].measure(0)
#streamP()
#pitchPractice()
#practice1()
#streamCreate()
createNewStream()
#findAllMeasures()
#findAllNotesWithinMeasure()
#flatStream()
#noteattributes()
#reconstruction()
#streamCreate()
#main()
#findScale() | print "melody" + str(measure.notes[x].offset) | conditional_block |
Mash.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 3 23:35:49 2016
@author: Beiwen Liu
"""
from music21 import *
c = converter.parse('./ChopinNocturneOp9No2.xml') # Get Info from Original Sheet
sc = stream.Score(id="MainScore") # New Stream
def switch():
c = converter.parse('./KL_Rains_of_Castamere.xml')
c2 = converter.parse('./FurElise.xml')
number = len(c2.parts[1]) #number of measures
number2 = len(c2.parts[1].measure(4))
c = c.parts[0].flat
print c2.parts[1].measure(4).notes[1].duration.type
print number2
#s = stream.Score()
#s.insert(0,c)
#s.insert(1,c2)
#s.show()
def determineChord(measure):
#measure.notes[x].offset for offset
for x in range(0,len(measure.notes)):
print "chord" + str(measure.notes[x].offset)
def determineMelody(measure):
for x in range(0,len(measure.notes)):
print "melody" + str(measure.notes[x].offset)
def main():
c = converter.parse('./ChopinNocturneOp9No2.xml')
melody = c.parts[0] #Melody part
chord = c.parts[1] #Chord part
measureLength = len(melody) #Number of measures
print "length: " + str(measureLength)
for x in range(1,measureLength): #For all measures
print "index" + str(x)
if (len(chord.measure(x).notes) == 0 and len(melody.measure(x).notes) == 0):
break
determineChord(chord.measure(x))
determineMelody(melody.measure(x))
def streamCreate():
c = chord.Chord(['C4','E4','G4'])
n = note.Note('F#4')
m = stream.Measure()
m.append(n)
m.append(c)
n = stream.Measure()
n.append(n)
n.append(c)
"""
p1 = stream.Part()
n2 = note.Note('C6')
p1.append(n2)
p2 = stream.Part()
n3 = note.Note('G6')
p2.append(n3)"""
s = stream.Stream()
s.insert(0,m)
s.show()
def createNewStream():
c = converter.parse('./bumblebee.xml') # Get Info from Original Sheet
c5 = converter.parse('./waitingontheworld.xml')
#sc = stream.Score(id="MainScore") # New Stream
melody = stream.Part(id="part0") # Melody part
chord1 = stream.Part(id="part1") # Chord part
findAllMeasuresWithinParts(c.parts[0],c5.parts[1],chord1,melody)
"""
timeSignature = c.parts[0].measure(1).getContextByClass('TimeSignature') #Get Time Signature
keySignature = c.parts[1].measure(1).getContextByClass('KeySignature') #Get Key Signature
#melody.timeSignature = timeSignature
#melody.keySignature = keySignature
#chord1.keySignature = keySignature
#chord1.timeSignature = timeSignature
#sc.timeSignature = timeSignature
#sc.keySignature = keySignature
m1 = stream.Measure(number=1)
m1.keySignature = keySignature
m1.timeSignature = timeSignature
m1.append(note.Note('C'))
m2 = stream.Measure(number=2)
m2.append(note.Note('D'))
melody.append([m1,m2])
m11 = stream.Measure(number=1)
m11.keySignature = keySignature
m11.timeSignature = timeSignature
m11.append(note.Note('E'))
m12 = stream.Measure(number=2)
m12.append(note.Note('F'))
chord1.append([m11,m12])
"""
sc.insert(0,melody)
sc.insert(0,chord1)
sc.show()
def noteattributes():
c = converter.parse('./ChopinNocturneOp9No2.xml')
pitch = c.parts[0].measure(1).notes[0].pitch
duration = c.parts[0].measure(1).notes[0].duration
offset = c.parts[0].measure(1).notes[0].offset
print pitch,duration,offset
def noteCreation(pitch, duration, offset):
n = note.Note(pitch)
n.duration = duration
n.offset = offset
return n
def findAllMeasuresWithinParts(melody,chords,newChord,newMelody):
chordMeasures = chords.measure(0)
if chordMeasures is None:
chordMeasures = chords.measure(1)
c1 = chordMeasures
melodyMeasures = melody.measure(0)
if melodyMeasures is None:
melodyMeasures = melody.measure(1)
m1 = melodyMeasures
end = False
counter = 0
melodyList = []
chordList = []
while end == False:
if c1 is None or m1 is None:
end = True
else:
c2 = stream.Measure(number = counter)
c2.offset = c1.offset
c2.timeSignature = c1.timeSignature
m2 = stream.Measure(number = counter)
m2.offset = m1.offset
m2.timeSignature = m1.timeSignature
chordArray, singleNoteChord = findAllNotesWithinMeasureChord(c1)
melodyArray = findAllNotesWithinMeasureMelody(m1)
c2,m2 = createMashForMeasure(chordArray, melodyArray, singleNoteChord, c2, m2)
chordList.append(c2)
melodyList.append(m2)
c1 = c1.next('Measure')
m1 = m1.next('Measure')
print counter
counter = counter + 1
newChord.append(chordList)
newMelody.append(melodyList)
def | (measure):
totalList = []
totalList2 = []
for x in measure.flat.recurse():
print x
if type(x) == chord.Chord:
totalList.append([x,x.duration,x.offset])
#print x,x.duration,x.offset
elif type(x) == note.Note:
totalList2.append([x,x.duration,x.offset])
return totalList, totalList2
def findAllNotesWithinMeasureMelody(measure):
totalList = []
for x in measure.flat.recurse():
if type(x) == note.Note:
totalList.append([x.pitch,x.duration,x.offset,x.pitchClass,x])
#print x.pitch,x.duration,x.offset
return totalList
def createMashForMeasure(chordArray, melodyArray, singleNoteChord, chordM, melodyM):
enter = False
newMelodyArray = []
if (len(chordArray) > 0 and len(melodyArray) > 0):
index = 0
for x in range(0,len(chordArray)): #For each chord in this measure, find affected melody, and change them
enter = True
start,end = findWindow(chordArray[x][2],chordArray[x][1]) #Find the window size of specific chord
index, melodyAffected, indexHighest, indexLowest, melodyUnaffected = findMelodiesAffected(start,end,melodyArray,index) #find melodies that are within chord offset + duration
genScale = findScale(chordArray[x][0], melodyAffected, indexHighest, indexLowest) #find scale according to the highest and lowest pitches of melody within chord window
newTempMelodyArray = createNewMelody(chordArray[x], genScale, melodyAffected, melodyUnaffected)
for z in range(0,len(newTempMelodyArray)):
newMelodyArray.append(newTempMelodyArray[z])
if enter:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, newMelodyArray)
enter = False
else:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, melodyArray)
def createNewMelody(genChord, genScale, melodyAffected, melodyUnaffected): #This will generate a new melody array using the scales from the chord
print "---"
print genChord
newMelody = []
if len(melodyAffected) > 0: #If there is any melody affected
for y in range(0,len(melodyAffected)):
minIndex = -1
minValue = 10000
actualValue = 0
value = int(str(melodyAffected[y][0])[-1]) * 12 + melodyAffected[y][3]
for x in range(0,len(genScale)):
tempNum = int(str(genScale[x])[-1]) * 12 + genScale[x].pitchClass
if abs(tempNum - value) < minValue:
minValue = abs(tempNum - value) #used to compare closest
minIndex = x #used to find which index in scale
actualValue = tempNum - value #used to tranpose the target note
if (minValue != 0):
tempNote = melodyAffected[y][-1].transpose(actualValue)
melodyAffected[y][-1] = tempNote
newMelody.append(melodyAffected[y])
for z in range(0,len(melodyUnaffected)):
newMelody.append(melodyUnaffected[z])
#print melodyUnaffected
print newMelody
return newMelody
def createNewMeasure(chordArray,chordM,melodyM,singleNoteChord, melodyArray): #Generate measure here
numberofSingle = len(singleNoteChord)
for x in range(0,len(chordArray)):
if x < numberofSingle:
chordM.insert(singleNoteChord[x][0])
chordM.insert(chordArray[x][2],chordArray[x][0])
for x in range(0,len(melodyArray)):
melodyM.insert(melodyArray[x][-1])
return chordM, melodyM
def findScale(chord1, melodyArray, indexH, indexL):
rootNote = str(chord1.findRoot())[:-1] #Beginning to end - 1 to take out the number
default = False
if indexH == -1 or indexL == -1:
default = True
if chord1.isMajorTriad():
sc1 = scale.MajorScale(str(rootNote))
else:
sc1 = scale.MinorScale(str(rootNote))
if default:
genScale = [p for p in sc1.getPitches("{}5".format(rootNote),"{}6".format(rootNote))]
else:
genScale = [p for p in sc1.getPitches("{}".format(melodyArray[indexL][0].transpose(-11)),"{}".format(melodyArray[indexH][0].transpose(11)))]
#print default,chord1,genScale
#genScale will default to the root scale if no melodies are associated with it
return genScale
def findWindow(offset,duration):
start = offset
end = duration.quarterLength + offset
return start,end
def findMelodiesAffected(start,end,melody,index):
counter = index
melodyAffected = []
melodyUnaffected = []
highestPitch = 0
indexHighest = -1
lowestPitch = 10000
indexLowest = -1
tempIndex = 0
print "start: " + str(start)
print "end: " + str(end)
print "index: " + str(index)
for x in range(index,len(melody)):
print "considering: " + str(melody[x][2])
counter = x
if melody[x][2] >= end: #stop if the offset is past the end offset of chord
break
if melody[x][2] >= start and melody[x][2] < end:
melodyAffected.append(melody[x])
weight = int(str(melody[x][0])[-1]) * 12 + melody[x][3] #Octave * 12 + offset for unique number identifying key
if weight < lowestPitch:
lowestPitch = weight
indexLowest = tempIndex
if weight > highestPitch:
highestPitch = weight
indexHighest = tempIndex
tempIndex = tempIndex + 1
counter = counter + 1
elif melody[x][2] < start or melody[x][2] >= end:
melodyUnaffected.append(melody[x])
counter = counter + 1
print "unaffected" + str(melody[x])
return counter, melodyAffected, indexHighest, indexLowest, melodyUnaffected #return the array here with the counter
#need to also keep track of pitch so that we can give the range of pitches to findScale
def practice1():
sc = stream.Score(id="MainScore")
n1 = note.Note('G')
n1.offset = 5
m1 = stream.Measure()
#m1.timeSignature = meter.TimeSignature('2/4')
m1.insert(n1)
m1.offset = 5
print n1.offset
print m1.offset
#m1.insert(0,n1)
#print m1.activeSite
#print n1.activeSite
sc.insert(m1)
sc.flat.show()
def pr2(m):
noa = note.Note('G')
noa.offset = 5
noa.duration.type="half"
m.insert(100,noa)
def pitchPractice():
ne = note.Note('C4')
ne1 = note.Note('G5')
ads= note.pitch
print ne.pitchClass
print ne1.pitchClass
print abs(-1)
a = ads.transpose(1)
print a
def streamP():
c5 = converter.parse('dreamchaser.xml')
print c5.parts[1].measure(0)
#streamP()
#pitchPractice()
#practice1()
#streamCreate()
createNewStream()
#findAllMeasures()
#findAllNotesWithinMeasure()
#flatStream()
#noteattributes()
#reconstruction()
#streamCreate()
#main()
#findScale() | findAllNotesWithinMeasureChord | identifier_name |
Mash.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 3 23:35:49 2016
@author: Beiwen Liu
"""
from music21 import *
c = converter.parse('./ChopinNocturneOp9No2.xml') # Get Info from Original Sheet
sc = stream.Score(id="MainScore") # New Stream
def switch():
c = converter.parse('./KL_Rains_of_Castamere.xml')
c2 = converter.parse('./FurElise.xml')
number = len(c2.parts[1]) #number of measures
number2 = len(c2.parts[1].measure(4))
c = c.parts[0].flat
print c2.parts[1].measure(4).notes[1].duration.type
print number2
#s = stream.Score()
#s.insert(0,c)
#s.insert(1,c2)
#s.show()
def determineChord(measure):
#measure.notes[x].offset for offset
for x in range(0,len(measure.notes)):
print "chord" + str(measure.notes[x].offset)
def determineMelody(measure):
for x in range(0,len(measure.notes)):
print "melody" + str(measure.notes[x].offset)
def main():
c = converter.parse('./ChopinNocturneOp9No2.xml')
melody = c.parts[0] #Melody part
chord = c.parts[1] #Chord part
measureLength = len(melody) #Number of measures
print "length: " + str(measureLength)
for x in range(1,measureLength): #For all measures
print "index" + str(x)
if (len(chord.measure(x).notes) == 0 and len(melody.measure(x).notes) == 0):
break
determineChord(chord.measure(x))
determineMelody(melody.measure(x))
def streamCreate():
c = chord.Chord(['C4','E4','G4'])
n = note.Note('F#4')
m = stream.Measure()
m.append(n)
m.append(c)
n = stream.Measure()
n.append(n)
n.append(c)
"""
p1 = stream.Part()
n2 = note.Note('C6')
p1.append(n2)
p2 = stream.Part()
n3 = note.Note('G6')
p2.append(n3)"""
s = stream.Stream()
s.insert(0,m)
s.show()
def createNewStream():
c = converter.parse('./bumblebee.xml') # Get Info from Original Sheet
c5 = converter.parse('./waitingontheworld.xml')
#sc = stream.Score(id="MainScore") # New Stream
melody = stream.Part(id="part0") # Melody part
chord1 = stream.Part(id="part1") # Chord part
findAllMeasuresWithinParts(c.parts[0],c5.parts[1],chord1,melody)
"""
timeSignature = c.parts[0].measure(1).getContextByClass('TimeSignature') #Get Time Signature
keySignature = c.parts[1].measure(1).getContextByClass('KeySignature') #Get Key Signature
#melody.timeSignature = timeSignature
#melody.keySignature = keySignature
#chord1.keySignature = keySignature
#chord1.timeSignature = timeSignature
#sc.timeSignature = timeSignature
#sc.keySignature = keySignature
m1 = stream.Measure(number=1)
m1.keySignature = keySignature
m1.timeSignature = timeSignature
m1.append(note.Note('C'))
m2 = stream.Measure(number=2)
m2.append(note.Note('D'))
melody.append([m1,m2])
m11 = stream.Measure(number=1)
m11.keySignature = keySignature
m11.timeSignature = timeSignature
m11.append(note.Note('E'))
m12 = stream.Measure(number=2)
m12.append(note.Note('F'))
chord1.append([m11,m12])
"""
sc.insert(0,melody)
sc.insert(0,chord1)
sc.show()
def noteattributes():
c = converter.parse('./ChopinNocturneOp9No2.xml')
pitch = c.parts[0].measure(1).notes[0].pitch
duration = c.parts[0].measure(1).notes[0].duration
offset = c.parts[0].measure(1).notes[0].offset
print pitch,duration,offset
def noteCreation(pitch, duration, offset):
|
def findAllMeasuresWithinParts(melody,chords,newChord,newMelody):
chordMeasures = chords.measure(0)
if chordMeasures is None:
chordMeasures = chords.measure(1)
c1 = chordMeasures
melodyMeasures = melody.measure(0)
if melodyMeasures is None:
melodyMeasures = melody.measure(1)
m1 = melodyMeasures
end = False
counter = 0
melodyList = []
chordList = []
while end == False:
if c1 is None or m1 is None:
end = True
else:
c2 = stream.Measure(number = counter)
c2.offset = c1.offset
c2.timeSignature = c1.timeSignature
m2 = stream.Measure(number = counter)
m2.offset = m1.offset
m2.timeSignature = m1.timeSignature
chordArray, singleNoteChord = findAllNotesWithinMeasureChord(c1)
melodyArray = findAllNotesWithinMeasureMelody(m1)
c2,m2 = createMashForMeasure(chordArray, melodyArray, singleNoteChord, c2, m2)
chordList.append(c2)
melodyList.append(m2)
c1 = c1.next('Measure')
m1 = m1.next('Measure')
print counter
counter = counter + 1
newChord.append(chordList)
newMelody.append(melodyList)
def findAllNotesWithinMeasureChord(measure):
totalList = []
totalList2 = []
for x in measure.flat.recurse():
print x
if type(x) == chord.Chord:
totalList.append([x,x.duration,x.offset])
#print x,x.duration,x.offset
elif type(x) == note.Note:
totalList2.append([x,x.duration,x.offset])
return totalList, totalList2
def findAllNotesWithinMeasureMelody(measure):
totalList = []
for x in measure.flat.recurse():
if type(x) == note.Note:
totalList.append([x.pitch,x.duration,x.offset,x.pitchClass,x])
#print x.pitch,x.duration,x.offset
return totalList
def createMashForMeasure(chordArray, melodyArray, singleNoteChord, chordM, melodyM):
enter = False
newMelodyArray = []
if (len(chordArray) > 0 and len(melodyArray) > 0):
index = 0
for x in range(0,len(chordArray)): #For each chord in this measure, find affected melody, and change them
enter = True
start,end = findWindow(chordArray[x][2],chordArray[x][1]) #Find the window size of specific chord
index, melodyAffected, indexHighest, indexLowest, melodyUnaffected = findMelodiesAffected(start,end,melodyArray,index) #find melodies that are within chord offset + duration
genScale = findScale(chordArray[x][0], melodyAffected, indexHighest, indexLowest) #find scale according to the highest and lowest pitches of melody within chord window
newTempMelodyArray = createNewMelody(chordArray[x], genScale, melodyAffected, melodyUnaffected)
for z in range(0,len(newTempMelodyArray)):
newMelodyArray.append(newTempMelodyArray[z])
if enter:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, newMelodyArray)
enter = False
else:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, melodyArray)
def createNewMelody(genChord, genScale, melodyAffected, melodyUnaffected): #This will generate a new melody array using the scales from the chord
print "---"
print genChord
newMelody = []
if len(melodyAffected) > 0: #If there is any melody affected
for y in range(0,len(melodyAffected)):
minIndex = -1
minValue = 10000
actualValue = 0
value = int(str(melodyAffected[y][0])[-1]) * 12 + melodyAffected[y][3]
for x in range(0,len(genScale)):
tempNum = int(str(genScale[x])[-1]) * 12 + genScale[x].pitchClass
if abs(tempNum - value) < minValue:
minValue = abs(tempNum - value) #used to compare closest
minIndex = x #used to find which index in scale
actualValue = tempNum - value #used to tranpose the target note
if (minValue != 0):
tempNote = melodyAffected[y][-1].transpose(actualValue)
melodyAffected[y][-1] = tempNote
newMelody.append(melodyAffected[y])
for z in range(0,len(melodyUnaffected)):
newMelody.append(melodyUnaffected[z])
#print melodyUnaffected
print newMelody
return newMelody
def createNewMeasure(chordArray,chordM,melodyM,singleNoteChord, melodyArray): #Generate measure here
numberofSingle = len(singleNoteChord)
for x in range(0,len(chordArray)):
if x < numberofSingle:
chordM.insert(singleNoteChord[x][0])
chordM.insert(chordArray[x][2],chordArray[x][0])
for x in range(0,len(melodyArray)):
melodyM.insert(melodyArray[x][-1])
return chordM, melodyM
def findScale(chord1, melodyArray, indexH, indexL):
rootNote = str(chord1.findRoot())[:-1] #Beginning to end - 1 to take out the number
default = False
if indexH == -1 or indexL == -1:
default = True
if chord1.isMajorTriad():
sc1 = scale.MajorScale(str(rootNote))
else:
sc1 = scale.MinorScale(str(rootNote))
if default:
genScale = [p for p in sc1.getPitches("{}5".format(rootNote),"{}6".format(rootNote))]
else:
genScale = [p for p in sc1.getPitches("{}".format(melodyArray[indexL][0].transpose(-11)),"{}".format(melodyArray[indexH][0].transpose(11)))]
#print default,chord1,genScale
#genScale will default to the root scale if no melodies are associated with it
return genScale
def findWindow(offset,duration):
start = offset
end = duration.quarterLength + offset
return start,end
def findMelodiesAffected(start,end,melody,index):
counter = index
melodyAffected = []
melodyUnaffected = []
highestPitch = 0
indexHighest = -1
lowestPitch = 10000
indexLowest = -1
tempIndex = 0
print "start: " + str(start)
print "end: " + str(end)
print "index: " + str(index)
for x in range(index,len(melody)):
print "considering: " + str(melody[x][2])
counter = x
if melody[x][2] >= end: #stop if the offset is past the end offset of chord
break
if melody[x][2] >= start and melody[x][2] < end:
melodyAffected.append(melody[x])
weight = int(str(melody[x][0])[-1]) * 12 + melody[x][3] #Octave * 12 + offset for unique number identifying key
if weight < lowestPitch:
lowestPitch = weight
indexLowest = tempIndex
if weight > highestPitch:
highestPitch = weight
indexHighest = tempIndex
tempIndex = tempIndex + 1
counter = counter + 1
elif melody[x][2] < start or melody[x][2] >= end:
melodyUnaffected.append(melody[x])
counter = counter + 1
print "unaffected" + str(melody[x])
return counter, melodyAffected, indexHighest, indexLowest, melodyUnaffected #return the array here with the counter
#need to also keep track of pitch so that we can give the range of pitches to findScale
def practice1():
sc = stream.Score(id="MainScore")
n1 = note.Note('G')
n1.offset = 5
m1 = stream.Measure()
#m1.timeSignature = meter.TimeSignature('2/4')
m1.insert(n1)
m1.offset = 5
print n1.offset
print m1.offset
#m1.insert(0,n1)
#print m1.activeSite
#print n1.activeSite
sc.insert(m1)
sc.flat.show()
def pr2(m):
noa = note.Note('G')
noa.offset = 5
noa.duration.type="half"
m.insert(100,noa)
def pitchPractice():
ne = note.Note('C4')
ne1 = note.Note('G5')
ads= note.pitch
print ne.pitchClass
print ne1.pitchClass
print abs(-1)
a = ads.transpose(1)
print a
def streamP():
c5 = converter.parse('dreamchaser.xml')
print c5.parts[1].measure(0)
#streamP()
#pitchPractice()
#practice1()
#streamCreate()
createNewStream()
#findAllMeasures()
#findAllNotesWithinMeasure()
#flatStream()
#noteattributes()
#reconstruction()
#streamCreate()
#main()
#findScale() | n = note.Note(pitch)
n.duration = duration
n.offset = offset
return n | identifier_body |
Mash.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 3 23:35:49 2016
@author: Beiwen Liu
"""
from music21 import *
c = converter.parse('./ChopinNocturneOp9No2.xml') # Get Info from Original Sheet
sc = stream.Score(id="MainScore") # New Stream
def switch(): | number = len(c2.parts[1]) #number of measures
number2 = len(c2.parts[1].measure(4))
c = c.parts[0].flat
print c2.parts[1].measure(4).notes[1].duration.type
print number2
#s = stream.Score()
#s.insert(0,c)
#s.insert(1,c2)
#s.show()
def determineChord(measure):
#measure.notes[x].offset for offset
for x in range(0,len(measure.notes)):
print "chord" + str(measure.notes[x].offset)
def determineMelody(measure):
for x in range(0,len(measure.notes)):
print "melody" + str(measure.notes[x].offset)
def main():
c = converter.parse('./ChopinNocturneOp9No2.xml')
melody = c.parts[0] #Melody part
chord = c.parts[1] #Chord part
measureLength = len(melody) #Number of measures
print "length: " + str(measureLength)
for x in range(1,measureLength): #For all measures
print "index" + str(x)
if (len(chord.measure(x).notes) == 0 and len(melody.measure(x).notes) == 0):
break
determineChord(chord.measure(x))
determineMelody(melody.measure(x))
def streamCreate():
c = chord.Chord(['C4','E4','G4'])
n = note.Note('F#4')
m = stream.Measure()
m.append(n)
m.append(c)
n = stream.Measure()
n.append(n)
n.append(c)
"""
p1 = stream.Part()
n2 = note.Note('C6')
p1.append(n2)
p2 = stream.Part()
n3 = note.Note('G6')
p2.append(n3)"""
s = stream.Stream()
s.insert(0,m)
s.show()
def createNewStream():
c = converter.parse('./bumblebee.xml') # Get Info from Original Sheet
c5 = converter.parse('./waitingontheworld.xml')
#sc = stream.Score(id="MainScore") # New Stream
melody = stream.Part(id="part0") # Melody part
chord1 = stream.Part(id="part1") # Chord part
findAllMeasuresWithinParts(c.parts[0],c5.parts[1],chord1,melody)
"""
timeSignature = c.parts[0].measure(1).getContextByClass('TimeSignature') #Get Time Signature
keySignature = c.parts[1].measure(1).getContextByClass('KeySignature') #Get Key Signature
#melody.timeSignature = timeSignature
#melody.keySignature = keySignature
#chord1.keySignature = keySignature
#chord1.timeSignature = timeSignature
#sc.timeSignature = timeSignature
#sc.keySignature = keySignature
m1 = stream.Measure(number=1)
m1.keySignature = keySignature
m1.timeSignature = timeSignature
m1.append(note.Note('C'))
m2 = stream.Measure(number=2)
m2.append(note.Note('D'))
melody.append([m1,m2])
m11 = stream.Measure(number=1)
m11.keySignature = keySignature
m11.timeSignature = timeSignature
m11.append(note.Note('E'))
m12 = stream.Measure(number=2)
m12.append(note.Note('F'))
chord1.append([m11,m12])
"""
sc.insert(0,melody)
sc.insert(0,chord1)
sc.show()
def noteattributes():
c = converter.parse('./ChopinNocturneOp9No2.xml')
pitch = c.parts[0].measure(1).notes[0].pitch
duration = c.parts[0].measure(1).notes[0].duration
offset = c.parts[0].measure(1).notes[0].offset
print pitch,duration,offset
def noteCreation(pitch, duration, offset):
n = note.Note(pitch)
n.duration = duration
n.offset = offset
return n
def findAllMeasuresWithinParts(melody,chords,newChord,newMelody):
chordMeasures = chords.measure(0)
if chordMeasures is None:
chordMeasures = chords.measure(1)
c1 = chordMeasures
melodyMeasures = melody.measure(0)
if melodyMeasures is None:
melodyMeasures = melody.measure(1)
m1 = melodyMeasures
end = False
counter = 0
melodyList = []
chordList = []
while end == False:
if c1 is None or m1 is None:
end = True
else:
c2 = stream.Measure(number = counter)
c2.offset = c1.offset
c2.timeSignature = c1.timeSignature
m2 = stream.Measure(number = counter)
m2.offset = m1.offset
m2.timeSignature = m1.timeSignature
chordArray, singleNoteChord = findAllNotesWithinMeasureChord(c1)
melodyArray = findAllNotesWithinMeasureMelody(m1)
c2,m2 = createMashForMeasure(chordArray, melodyArray, singleNoteChord, c2, m2)
chordList.append(c2)
melodyList.append(m2)
c1 = c1.next('Measure')
m1 = m1.next('Measure')
print counter
counter = counter + 1
newChord.append(chordList)
newMelody.append(melodyList)
def findAllNotesWithinMeasureChord(measure):
totalList = []
totalList2 = []
for x in measure.flat.recurse():
print x
if type(x) == chord.Chord:
totalList.append([x,x.duration,x.offset])
#print x,x.duration,x.offset
elif type(x) == note.Note:
totalList2.append([x,x.duration,x.offset])
return totalList, totalList2
def findAllNotesWithinMeasureMelody(measure):
totalList = []
for x in measure.flat.recurse():
if type(x) == note.Note:
totalList.append([x.pitch,x.duration,x.offset,x.pitchClass,x])
#print x.pitch,x.duration,x.offset
return totalList
def createMashForMeasure(chordArray, melodyArray, singleNoteChord, chordM, melodyM):
enter = False
newMelodyArray = []
if (len(chordArray) > 0 and len(melodyArray) > 0):
index = 0
for x in range(0,len(chordArray)): #For each chord in this measure, find affected melody, and change them
enter = True
start,end = findWindow(chordArray[x][2],chordArray[x][1]) #Find the window size of specific chord
index, melodyAffected, indexHighest, indexLowest, melodyUnaffected = findMelodiesAffected(start,end,melodyArray,index) #find melodies that are within chord offset + duration
genScale = findScale(chordArray[x][0], melodyAffected, indexHighest, indexLowest) #find scale according to the highest and lowest pitches of melody within chord window
newTempMelodyArray = createNewMelody(chordArray[x], genScale, melodyAffected, melodyUnaffected)
for z in range(0,len(newTempMelodyArray)):
newMelodyArray.append(newTempMelodyArray[z])
if enter:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, newMelodyArray)
enter = False
else:
return createNewMeasure(chordArray,chordM,melodyM, singleNoteChord, melodyArray)
def createNewMelody(genChord, genScale, melodyAffected, melodyUnaffected): #This will generate a new melody array using the scales from the chord
print "---"
print genChord
newMelody = []
if len(melodyAffected) > 0: #If there is any melody affected
for y in range(0,len(melodyAffected)):
minIndex = -1
minValue = 10000
actualValue = 0
value = int(str(melodyAffected[y][0])[-1]) * 12 + melodyAffected[y][3]
for x in range(0,len(genScale)):
tempNum = int(str(genScale[x])[-1]) * 12 + genScale[x].pitchClass
if abs(tempNum - value) < minValue:
minValue = abs(tempNum - value) #used to compare closest
minIndex = x #used to find which index in scale
actualValue = tempNum - value #used to tranpose the target note
if (minValue != 0):
tempNote = melodyAffected[y][-1].transpose(actualValue)
melodyAffected[y][-1] = tempNote
newMelody.append(melodyAffected[y])
for z in range(0,len(melodyUnaffected)):
newMelody.append(melodyUnaffected[z])
#print melodyUnaffected
print newMelody
return newMelody
def createNewMeasure(chordArray,chordM,melodyM,singleNoteChord, melodyArray): #Generate measure here
numberofSingle = len(singleNoteChord)
for x in range(0,len(chordArray)):
if x < numberofSingle:
chordM.insert(singleNoteChord[x][0])
chordM.insert(chordArray[x][2],chordArray[x][0])
for x in range(0,len(melodyArray)):
melodyM.insert(melodyArray[x][-1])
return chordM, melodyM
def findScale(chord1, melodyArray, indexH, indexL):
rootNote = str(chord1.findRoot())[:-1] #Beginning to end - 1 to take out the number
default = False
if indexH == -1 or indexL == -1:
default = True
if chord1.isMajorTriad():
sc1 = scale.MajorScale(str(rootNote))
else:
sc1 = scale.MinorScale(str(rootNote))
if default:
genScale = [p for p in sc1.getPitches("{}5".format(rootNote),"{}6".format(rootNote))]
else:
genScale = [p for p in sc1.getPitches("{}".format(melodyArray[indexL][0].transpose(-11)),"{}".format(melodyArray[indexH][0].transpose(11)))]
#print default,chord1,genScale
#genScale will default to the root scale if no melodies are associated with it
return genScale
def findWindow(offset,duration):
start = offset
end = duration.quarterLength + offset
return start,end
def findMelodiesAffected(start,end,melody,index):
counter = index
melodyAffected = []
melodyUnaffected = []
highestPitch = 0
indexHighest = -1
lowestPitch = 10000
indexLowest = -1
tempIndex = 0
print "start: " + str(start)
print "end: " + str(end)
print "index: " + str(index)
for x in range(index,len(melody)):
print "considering: " + str(melody[x][2])
counter = x
if melody[x][2] >= end: #stop if the offset is past the end offset of chord
break
if melody[x][2] >= start and melody[x][2] < end:
melodyAffected.append(melody[x])
weight = int(str(melody[x][0])[-1]) * 12 + melody[x][3] #Octave * 12 + offset for unique number identifying key
if weight < lowestPitch:
lowestPitch = weight
indexLowest = tempIndex
if weight > highestPitch:
highestPitch = weight
indexHighest = tempIndex
tempIndex = tempIndex + 1
counter = counter + 1
elif melody[x][2] < start or melody[x][2] >= end:
melodyUnaffected.append(melody[x])
counter = counter + 1
print "unaffected" + str(melody[x])
return counter, melodyAffected, indexHighest, indexLowest, melodyUnaffected #return the array here with the counter
#need to also keep track of pitch so that we can give the range of pitches to findScale
def practice1():
sc = stream.Score(id="MainScore")
n1 = note.Note('G')
n1.offset = 5
m1 = stream.Measure()
#m1.timeSignature = meter.TimeSignature('2/4')
m1.insert(n1)
m1.offset = 5
print n1.offset
print m1.offset
#m1.insert(0,n1)
#print m1.activeSite
#print n1.activeSite
sc.insert(m1)
sc.flat.show()
def pr2(m):
noa = note.Note('G')
noa.offset = 5
noa.duration.type="half"
m.insert(100,noa)
def pitchPractice():
ne = note.Note('C4')
ne1 = note.Note('G5')
ads= note.pitch
print ne.pitchClass
print ne1.pitchClass
print abs(-1)
a = ads.transpose(1)
print a
def streamP():
c5 = converter.parse('dreamchaser.xml')
print c5.parts[1].measure(0)
#streamP()
#pitchPractice()
#practice1()
#streamCreate()
createNewStream()
#findAllMeasures()
#findAllNotesWithinMeasure()
#flatStream()
#noteattributes()
#reconstruction()
#streamCreate()
#main()
#findScale() | c = converter.parse('./KL_Rains_of_Castamere.xml')
c2 = converter.parse('./FurElise.xml') | random_line_split |
chain.go | // Copyright 2017, Square, Inc.
// Package chain implements a job chain. It provides the ability to traverse a chain
// and run all of the jobs in it.
package chain
import (
"fmt"
"sync"
"time"
"github.com/square/spincycle/proto"
)
// chain represents a job chain and some meta information about it.
type chain struct {
JobChain *proto.JobChain `json:"jobChain"`
Running map[string]RunningJob `json:"running"` // keyed on job ID => start time (Unix nano)
N uint
*sync.RWMutex
}
type RunningJob struct {
N uint `json:"n"`
StartTs int64 `json:"startTs"`
}
// NewChain takes a JobChain proto (from the RM) and turns it into a Chain that
// the JR can use.
func NewChain(jc *proto.JobChain) *chain {
// Set the state of all jobs in the chain to "Pending".
for jobName, job := range jc.Jobs {
job.State = proto.STATE_PENDING
job.Data = map[string]interface{}{}
jc.Jobs[jobName] = job
}
return &chain{
JobChain: jc,
Running: map[string]RunningJob{},
N: 0,
RWMutex: &sync.RWMutex{},
}
}
// ErrInvalidChain is the error returned when a chain is not valid.
type ErrInvalidChain struct {
Message string
}
func (e ErrInvalidChain) Error() string {
return e.Error()
}
// FirstJob finds the job in the chain with indegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) FirstJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.indegreeCounts() {
if count == 0 {
jobIds = append(jobIds, jobId)
}
}
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d first job(s), should "+
"have one (first job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// LastJob finds the job in the chain with outdegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) LastJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.outdegreeCounts() {
if count == 0 {
jobIds = append(jobIds, jobId)
}
}
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d last job(s), should "+
"have one (last job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// NextJobs finds all of the jobs adjacent to the given job.
func (c *chain) NextJobs(jobId string) proto.Jobs {
var nextJobs proto.Jobs
if nextJobIds, ok := c.JobChain.AdjacencyList[jobId]; ok {
for _, id := range nextJobIds {
if val, ok := c.JobChain.Jobs[id]; ok {
nextJobs = append(nextJobs, val)
}
}
}
return nextJobs
}
// PreviousJobs finds all of the immediately previous jobs to a given job.
func (c *chain) PreviousJobs(jobId string) proto.Jobs {
var prevJobs proto.Jobs
for curJob, nextJobs := range c.JobChain.AdjacencyList {
if contains(nextJobs, jobId) {
if val, ok := c.JobChain.Jobs[curJob]; ok {
prevJobs = append(prevJobs, val)
}
}
}
return prevJobs
}
// JobIsReady returns whether or not a job is ready to run. A job is considered
// ready to run if all of its previous jobs are complete. If any previous jobs
// are not complete, the job is not ready to run.
func (c *chain) JobIsReady(jobId string) bool {
isReady := true
for _, job := range c.PreviousJobs(jobId) {
if job.State != proto.STATE_COMPLETE {
isReady = false
}
}
return isReady
}
// IsDone returns two booleans - the first one indicates whether or not the
// chain is done, and the second one indicates whether or not the chain is
// complete.
//
// A chain is done running if there are no more jobs in it that can run. This
// can happen if all of the jobs in the chain or complete, or if some or all
// of the jobs in the chain failed.
//
// A chain is complete if every job in it completed successfully.
func (c *chain) IsDone() (done bool, complete bool) {
done = true
complete = true
pendingJobs := proto.Jobs{}
// Loop through every job in the chain and act on its state. Keep
// track of the jobs that aren't running or in a finished state so
// that we can later check to see if they are capable of running.
LOOP:
for _, job := range c.JobChain.Jobs {
switch job.State {
case proto.STATE_RUNNING:
// If any jobs are running, the chain can't be done
// or complete, so return false for both now.
return false, false
case proto.STATE_COMPLETE:
// Move on to the next job.
continue LOOP
case proto.STATE_FAIL:
// do nothing
default:
// Any job that's not running, complete, or failed.
pendingJobs = append(pendingJobs, job)
}
// We can only arrive here if a job is not complete. If there
// is at least one job that is not complete, the whole chain is
// not complete. The chain could still be done, though, so we
// aren't ready to return yet.
complete = false
}
// For each pending job, check to see if all of its previous jobs
// completed. If they did, there's no reason the pending job can't run.
for _, job := range pendingJobs {
complete = false
allPrevComplete := true
for _, prevJob := range c.PreviousJobs(job.Id) {
if prevJob.State != proto.STATE_COMPLETE {
allPrevComplete = false
// We can break out of this loop if a single
// one of the previous jobs is not complete.
break
}
}
// If all of the previous jobs of a pending job are complete, the
// chain can't be complete because the pending job can still run.
if allPrevComplete == true {
return false, complete
}
}
return
}
// Validate checks if a job chain is valid. It returns an error if it's not.
func (c *chain) Validate() error {
// Make sure the adjacency list is valid.
if !c.adjacencyListIsValid() {
return ErrInvalidChain{
Message: "invalid adjacency list: some jobs exist in " +
"chain.AdjacencyList but not chain.Jobs",
}
}
// Make sure there is one first job.
_, err := c.FirstJob()
if err != nil {
return err
}
// Make sure there is one last job.
_, err = c.LastJob()
if err != nil {
return err
}
// Make sure there are no cycles.
if !c.isAcyclic() {
return ErrInvalidChain{Message: "chain is cyclic"}
}
return nil
}
// RequestId returns the request id of the job chain.
func (c *chain) RequestId() string {
return c.JobChain.RequestId
}
// JobState returns the state of a given job.
func (c *chain) JobState(jobId string) byte {
c.RLock() // -- lock
defer c.RUnlock() // -- unlock
return c.JobChain.Jobs[jobId].State
}
// Set the state of a job in the chain.
func (c *chain) SetJobState(jobId string, state byte) {
c.Lock() // -- lock
j := c.JobChain.Jobs[jobId]
j.State = state
c.JobChain.Jobs[jobId] = j
// Keep chain.Running up to date
if state == proto.STATE_RUNNING {
c.N += 1 // Nth job to run
// @todo: on sequence retry, we need to N-- for all jobs in the sequence
c.Running[jobId] = RunningJob{
N: c.N,
StartTs: time.Now().UnixNano(),
}
} else {
// STATE_RUNNING is the only running state, and it's not that, so the
// job must not be running.
delete(c.Running, jobId)
}
c.Unlock() // -- unlock
}
// SetState sets the chain's state.
func (c *chain) SetState(state byte) {
c.Lock() // -- lock
c.JobChain.State = state
c.Unlock() // -- unlock
}
// -------------------------------------------------------------------------- //
// indegreeCounts finds the indegree for each job in the chain.
func (c *chain) indegreeCounts() map[string]int {
indegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
indegreeCounts[job] = 0
}
for _, nextJobs := range c.JobChain.AdjacencyList {
for _, nextJob := range nextJobs {
if _, ok := indegreeCounts[nextJob]; ok {
indegreeCounts[nextJob] += 1
}
}
}
return indegreeCounts
}
// outdegreeCounts finds the outdegree for each job in the chain.
func (c *chain) outdegreeCounts() map[string]int {
outdegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
outdegreeCounts[job] = len(c.JobChain.AdjacencyList[job])
}
return outdegreeCounts
}
// isAcyclic returns whether or not a job chain is acyclic. It essentially
// works by moving through the job chain from the top (the first job)
// down to the bottom (the last job), and if there are any cycles in the
// chain (dependencies that go in the opposite direction...i.e., bottom to
// top), it returns false.
func (c *chain) isAcyclic() bool |
// adjacencyListIsValid returns whether or not the chain's adjacency list is
// not valid. An adjacency list is not valid if any of the jobs in it do not
// exist in chain.Jobs.
func (c *chain) adjacencyListIsValid() bool {
for job, adjJobs := range c.JobChain.AdjacencyList {
if _, ok := c.JobChain.Jobs[job]; !ok {
return false
}
for _, adjJob := range adjJobs {
if _, ok := c.JobChain.Jobs[adjJob]; !ok {
return false
}
}
}
return true
}
// contains returns whether or not a slice of strings contains a specific string.
func contains(s []string, t string) bool {
for _, i := range s {
if i == t {
return true
}
}
return false
}
| {
indegreeCounts := c.indegreeCounts()
queue := make(map[string]struct{})
// Add all of the first jobs to the queue (in reality there should
// only be 1).
for job, indegreeCount := range indegreeCounts {
if indegreeCount == 0 {
queue[job] = struct{}{}
}
}
jobsVisited := 0
for {
// Break when there are no more jobs in the queue. This happens
// when either there are no first jobs, or when a cycle
// prevents us from enqueuing a job below.
if len(queue) == 0 {
break
}
// Get a job from the queue.
var curJob string
for k := range queue {
curJob = k
}
delete(queue, curJob)
// Visit each job adjacent to the current job and decrement
// their indegree count by 1. When a job's indegree count
// becomes 0, add it to the queue.
//
// If there is a cycle somewhere, at least one jobs indegree
// count will never reach 0, and therefore it will never be
// enqueued and visited.
for _, adjJob := range c.JobChain.AdjacencyList[curJob] {
indegreeCounts[adjJob] -= 1
if indegreeCounts[adjJob] == 0 {
queue[adjJob] = struct{}{}
}
}
// Keep track of the number of jobs we've visited. If there is
// a cycle in the chain, we won't end up visiting some jobs.
jobsVisited += 1
}
if jobsVisited != len(c.JobChain.Jobs) {
return false
}
return true
} | identifier_body |
chain.go | // Copyright 2017, Square, Inc.
// Package chain implements a job chain. It provides the ability to traverse a chain
// and run all of the jobs in it.
package chain
import (
"fmt"
"sync"
"time"
"github.com/square/spincycle/proto"
)
// chain represents a job chain and some meta information about it.
type chain struct {
JobChain *proto.JobChain `json:"jobChain"`
Running map[string]RunningJob `json:"running"` // keyed on job ID => start time (Unix nano)
N uint
*sync.RWMutex
}
type RunningJob struct {
N uint `json:"n"`
StartTs int64 `json:"startTs"`
}
// NewChain takes a JobChain proto (from the RM) and turns it into a Chain that
// the JR can use.
func NewChain(jc *proto.JobChain) *chain {
// Set the state of all jobs in the chain to "Pending".
for jobName, job := range jc.Jobs {
job.State = proto.STATE_PENDING
job.Data = map[string]interface{}{}
jc.Jobs[jobName] = job
}
return &chain{
JobChain: jc,
Running: map[string]RunningJob{},
N: 0,
RWMutex: &sync.RWMutex{},
}
}
// ErrInvalidChain is the error returned when a chain is not valid.
type ErrInvalidChain struct {
Message string
}
func (e ErrInvalidChain) Error() string {
return e.Error()
}
// FirstJob finds the job in the chain with indegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) FirstJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.indegreeCounts() {
if count == 0 {
jobIds = append(jobIds, jobId)
}
}
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d first job(s), should "+
"have one (first job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// LastJob finds the job in the chain with outdegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) LastJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.outdegreeCounts() |
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d last job(s), should "+
"have one (last job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// NextJobs finds all of the jobs adjacent to the given job.
func (c *chain) NextJobs(jobId string) proto.Jobs {
var nextJobs proto.Jobs
if nextJobIds, ok := c.JobChain.AdjacencyList[jobId]; ok {
for _, id := range nextJobIds {
if val, ok := c.JobChain.Jobs[id]; ok {
nextJobs = append(nextJobs, val)
}
}
}
return nextJobs
}
// PreviousJobs finds all of the immediately previous jobs to a given job.
func (c *chain) PreviousJobs(jobId string) proto.Jobs {
var prevJobs proto.Jobs
for curJob, nextJobs := range c.JobChain.AdjacencyList {
if contains(nextJobs, jobId) {
if val, ok := c.JobChain.Jobs[curJob]; ok {
prevJobs = append(prevJobs, val)
}
}
}
return prevJobs
}
// JobIsReady returns whether or not a job is ready to run. A job is considered
// ready to run if all of its previous jobs are complete. If any previous jobs
// are not complete, the job is not ready to run.
func (c *chain) JobIsReady(jobId string) bool {
isReady := true
for _, job := range c.PreviousJobs(jobId) {
if job.State != proto.STATE_COMPLETE {
isReady = false
}
}
return isReady
}
// IsDone returns two booleans - the first one indicates whether or not the
// chain is done, and the second one indicates whether or not the chain is
// complete.
//
// A chain is done running if there are no more jobs in it that can run. This
// can happen if all of the jobs in the chain or complete, or if some or all
// of the jobs in the chain failed.
//
// A chain is complete if every job in it completed successfully.
func (c *chain) IsDone() (done bool, complete bool) {
done = true
complete = true
pendingJobs := proto.Jobs{}
// Loop through every job in the chain and act on its state. Keep
// track of the jobs that aren't running or in a finished state so
// that we can later check to see if they are capable of running.
LOOP:
for _, job := range c.JobChain.Jobs {
switch job.State {
case proto.STATE_RUNNING:
// If any jobs are running, the chain can't be done
// or complete, so return false for both now.
return false, false
case proto.STATE_COMPLETE:
// Move on to the next job.
continue LOOP
case proto.STATE_FAIL:
// do nothing
default:
// Any job that's not running, complete, or failed.
pendingJobs = append(pendingJobs, job)
}
// We can only arrive here if a job is not complete. If there
// is at least one job that is not complete, the whole chain is
// not complete. The chain could still be done, though, so we
// aren't ready to return yet.
complete = false
}
// For each pending job, check to see if all of its previous jobs
// completed. If they did, there's no reason the pending job can't run.
for _, job := range pendingJobs {
complete = false
allPrevComplete := true
for _, prevJob := range c.PreviousJobs(job.Id) {
if prevJob.State != proto.STATE_COMPLETE {
allPrevComplete = false
// We can break out of this loop if a single
// one of the previous jobs is not complete.
break
}
}
// If all of the previous jobs of a pending job are complete, the
// chain can't be complete because the pending job can still run.
if allPrevComplete == true {
return false, complete
}
}
return
}
// Validate checks if a job chain is valid. It returns an error if it's not.
func (c *chain) Validate() error {
// Make sure the adjacency list is valid.
if !c.adjacencyListIsValid() {
return ErrInvalidChain{
Message: "invalid adjacency list: some jobs exist in " +
"chain.AdjacencyList but not chain.Jobs",
}
}
// Make sure there is one first job.
_, err := c.FirstJob()
if err != nil {
return err
}
// Make sure there is one last job.
_, err = c.LastJob()
if err != nil {
return err
}
// Make sure there are no cycles.
if !c.isAcyclic() {
return ErrInvalidChain{Message: "chain is cyclic"}
}
return nil
}
// RequestId returns the request id of the job chain.
func (c *chain) RequestId() string {
return c.JobChain.RequestId
}
// JobState returns the state of a given job.
func (c *chain) JobState(jobId string) byte {
c.RLock() // -- lock
defer c.RUnlock() // -- unlock
return c.JobChain.Jobs[jobId].State
}
// Set the state of a job in the chain.
func (c *chain) SetJobState(jobId string, state byte) {
c.Lock() // -- lock
j := c.JobChain.Jobs[jobId]
j.State = state
c.JobChain.Jobs[jobId] = j
// Keep chain.Running up to date
if state == proto.STATE_RUNNING {
c.N += 1 // Nth job to run
// @todo: on sequence retry, we need to N-- for all jobs in the sequence
c.Running[jobId] = RunningJob{
N: c.N,
StartTs: time.Now().UnixNano(),
}
} else {
// STATE_RUNNING is the only running state, and it's not that, so the
// job must not be running.
delete(c.Running, jobId)
}
c.Unlock() // -- unlock
}
// SetState sets the chain's state.
func (c *chain) SetState(state byte) {
c.Lock() // -- lock
c.JobChain.State = state
c.Unlock() // -- unlock
}
// -------------------------------------------------------------------------- //
// indegreeCounts finds the indegree for each job in the chain.
func (c *chain) indegreeCounts() map[string]int {
indegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
indegreeCounts[job] = 0
}
for _, nextJobs := range c.JobChain.AdjacencyList {
for _, nextJob := range nextJobs {
if _, ok := indegreeCounts[nextJob]; ok {
indegreeCounts[nextJob] += 1
}
}
}
return indegreeCounts
}
// outdegreeCounts finds the outdegree for each job in the chain.
func (c *chain) outdegreeCounts() map[string]int {
outdegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
outdegreeCounts[job] = len(c.JobChain.AdjacencyList[job])
}
return outdegreeCounts
}
// isAcyclic returns whether or not a job chain is acyclic. It essentially
// works by moving through the job chain from the top (the first job)
// down to the bottom (the last job), and if there are any cycles in the
// chain (dependencies that go in the opposite direction...i.e., bottom to
// top), it returns false.
func (c *chain) isAcyclic() bool {
indegreeCounts := c.indegreeCounts()
queue := make(map[string]struct{})
// Add all of the first jobs to the queue (in reality there should
// only be 1).
for job, indegreeCount := range indegreeCounts {
if indegreeCount == 0 {
queue[job] = struct{}{}
}
}
jobsVisited := 0
for {
// Break when there are no more jobs in the queue. This happens
// when either there are no first jobs, or when a cycle
// prevents us from enqueuing a job below.
if len(queue) == 0 {
break
}
// Get a job from the queue.
var curJob string
for k := range queue {
curJob = k
}
delete(queue, curJob)
// Visit each job adjacent to the current job and decrement
// their indegree count by 1. When a job's indegree count
// becomes 0, add it to the queue.
//
// If there is a cycle somewhere, at least one jobs indegree
// count will never reach 0, and therefore it will never be
// enqueued and visited.
for _, adjJob := range c.JobChain.AdjacencyList[curJob] {
indegreeCounts[adjJob] -= 1
if indegreeCounts[adjJob] == 0 {
queue[adjJob] = struct{}{}
}
}
// Keep track of the number of jobs we've visited. If there is
// a cycle in the chain, we won't end up visiting some jobs.
jobsVisited += 1
}
if jobsVisited != len(c.JobChain.Jobs) {
return false
}
return true
}
// adjacencyListIsValid returns whether or not the chain's adjacency list is
// not valid. An adjacency list is not valid if any of the jobs in it do not
// exist in chain.Jobs.
func (c *chain) adjacencyListIsValid() bool {
for job, adjJobs := range c.JobChain.AdjacencyList {
if _, ok := c.JobChain.Jobs[job]; !ok {
return false
}
for _, adjJob := range adjJobs {
if _, ok := c.JobChain.Jobs[adjJob]; !ok {
return false
}
}
}
return true
}
// contains returns whether or not a slice of strings contains a specific string.
func contains(s []string, t string) bool {
for _, i := range s {
if i == t {
return true
}
}
return false
}
| {
if count == 0 {
jobIds = append(jobIds, jobId)
}
} | conditional_block |
chain.go | // Copyright 2017, Square, Inc.
// Package chain implements a job chain. It provides the ability to traverse a chain
// and run all of the jobs in it.
package chain
import (
"fmt"
"sync"
"time"
"github.com/square/spincycle/proto"
)
// chain represents a job chain and some meta information about it.
type chain struct {
JobChain *proto.JobChain `json:"jobChain"`
Running map[string]RunningJob `json:"running"` // keyed on job ID => start time (Unix nano)
N uint
*sync.RWMutex
}
type RunningJob struct {
N uint `json:"n"`
StartTs int64 `json:"startTs"`
}
// NewChain takes a JobChain proto (from the RM) and turns it into a Chain that
// the JR can use.
func NewChain(jc *proto.JobChain) *chain {
// Set the state of all jobs in the chain to "Pending".
for jobName, job := range jc.Jobs {
job.State = proto.STATE_PENDING
job.Data = map[string]interface{}{}
jc.Jobs[jobName] = job
}
return &chain{
JobChain: jc,
Running: map[string]RunningJob{},
N: 0,
RWMutex: &sync.RWMutex{},
}
}
// ErrInvalidChain is the error returned when a chain is not valid.
type ErrInvalidChain struct {
Message string
}
func (e ErrInvalidChain) Error() string {
return e.Error()
}
// FirstJob finds the job in the chain with indegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) FirstJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.indegreeCounts() {
if count == 0 {
jobIds = append(jobIds, jobId)
}
}
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d first job(s), should "+
"have one (first job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// LastJob finds the job in the chain with outdegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) LastJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.outdegreeCounts() {
if count == 0 {
jobIds = append(jobIds, jobId)
}
}
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d last job(s), should "+
"have one (last job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// NextJobs finds all of the jobs adjacent to the given job.
func (c *chain) NextJobs(jobId string) proto.Jobs {
var nextJobs proto.Jobs
if nextJobIds, ok := c.JobChain.AdjacencyList[jobId]; ok {
for _, id := range nextJobIds {
if val, ok := c.JobChain.Jobs[id]; ok {
nextJobs = append(nextJobs, val)
}
}
}
return nextJobs
}
// PreviousJobs finds all of the immediately previous jobs to a given job.
func (c *chain) PreviousJobs(jobId string) proto.Jobs {
var prevJobs proto.Jobs
for curJob, nextJobs := range c.JobChain.AdjacencyList {
if contains(nextJobs, jobId) {
if val, ok := c.JobChain.Jobs[curJob]; ok {
prevJobs = append(prevJobs, val)
}
}
}
return prevJobs
}
// JobIsReady returns whether or not a job is ready to run. A job is considered
// ready to run if all of its previous jobs are complete. If any previous jobs
// are not complete, the job is not ready to run.
func (c *chain) JobIsReady(jobId string) bool {
isReady := true
for _, job := range c.PreviousJobs(jobId) {
if job.State != proto.STATE_COMPLETE {
isReady = false
}
}
return isReady
}
// IsDone returns two booleans - the first one indicates whether or not the
// chain is done, and the second one indicates whether or not the chain is
// complete.
//
// A chain is done running if there are no more jobs in it that can run. This
// can happen if all of the jobs in the chain or complete, or if some or all
// of the jobs in the chain failed.
//
// A chain is complete if every job in it completed successfully.
func (c *chain) IsDone() (done bool, complete bool) {
done = true
complete = true
pendingJobs := proto.Jobs{}
// Loop through every job in the chain and act on its state. Keep
// track of the jobs that aren't running or in a finished state so
// that we can later check to see if they are capable of running.
LOOP:
for _, job := range c.JobChain.Jobs {
switch job.State {
case proto.STATE_RUNNING:
// If any jobs are running, the chain can't be done
// or complete, so return false for both now.
return false, false
case proto.STATE_COMPLETE:
// Move on to the next job.
continue LOOP
case proto.STATE_FAIL:
// do nothing
default:
// Any job that's not running, complete, or failed.
pendingJobs = append(pendingJobs, job)
}
// We can only arrive here if a job is not complete. If there
// is at least one job that is not complete, the whole chain is
// not complete. The chain could still be done, though, so we
// aren't ready to return yet.
complete = false
}
// For each pending job, check to see if all of its previous jobs
// completed. If they did, there's no reason the pending job can't run.
for _, job := range pendingJobs {
complete = false
allPrevComplete := true
for _, prevJob := range c.PreviousJobs(job.Id) {
if prevJob.State != proto.STATE_COMPLETE {
allPrevComplete = false
// We can break out of this loop if a single
// one of the previous jobs is not complete.
break
}
}
// If all of the previous jobs of a pending job are complete, the
// chain can't be complete because the pending job can still run.
if allPrevComplete == true {
return false, complete
}
}
return
}
// Validate checks if a job chain is valid. It returns an error if it's not.
func (c *chain) Validate() error {
// Make sure the adjacency list is valid.
if !c.adjacencyListIsValid() {
return ErrInvalidChain{
Message: "invalid adjacency list: some jobs exist in " +
"chain.AdjacencyList but not chain.Jobs",
}
}
| _, err := c.FirstJob()
if err != nil {
return err
}
// Make sure there is one last job.
_, err = c.LastJob()
if err != nil {
return err
}
// Make sure there are no cycles.
if !c.isAcyclic() {
return ErrInvalidChain{Message: "chain is cyclic"}
}
return nil
}
// RequestId returns the request id of the job chain.
func (c *chain) RequestId() string {
return c.JobChain.RequestId
}
// JobState returns the state of a given job.
func (c *chain) JobState(jobId string) byte {
c.RLock() // -- lock
defer c.RUnlock() // -- unlock
return c.JobChain.Jobs[jobId].State
}
// Set the state of a job in the chain.
func (c *chain) SetJobState(jobId string, state byte) {
c.Lock() // -- lock
j := c.JobChain.Jobs[jobId]
j.State = state
c.JobChain.Jobs[jobId] = j
// Keep chain.Running up to date
if state == proto.STATE_RUNNING {
c.N += 1 // Nth job to run
// @todo: on sequence retry, we need to N-- for all jobs in the sequence
c.Running[jobId] = RunningJob{
N: c.N,
StartTs: time.Now().UnixNano(),
}
} else {
// STATE_RUNNING is the only running state, and it's not that, so the
// job must not be running.
delete(c.Running, jobId)
}
c.Unlock() // -- unlock
}
// SetState sets the chain's state.
func (c *chain) SetState(state byte) {
c.Lock() // -- lock
c.JobChain.State = state
c.Unlock() // -- unlock
}
// -------------------------------------------------------------------------- //
// indegreeCounts finds the indegree for each job in the chain.
func (c *chain) indegreeCounts() map[string]int {
indegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
indegreeCounts[job] = 0
}
for _, nextJobs := range c.JobChain.AdjacencyList {
for _, nextJob := range nextJobs {
if _, ok := indegreeCounts[nextJob]; ok {
indegreeCounts[nextJob] += 1
}
}
}
return indegreeCounts
}
// outdegreeCounts finds the outdegree for each job in the chain.
func (c *chain) outdegreeCounts() map[string]int {
outdegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
outdegreeCounts[job] = len(c.JobChain.AdjacencyList[job])
}
return outdegreeCounts
}
// isAcyclic returns whether or not a job chain is acyclic. It essentially
// works by moving through the job chain from the top (the first job)
// down to the bottom (the last job), and if there are any cycles in the
// chain (dependencies that go in the opposite direction...i.e., bottom to
// top), it returns false.
func (c *chain) isAcyclic() bool {
indegreeCounts := c.indegreeCounts()
queue := make(map[string]struct{})
// Add all of the first jobs to the queue (in reality there should
// only be 1).
for job, indegreeCount := range indegreeCounts {
if indegreeCount == 0 {
queue[job] = struct{}{}
}
}
jobsVisited := 0
for {
// Break when there are no more jobs in the queue. This happens
// when either there are no first jobs, or when a cycle
// prevents us from enqueuing a job below.
if len(queue) == 0 {
break
}
// Get a job from the queue.
var curJob string
for k := range queue {
curJob = k
}
delete(queue, curJob)
// Visit each job adjacent to the current job and decrement
// their indegree count by 1. When a job's indegree count
// becomes 0, add it to the queue.
//
// If there is a cycle somewhere, at least one jobs indegree
// count will never reach 0, and therefore it will never be
// enqueued and visited.
for _, adjJob := range c.JobChain.AdjacencyList[curJob] {
indegreeCounts[adjJob] -= 1
if indegreeCounts[adjJob] == 0 {
queue[adjJob] = struct{}{}
}
}
// Keep track of the number of jobs we've visited. If there is
// a cycle in the chain, we won't end up visiting some jobs.
jobsVisited += 1
}
if jobsVisited != len(c.JobChain.Jobs) {
return false
}
return true
}
// adjacencyListIsValid returns whether or not the chain's adjacency list is
// not valid. An adjacency list is not valid if any of the jobs in it do not
// exist in chain.Jobs.
func (c *chain) adjacencyListIsValid() bool {
for job, adjJobs := range c.JobChain.AdjacencyList {
if _, ok := c.JobChain.Jobs[job]; !ok {
return false
}
for _, adjJob := range adjJobs {
if _, ok := c.JobChain.Jobs[adjJob]; !ok {
return false
}
}
}
return true
}
// contains returns whether or not a slice of strings contains a specific string.
func contains(s []string, t string) bool {
for _, i := range s {
if i == t {
return true
}
}
return false
} | // Make sure there is one first job. | random_line_split |
chain.go | // Copyright 2017, Square, Inc.
// Package chain implements a job chain. It provides the ability to traverse a chain
// and run all of the jobs in it.
package chain
import (
"fmt"
"sync"
"time"
"github.com/square/spincycle/proto"
)
// chain represents a job chain and some meta information about it.
type chain struct {
JobChain *proto.JobChain `json:"jobChain"`
Running map[string]RunningJob `json:"running"` // keyed on job ID => start time (Unix nano)
N uint
*sync.RWMutex
}
type RunningJob struct {
N uint `json:"n"`
StartTs int64 `json:"startTs"`
}
// NewChain takes a JobChain proto (from the RM) and turns it into a Chain that
// the JR can use.
func NewChain(jc *proto.JobChain) *chain {
// Set the state of all jobs in the chain to "Pending".
for jobName, job := range jc.Jobs {
job.State = proto.STATE_PENDING
job.Data = map[string]interface{}{}
jc.Jobs[jobName] = job
}
return &chain{
JobChain: jc,
Running: map[string]RunningJob{},
N: 0,
RWMutex: &sync.RWMutex{},
}
}
// ErrInvalidChain is the error returned when a chain is not valid.
type ErrInvalidChain struct {
Message string
}
func (e ErrInvalidChain) Error() string {
return e.Error()
}
// FirstJob finds the job in the chain with indegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) FirstJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.indegreeCounts() {
if count == 0 {
jobIds = append(jobIds, jobId)
}
}
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d first job(s), should "+
"have one (first job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// LastJob finds the job in the chain with outdegree 0. If there is not
// exactly one of these jobs, it returns an error.
func (c *chain) LastJob() (proto.Job, error) {
var jobIds []string
for jobId, count := range c.outdegreeCounts() {
if count == 0 {
jobIds = append(jobIds, jobId)
}
}
if len(jobIds) != 1 {
return proto.Job{}, ErrInvalidChain{
Message: fmt.Sprintf("chain has %d last job(s), should "+
"have one (last job(s) = %v)", len(jobIds), jobIds),
}
}
return c.JobChain.Jobs[jobIds[0]], nil
}
// NextJobs finds all of the jobs adjacent to the given job.
func (c *chain) NextJobs(jobId string) proto.Jobs {
var nextJobs proto.Jobs
if nextJobIds, ok := c.JobChain.AdjacencyList[jobId]; ok {
for _, id := range nextJobIds {
if val, ok := c.JobChain.Jobs[id]; ok {
nextJobs = append(nextJobs, val)
}
}
}
return nextJobs
}
// PreviousJobs finds all of the immediately previous jobs to a given job.
func (c *chain) | (jobId string) proto.Jobs {
var prevJobs proto.Jobs
for curJob, nextJobs := range c.JobChain.AdjacencyList {
if contains(nextJobs, jobId) {
if val, ok := c.JobChain.Jobs[curJob]; ok {
prevJobs = append(prevJobs, val)
}
}
}
return prevJobs
}
// JobIsReady returns whether or not a job is ready to run. A job is considered
// ready to run if all of its previous jobs are complete. If any previous jobs
// are not complete, the job is not ready to run.
func (c *chain) JobIsReady(jobId string) bool {
isReady := true
for _, job := range c.PreviousJobs(jobId) {
if job.State != proto.STATE_COMPLETE {
isReady = false
}
}
return isReady
}
// IsDone returns two booleans - the first one indicates whether or not the
// chain is done, and the second one indicates whether or not the chain is
// complete.
//
// A chain is done running if there are no more jobs in it that can run. This
// can happen if all of the jobs in the chain or complete, or if some or all
// of the jobs in the chain failed.
//
// A chain is complete if every job in it completed successfully.
func (c *chain) IsDone() (done bool, complete bool) {
done = true
complete = true
pendingJobs := proto.Jobs{}
// Loop through every job in the chain and act on its state. Keep
// track of the jobs that aren't running or in a finished state so
// that we can later check to see if they are capable of running.
LOOP:
for _, job := range c.JobChain.Jobs {
switch job.State {
case proto.STATE_RUNNING:
// If any jobs are running, the chain can't be done
// or complete, so return false for both now.
return false, false
case proto.STATE_COMPLETE:
// Move on to the next job.
continue LOOP
case proto.STATE_FAIL:
// do nothing
default:
// Any job that's not running, complete, or failed.
pendingJobs = append(pendingJobs, job)
}
// We can only arrive here if a job is not complete. If there
// is at least one job that is not complete, the whole chain is
// not complete. The chain could still be done, though, so we
// aren't ready to return yet.
complete = false
}
// For each pending job, check to see if all of its previous jobs
// completed. If they did, there's no reason the pending job can't run.
for _, job := range pendingJobs {
complete = false
allPrevComplete := true
for _, prevJob := range c.PreviousJobs(job.Id) {
if prevJob.State != proto.STATE_COMPLETE {
allPrevComplete = false
// We can break out of this loop if a single
// one of the previous jobs is not complete.
break
}
}
// If all of the previous jobs of a pending job are complete, the
// chain can't be complete because the pending job can still run.
if allPrevComplete == true {
return false, complete
}
}
return
}
// Validate checks if a job chain is valid. It returns an error if it's not.
func (c *chain) Validate() error {
// Make sure the adjacency list is valid.
if !c.adjacencyListIsValid() {
return ErrInvalidChain{
Message: "invalid adjacency list: some jobs exist in " +
"chain.AdjacencyList but not chain.Jobs",
}
}
// Make sure there is one first job.
_, err := c.FirstJob()
if err != nil {
return err
}
// Make sure there is one last job.
_, err = c.LastJob()
if err != nil {
return err
}
// Make sure there are no cycles.
if !c.isAcyclic() {
return ErrInvalidChain{Message: "chain is cyclic"}
}
return nil
}
// RequestId returns the request id of the job chain.
func (c *chain) RequestId() string {
return c.JobChain.RequestId
}
// JobState returns the state of a given job.
func (c *chain) JobState(jobId string) byte {
c.RLock() // -- lock
defer c.RUnlock() // -- unlock
return c.JobChain.Jobs[jobId].State
}
// Set the state of a job in the chain.
func (c *chain) SetJobState(jobId string, state byte) {
c.Lock() // -- lock
j := c.JobChain.Jobs[jobId]
j.State = state
c.JobChain.Jobs[jobId] = j
// Keep chain.Running up to date
if state == proto.STATE_RUNNING {
c.N += 1 // Nth job to run
// @todo: on sequence retry, we need to N-- for all jobs in the sequence
c.Running[jobId] = RunningJob{
N: c.N,
StartTs: time.Now().UnixNano(),
}
} else {
// STATE_RUNNING is the only running state, and it's not that, so the
// job must not be running.
delete(c.Running, jobId)
}
c.Unlock() // -- unlock
}
// SetState sets the chain's state.
func (c *chain) SetState(state byte) {
c.Lock() // -- lock
c.JobChain.State = state
c.Unlock() // -- unlock
}
// -------------------------------------------------------------------------- //
// indegreeCounts finds the indegree for each job in the chain.
func (c *chain) indegreeCounts() map[string]int {
indegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
indegreeCounts[job] = 0
}
for _, nextJobs := range c.JobChain.AdjacencyList {
for _, nextJob := range nextJobs {
if _, ok := indegreeCounts[nextJob]; ok {
indegreeCounts[nextJob] += 1
}
}
}
return indegreeCounts
}
// outdegreeCounts finds the outdegree for each job in the chain.
func (c *chain) outdegreeCounts() map[string]int {
outdegreeCounts := make(map[string]int)
for job := range c.JobChain.Jobs {
outdegreeCounts[job] = len(c.JobChain.AdjacencyList[job])
}
return outdegreeCounts
}
// isAcyclic returns whether or not a job chain is acyclic. It essentially
// works by moving through the job chain from the top (the first job)
// down to the bottom (the last job), and if there are any cycles in the
// chain (dependencies that go in the opposite direction...i.e., bottom to
// top), it returns false.
func (c *chain) isAcyclic() bool {
indegreeCounts := c.indegreeCounts()
queue := make(map[string]struct{})
// Add all of the first jobs to the queue (in reality there should
// only be 1).
for job, indegreeCount := range indegreeCounts {
if indegreeCount == 0 {
queue[job] = struct{}{}
}
}
jobsVisited := 0
for {
// Break when there are no more jobs in the queue. This happens
// when either there are no first jobs, or when a cycle
// prevents us from enqueuing a job below.
if len(queue) == 0 {
break
}
// Get a job from the queue.
var curJob string
for k := range queue {
curJob = k
}
delete(queue, curJob)
// Visit each job adjacent to the current job and decrement
// their indegree count by 1. When a job's indegree count
// becomes 0, add it to the queue.
//
// If there is a cycle somewhere, at least one jobs indegree
// count will never reach 0, and therefore it will never be
// enqueued and visited.
for _, adjJob := range c.JobChain.AdjacencyList[curJob] {
indegreeCounts[adjJob] -= 1
if indegreeCounts[adjJob] == 0 {
queue[adjJob] = struct{}{}
}
}
// Keep track of the number of jobs we've visited. If there is
// a cycle in the chain, we won't end up visiting some jobs.
jobsVisited += 1
}
if jobsVisited != len(c.JobChain.Jobs) {
return false
}
return true
}
// adjacencyListIsValid returns whether or not the chain's adjacency list is
// not valid. An adjacency list is not valid if any of the jobs in it do not
// exist in chain.Jobs.
func (c *chain) adjacencyListIsValid() bool {
for job, adjJobs := range c.JobChain.AdjacencyList {
if _, ok := c.JobChain.Jobs[job]; !ok {
return false
}
for _, adjJob := range adjJobs {
if _, ok := c.JobChain.Jobs[adjJob]; !ok {
return false
}
}
}
return true
}
// contains returns whether or not a slice of strings contains a specific string.
func contains(s []string, t string) bool {
for _, i := range s {
if i == t {
return true
}
}
return false
}
| PreviousJobs | identifier_name |
node.rs | use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;
use bytes::Bytes;
use casbin::prelude::{Enforcer, MgmtApi};
use dashmap::DashMap;
use http::Uri;
use prost::Message;
use raft::prelude::*;
use raft::{Config, RawNode};
use slog::Logger;
use tokio::sync::mpsc::*;
use tokio::sync::RwLock;
use tokio::time::*;
use tonic::Request;
use crate::cluster::{self, InternalRaftMessage, RaftRequest};
use crate::network::{create_client, RpcClient};
use crate::storage::{MemStorage, Storage};
use crate::utils;
pub struct CasbinRaft {
pub id: u64,
pub node: RawNode<MemStorage>,
pub logger: Logger,
pub mailbox_sender: Sender<cluster::Message>,
pub mailbox_recv: Receiver<cluster::Message>,
pub conf_sender: Sender<ConfChange>,
pub conf_recv: Receiver<ConfChange>,
pub peers: Arc<DashMap<u64, RpcClient>>,
pub heartbeat: usize,
pub enforcer: Arc<RwLock<Enforcer>>,
}
impl CasbinRaft {
pub fn new(
id: u64,
cfg: Config,
logger: Logger,
peers: Arc<DashMap<u64, RpcClient>>,
mailbox_sender: Sender<cluster::Message>,
mailbox_recv: Receiver<cluster::Message>,
enforcer: Arc<RwLock<Enforcer>>,
) -> Result<Self, crate::Error> {
cfg.validate()?;
let storage = MemStorage::new();
let node = RawNode::new(&cfg, storage, &logger)?;
let (conf_sender, conf_recv) = channel(1024);
Ok(Self {
id,
node,
logger: logger.clone(),
mailbox_sender,
mailbox_recv,
conf_sender,
conf_recv,
heartbeat: cfg.heartbeat_tick,
peers,
enforcer,
})
}
pub fn tick(&mut self) -> bool {
self.node.tick()
}
pub fn propose_conf_change(
&mut self,
context: Vec<u8>,
cc: ConfChange,
) -> Result<(), raft::Error> {
Ok(self.node.propose_conf_change(context, cc)?)
}
pub fn become_leader(&mut self) {
self.node.raft.raft_log.committed = 0;
self.node.raft.become_candidate();
self.node.raft.become_leader();
}
fn set_hard_state(
&mut self,
commit: u64,
term: u64,
) -> Result<(), crate::error::Error> {
self.node.raft.mut_store().set_hard_state(commit, term);
Ok(())
}
#[allow(irrefutable_let_patterns)]
pub async fn run(
mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
while let _ = interval(Duration::from_millis(self.heartbeat as u64))
.tick()
.await
{
let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv())
.await
{
Ok(Some(msg)) => Some(msg),
Ok(None) => None,
Err(_) => None,
};
if let Some(msg) = msg {
slog::info!(self.logger, "Inbound raft message: {:?}", msg);
self.node.step(msg.into())?;
}
match timeout(Duration::from_millis(100), self.conf_recv.recv()).await {
Ok(Some(cc)) => {
let ccc = cc.clone();
let state = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(state);
let p = self.peers.clone();
let logger = self.logger.clone();
tokio::spawn(async move {
let uri = Uri::try_from(&ccc.context[..]).unwrap();
let client: RpcClient =
create_client(uri.clone(), Some(logger.clone()))
.await
.unwrap();
p.insert(ccc.node_id, client);
slog::info!(
logger,
"Added client: {:?} - {:?}",
ccc.node_id,
&uri
);
});
}
Ok(None) => (),
Err(_) => (),
};
if self.node.has_ready() {
slog::info!(self.logger, "I'm ready!");
self.ready().await?;
}
self.node.tick();
}
Ok(())
}
pub async fn | (
&mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
let mut ready = self.node.ready();
let is_leader = self.node.raft.leader_id == self.node.raft.id;
slog::info!(
self.logger,
"Leader ID: {}, Node ID: {}",
self.node.raft.leader_id,
self.node.raft.id
);
slog::info!(self.logger, "Am I leader?: {}", is_leader);
if !Snapshot::is_empty(ready.snapshot()) {
let snap = ready.snapshot().clone();
slog::info!(self.logger, "Got a snap: {:?}", snap);
self.node.mut_store().apply_snapshot(snap)?;
}
if !ready.entries().is_empty() {
let entries = ready
.entries()
.iter()
.cloned()
.filter(|e| !e.get_data().is_empty())
.collect::<Vec<Entry>>();
slog::info!(self.logger, "Entries?: {}", entries.len());
self.node.mut_store().append(&entries)?;
}
if let Some(hs) = ready.hs() {
slog::info!(self.logger, "HS?: {:?}", hs);
self.node.mut_store().set_hard_state(hs.commit, hs.term);
// self.node.mut_store().state.hard_state = (*hs).clone();
// self.node.mut_store().commit()?;
}
for mut msg in ready.messages.drain(..) {
slog::info!(self.logger, "LOGMSG==={:?}", msg);
let to = msg.to;
msg.from = self.id;
msg.log_term = self.node.store().hard_state().term;
msg.commit = self.node.store().hard_state().commit;
if let Some(client) = self.peers.get(&to) {
let mut msg_bytes = vec![];
msg.encode(&mut msg_bytes).unwrap();
let req = Request::new(RaftRequest {
tpe: 0,
message: msg_bytes,
});
let req = client.clone().raft(req).await?;
slog::info!(self.logger, "RESP={:?}", req);
}
self.append_entries(&msg.entries).await?;
}
if let Some(committed_entries) = ready.committed_entries.take() {
for entry in committed_entries.clone() {
slog::info!(self.logger, "Committing: {:?}", entry);
if entry.data.is_empty() {
// From new elected leaders.
continue;
}
let mut internal_raft_message = InternalRaftMessage::default();
internal_raft_message
.merge(Bytes::from(entry.data.clone()))
.unwrap();
if let Err(error) = self.apply(internal_raft_message) {
slog::error!(self.logger, "Unable to apply entry. {:?}", error);
// TODO: return an error to the user
}
}
if let Some(entry) = committed_entries.last() {
self.set_hard_state(entry.index, entry.term)?;
}
}
self.node.advance(ready);
Ok(())
}
pub fn propose(
&mut self,
ctx: Vec<u8>,
entry: Vec<u8>,
) -> Result<(), Box<dyn std::error::Error>> {
Ok(self.node.propose(ctx, entry)?)
}
pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> {
slog::info!(self.logger, "SEND = {:?}", msg);
self.mailbox_sender.send(msg).await.unwrap();
Ok(())
}
pub async fn append_entries(
&mut self,
entries: &[Entry],
) -> Result<(), crate::Error> {
for entry in entries {
if entry.data.is_empty() {
continue;
}
slog::info!(self.logger, "APPEND={:?}", entry);
match EntryType::from_i32(entry.entry_type) {
Some(EntryType::EntryConfChange) => {
let mut cc = ConfChange::default();
cc.merge(Bytes::from(entry.data.clone()))?;
let cs = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(cs);
}
Some(EntryType::EntryNormal) => {
let mut e = Entry::default();
e.merge(Bytes::from(entry.data.clone()))?;
self.node.mut_store().append(&[e])?;
}
Some(EntryType::EntryConfChangeV2) => panic!("Conf2"),
None => panic!(":-("),
}
}
Ok(())
}
pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> {
if let Some(policy_request) = request.policy {
let op = utils::string_to_static_str(policy_request.op.to_lowercase());
// self.db.insert(put.key, put.value)?;
match op {
"add" => {
let cloned_enforcer = self.enforcer.clone();
let p_type = "p".to_string();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.add_named_policy(&p_type, policy).await.unwrap();
});
}
"remove" => {
let cloned_enforcer = self.enforcer.clone();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.remove_policy(policy).await.unwrap();
});
}
_ => panic!(":-("),
}
}
Ok(())
}
}
| ready | identifier_name |
node.rs | use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;
use bytes::Bytes;
use casbin::prelude::{Enforcer, MgmtApi};
use dashmap::DashMap;
use http::Uri;
use prost::Message;
use raft::prelude::*;
use raft::{Config, RawNode};
use slog::Logger;
use tokio::sync::mpsc::*;
use tokio::sync::RwLock;
use tokio::time::*;
use tonic::Request;
use crate::cluster::{self, InternalRaftMessage, RaftRequest};
use crate::network::{create_client, RpcClient};
use crate::storage::{MemStorage, Storage};
use crate::utils;
pub struct CasbinRaft {
pub id: u64,
pub node: RawNode<MemStorage>,
pub logger: Logger,
pub mailbox_sender: Sender<cluster::Message>,
pub mailbox_recv: Receiver<cluster::Message>,
pub conf_sender: Sender<ConfChange>,
pub conf_recv: Receiver<ConfChange>,
pub peers: Arc<DashMap<u64, RpcClient>>,
pub heartbeat: usize,
pub enforcer: Arc<RwLock<Enforcer>>,
}
impl CasbinRaft {
pub fn new(
id: u64,
cfg: Config,
logger: Logger,
peers: Arc<DashMap<u64, RpcClient>>,
mailbox_sender: Sender<cluster::Message>,
mailbox_recv: Receiver<cluster::Message>,
enforcer: Arc<RwLock<Enforcer>>,
) -> Result<Self, crate::Error> {
cfg.validate()?;
let storage = MemStorage::new();
let node = RawNode::new(&cfg, storage, &logger)?;
let (conf_sender, conf_recv) = channel(1024);
Ok(Self {
id,
node,
logger: logger.clone(),
mailbox_sender,
mailbox_recv,
conf_sender,
conf_recv,
heartbeat: cfg.heartbeat_tick,
peers,
enforcer,
})
}
pub fn tick(&mut self) -> bool {
self.node.tick()
}
pub fn propose_conf_change(
&mut self,
context: Vec<u8>,
cc: ConfChange,
) -> Result<(), raft::Error> {
Ok(self.node.propose_conf_change(context, cc)?)
}
pub fn become_leader(&mut self) {
self.node.raft.raft_log.committed = 0;
self.node.raft.become_candidate();
self.node.raft.become_leader();
}
fn set_hard_state(
&mut self,
commit: u64,
term: u64,
) -> Result<(), crate::error::Error> {
self.node.raft.mut_store().set_hard_state(commit, term);
Ok(())
}
#[allow(irrefutable_let_patterns)]
pub async fn run(
mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
while let _ = interval(Duration::from_millis(self.heartbeat as u64))
.tick()
.await
{
let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv())
.await
{
Ok(Some(msg)) => Some(msg),
Ok(None) => None,
Err(_) => None,
};
if let Some(msg) = msg |
match timeout(Duration::from_millis(100), self.conf_recv.recv()).await {
Ok(Some(cc)) => {
let ccc = cc.clone();
let state = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(state);
let p = self.peers.clone();
let logger = self.logger.clone();
tokio::spawn(async move {
let uri = Uri::try_from(&ccc.context[..]).unwrap();
let client: RpcClient =
create_client(uri.clone(), Some(logger.clone()))
.await
.unwrap();
p.insert(ccc.node_id, client);
slog::info!(
logger,
"Added client: {:?} - {:?}",
ccc.node_id,
&uri
);
});
}
Ok(None) => (),
Err(_) => (),
};
if self.node.has_ready() {
slog::info!(self.logger, "I'm ready!");
self.ready().await?;
}
self.node.tick();
}
Ok(())
}
pub async fn ready(
&mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
let mut ready = self.node.ready();
let is_leader = self.node.raft.leader_id == self.node.raft.id;
slog::info!(
self.logger,
"Leader ID: {}, Node ID: {}",
self.node.raft.leader_id,
self.node.raft.id
);
slog::info!(self.logger, "Am I leader?: {}", is_leader);
if !Snapshot::is_empty(ready.snapshot()) {
let snap = ready.snapshot().clone();
slog::info!(self.logger, "Got a snap: {:?}", snap);
self.node.mut_store().apply_snapshot(snap)?;
}
if !ready.entries().is_empty() {
let entries = ready
.entries()
.iter()
.cloned()
.filter(|e| !e.get_data().is_empty())
.collect::<Vec<Entry>>();
slog::info!(self.logger, "Entries?: {}", entries.len());
self.node.mut_store().append(&entries)?;
}
if let Some(hs) = ready.hs() {
slog::info!(self.logger, "HS?: {:?}", hs);
self.node.mut_store().set_hard_state(hs.commit, hs.term);
// self.node.mut_store().state.hard_state = (*hs).clone();
// self.node.mut_store().commit()?;
}
for mut msg in ready.messages.drain(..) {
slog::info!(self.logger, "LOGMSG==={:?}", msg);
let to = msg.to;
msg.from = self.id;
msg.log_term = self.node.store().hard_state().term;
msg.commit = self.node.store().hard_state().commit;
if let Some(client) = self.peers.get(&to) {
let mut msg_bytes = vec![];
msg.encode(&mut msg_bytes).unwrap();
let req = Request::new(RaftRequest {
tpe: 0,
message: msg_bytes,
});
let req = client.clone().raft(req).await?;
slog::info!(self.logger, "RESP={:?}", req);
}
self.append_entries(&msg.entries).await?;
}
if let Some(committed_entries) = ready.committed_entries.take() {
for entry in committed_entries.clone() {
slog::info!(self.logger, "Committing: {:?}", entry);
if entry.data.is_empty() {
// From new elected leaders.
continue;
}
let mut internal_raft_message = InternalRaftMessage::default();
internal_raft_message
.merge(Bytes::from(entry.data.clone()))
.unwrap();
if let Err(error) = self.apply(internal_raft_message) {
slog::error!(self.logger, "Unable to apply entry. {:?}", error);
// TODO: return an error to the user
}
}
if let Some(entry) = committed_entries.last() {
self.set_hard_state(entry.index, entry.term)?;
}
}
self.node.advance(ready);
Ok(())
}
pub fn propose(
&mut self,
ctx: Vec<u8>,
entry: Vec<u8>,
) -> Result<(), Box<dyn std::error::Error>> {
Ok(self.node.propose(ctx, entry)?)
}
pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> {
slog::info!(self.logger, "SEND = {:?}", msg);
self.mailbox_sender.send(msg).await.unwrap();
Ok(())
}
pub async fn append_entries(
&mut self,
entries: &[Entry],
) -> Result<(), crate::Error> {
for entry in entries {
if entry.data.is_empty() {
continue;
}
slog::info!(self.logger, "APPEND={:?}", entry);
match EntryType::from_i32(entry.entry_type) {
Some(EntryType::EntryConfChange) => {
let mut cc = ConfChange::default();
cc.merge(Bytes::from(entry.data.clone()))?;
let cs = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(cs);
}
Some(EntryType::EntryNormal) => {
let mut e = Entry::default();
e.merge(Bytes::from(entry.data.clone()))?;
self.node.mut_store().append(&[e])?;
}
Some(EntryType::EntryConfChangeV2) => panic!("Conf2"),
None => panic!(":-("),
}
}
Ok(())
}
pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> {
if let Some(policy_request) = request.policy {
let op = utils::string_to_static_str(policy_request.op.to_lowercase());
// self.db.insert(put.key, put.value)?;
match op {
"add" => {
let cloned_enforcer = self.enforcer.clone();
let p_type = "p".to_string();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.add_named_policy(&p_type, policy).await.unwrap();
});
}
"remove" => {
let cloned_enforcer = self.enforcer.clone();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.remove_policy(policy).await.unwrap();
});
}
_ => panic!(":-("),
}
}
Ok(())
}
}
| {
slog::info!(self.logger, "Inbound raft message: {:?}", msg);
self.node.step(msg.into())?;
} | conditional_block |
node.rs | use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;
use bytes::Bytes;
use casbin::prelude::{Enforcer, MgmtApi};
use dashmap::DashMap;
use http::Uri;
use prost::Message;
use raft::prelude::*;
use raft::{Config, RawNode};
use slog::Logger;
use tokio::sync::mpsc::*;
use tokio::sync::RwLock;
use tokio::time::*;
use tonic::Request;
use crate::cluster::{self, InternalRaftMessage, RaftRequest};
use crate::network::{create_client, RpcClient};
use crate::storage::{MemStorage, Storage};
use crate::utils;
pub struct CasbinRaft {
pub id: u64,
pub node: RawNode<MemStorage>,
pub logger: Logger,
pub mailbox_sender: Sender<cluster::Message>,
pub mailbox_recv: Receiver<cluster::Message>,
pub conf_sender: Sender<ConfChange>,
pub conf_recv: Receiver<ConfChange>,
pub peers: Arc<DashMap<u64, RpcClient>>,
pub heartbeat: usize,
pub enforcer: Arc<RwLock<Enforcer>>,
}
impl CasbinRaft {
pub fn new(
id: u64,
cfg: Config,
logger: Logger,
peers: Arc<DashMap<u64, RpcClient>>,
mailbox_sender: Sender<cluster::Message>,
mailbox_recv: Receiver<cluster::Message>,
enforcer: Arc<RwLock<Enforcer>>,
) -> Result<Self, crate::Error> {
cfg.validate()?;
let storage = MemStorage::new();
let node = RawNode::new(&cfg, storage, &logger)?;
let (conf_sender, conf_recv) = channel(1024);
Ok(Self {
id,
node,
logger: logger.clone(),
mailbox_sender,
mailbox_recv,
conf_sender,
conf_recv,
heartbeat: cfg.heartbeat_tick,
peers,
enforcer,
})
}
pub fn tick(&mut self) -> bool {
self.node.tick()
}
pub fn propose_conf_change(
&mut self,
context: Vec<u8>,
cc: ConfChange,
) -> Result<(), raft::Error> {
Ok(self.node.propose_conf_change(context, cc)?)
}
pub fn become_leader(&mut self) |
fn set_hard_state(
&mut self,
commit: u64,
term: u64,
) -> Result<(), crate::error::Error> {
self.node.raft.mut_store().set_hard_state(commit, term);
Ok(())
}
#[allow(irrefutable_let_patterns)]
pub async fn run(
mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
while let _ = interval(Duration::from_millis(self.heartbeat as u64))
.tick()
.await
{
let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv())
.await
{
Ok(Some(msg)) => Some(msg),
Ok(None) => None,
Err(_) => None,
};
if let Some(msg) = msg {
slog::info!(self.logger, "Inbound raft message: {:?}", msg);
self.node.step(msg.into())?;
}
match timeout(Duration::from_millis(100), self.conf_recv.recv()).await {
Ok(Some(cc)) => {
let ccc = cc.clone();
let state = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(state);
let p = self.peers.clone();
let logger = self.logger.clone();
tokio::spawn(async move {
let uri = Uri::try_from(&ccc.context[..]).unwrap();
let client: RpcClient =
create_client(uri.clone(), Some(logger.clone()))
.await
.unwrap();
p.insert(ccc.node_id, client);
slog::info!(
logger,
"Added client: {:?} - {:?}",
ccc.node_id,
&uri
);
});
}
Ok(None) => (),
Err(_) => (),
};
if self.node.has_ready() {
slog::info!(self.logger, "I'm ready!");
self.ready().await?;
}
self.node.tick();
}
Ok(())
}
pub async fn ready(
&mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
let mut ready = self.node.ready();
let is_leader = self.node.raft.leader_id == self.node.raft.id;
slog::info!(
self.logger,
"Leader ID: {}, Node ID: {}",
self.node.raft.leader_id,
self.node.raft.id
);
slog::info!(self.logger, "Am I leader?: {}", is_leader);
if !Snapshot::is_empty(ready.snapshot()) {
let snap = ready.snapshot().clone();
slog::info!(self.logger, "Got a snap: {:?}", snap);
self.node.mut_store().apply_snapshot(snap)?;
}
if !ready.entries().is_empty() {
let entries = ready
.entries()
.iter()
.cloned()
.filter(|e| !e.get_data().is_empty())
.collect::<Vec<Entry>>();
slog::info!(self.logger, "Entries?: {}", entries.len());
self.node.mut_store().append(&entries)?;
}
if let Some(hs) = ready.hs() {
slog::info!(self.logger, "HS?: {:?}", hs);
self.node.mut_store().set_hard_state(hs.commit, hs.term);
// self.node.mut_store().state.hard_state = (*hs).clone();
// self.node.mut_store().commit()?;
}
for mut msg in ready.messages.drain(..) {
slog::info!(self.logger, "LOGMSG==={:?}", msg);
let to = msg.to;
msg.from = self.id;
msg.log_term = self.node.store().hard_state().term;
msg.commit = self.node.store().hard_state().commit;
if let Some(client) = self.peers.get(&to) {
let mut msg_bytes = vec![];
msg.encode(&mut msg_bytes).unwrap();
let req = Request::new(RaftRequest {
tpe: 0,
message: msg_bytes,
});
let req = client.clone().raft(req).await?;
slog::info!(self.logger, "RESP={:?}", req);
}
self.append_entries(&msg.entries).await?;
}
if let Some(committed_entries) = ready.committed_entries.take() {
for entry in committed_entries.clone() {
slog::info!(self.logger, "Committing: {:?}", entry);
if entry.data.is_empty() {
// From new elected leaders.
continue;
}
let mut internal_raft_message = InternalRaftMessage::default();
internal_raft_message
.merge(Bytes::from(entry.data.clone()))
.unwrap();
if let Err(error) = self.apply(internal_raft_message) {
slog::error!(self.logger, "Unable to apply entry. {:?}", error);
// TODO: return an error to the user
}
}
if let Some(entry) = committed_entries.last() {
self.set_hard_state(entry.index, entry.term)?;
}
}
self.node.advance(ready);
Ok(())
}
pub fn propose(
&mut self,
ctx: Vec<u8>,
entry: Vec<u8>,
) -> Result<(), Box<dyn std::error::Error>> {
Ok(self.node.propose(ctx, entry)?)
}
pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> {
slog::info!(self.logger, "SEND = {:?}", msg);
self.mailbox_sender.send(msg).await.unwrap();
Ok(())
}
pub async fn append_entries(
&mut self,
entries: &[Entry],
) -> Result<(), crate::Error> {
for entry in entries {
if entry.data.is_empty() {
continue;
}
slog::info!(self.logger, "APPEND={:?}", entry);
match EntryType::from_i32(entry.entry_type) {
Some(EntryType::EntryConfChange) => {
let mut cc = ConfChange::default();
cc.merge(Bytes::from(entry.data.clone()))?;
let cs = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(cs);
}
Some(EntryType::EntryNormal) => {
let mut e = Entry::default();
e.merge(Bytes::from(entry.data.clone()))?;
self.node.mut_store().append(&[e])?;
}
Some(EntryType::EntryConfChangeV2) => panic!("Conf2"),
None => panic!(":-("),
}
}
Ok(())
}
pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> {
if let Some(policy_request) = request.policy {
let op = utils::string_to_static_str(policy_request.op.to_lowercase());
// self.db.insert(put.key, put.value)?;
match op {
"add" => {
let cloned_enforcer = self.enforcer.clone();
let p_type = "p".to_string();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.add_named_policy(&p_type, policy).await.unwrap();
});
}
"remove" => {
let cloned_enforcer = self.enforcer.clone();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.remove_policy(policy).await.unwrap();
});
}
_ => panic!(":-("),
}
}
Ok(())
}
}
| {
self.node.raft.raft_log.committed = 0;
self.node.raft.become_candidate();
self.node.raft.become_leader();
} | identifier_body |
node.rs | use std::convert::TryFrom;
use std::sync::Arc;
use std::time::Duration;
use bytes::Bytes;
use casbin::prelude::{Enforcer, MgmtApi};
use dashmap::DashMap;
use http::Uri;
use prost::Message;
use raft::prelude::*;
use raft::{Config, RawNode};
use slog::Logger;
use tokio::sync::mpsc::*;
use tokio::sync::RwLock;
use tokio::time::*;
use tonic::Request;
use crate::cluster::{self, InternalRaftMessage, RaftRequest};
use crate::network::{create_client, RpcClient};
use crate::storage::{MemStorage, Storage};
use crate::utils;
pub struct CasbinRaft {
pub id: u64,
pub node: RawNode<MemStorage>,
pub logger: Logger,
pub mailbox_sender: Sender<cluster::Message>,
pub mailbox_recv: Receiver<cluster::Message>,
pub conf_sender: Sender<ConfChange>,
pub conf_recv: Receiver<ConfChange>,
pub peers: Arc<DashMap<u64, RpcClient>>,
pub heartbeat: usize,
pub enforcer: Arc<RwLock<Enforcer>>,
}
impl CasbinRaft {
pub fn new(
id: u64,
cfg: Config,
logger: Logger,
peers: Arc<DashMap<u64, RpcClient>>,
mailbox_sender: Sender<cluster::Message>,
mailbox_recv: Receiver<cluster::Message>,
enforcer: Arc<RwLock<Enforcer>>,
) -> Result<Self, crate::Error> {
cfg.validate()?;
let storage = MemStorage::new();
let node = RawNode::new(&cfg, storage, &logger)?;
let (conf_sender, conf_recv) = channel(1024);
Ok(Self {
id,
node,
logger: logger.clone(),
mailbox_sender,
mailbox_recv,
conf_sender,
conf_recv,
heartbeat: cfg.heartbeat_tick,
peers,
enforcer,
})
}
pub fn tick(&mut self) -> bool {
self.node.tick()
}
pub fn propose_conf_change(
&mut self,
context: Vec<u8>,
cc: ConfChange,
) -> Result<(), raft::Error> {
Ok(self.node.propose_conf_change(context, cc)?)
}
pub fn become_leader(&mut self) {
self.node.raft.raft_log.committed = 0;
self.node.raft.become_candidate();
self.node.raft.become_leader();
}
fn set_hard_state(
&mut self,
commit: u64,
term: u64,
) -> Result<(), crate::error::Error> {
self.node.raft.mut_store().set_hard_state(commit, term);
Ok(())
}
#[allow(irrefutable_let_patterns)]
pub async fn run(
mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
while let _ = interval(Duration::from_millis(self.heartbeat as u64))
.tick()
.await
{
let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv())
.await
{
Ok(Some(msg)) => Some(msg),
Ok(None) => None,
Err(_) => None,
};
if let Some(msg) = msg {
slog::info!(self.logger, "Inbound raft message: {:?}", msg);
self.node.step(msg.into())?;
}
match timeout(Duration::from_millis(100), self.conf_recv.recv()).await {
Ok(Some(cc)) => {
let ccc = cc.clone();
let state = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(state);
let p = self.peers.clone();
let logger = self.logger.clone();
tokio::spawn(async move {
let uri = Uri::try_from(&ccc.context[..]).unwrap();
let client: RpcClient =
create_client(uri.clone(), Some(logger.clone()))
.await
.unwrap();
p.insert(ccc.node_id, client);
slog::info!(
logger,
"Added client: {:?} - {:?}",
ccc.node_id,
&uri
);
});
}
Ok(None) => (),
Err(_) => (),
};
if self.node.has_ready() {
slog::info!(self.logger, "I'm ready!");
self.ready().await?;
}
self.node.tick();
}
Ok(())
}
pub async fn ready(
&mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
let mut ready = self.node.ready();
let is_leader = self.node.raft.leader_id == self.node.raft.id;
slog::info!(
self.logger,
"Leader ID: {}, Node ID: {}",
self.node.raft.leader_id,
self.node.raft.id
);
slog::info!(self.logger, "Am I leader?: {}", is_leader);
if !Snapshot::is_empty(ready.snapshot()) {
let snap = ready.snapshot().clone();
slog::info!(self.logger, "Got a snap: {:?}", snap);
self.node.mut_store().apply_snapshot(snap)?;
}
if !ready.entries().is_empty() {
let entries = ready
.entries()
.iter()
.cloned()
.filter(|e| !e.get_data().is_empty())
.collect::<Vec<Entry>>();
slog::info!(self.logger, "Entries?: {}", entries.len());
self.node.mut_store().append(&entries)?;
}
if let Some(hs) = ready.hs() {
slog::info!(self.logger, "HS?: {:?}", hs);
self.node.mut_store().set_hard_state(hs.commit, hs.term);
// self.node.mut_store().state.hard_state = (*hs).clone();
// self.node.mut_store().commit()?;
}
for mut msg in ready.messages.drain(..) {
slog::info!(self.logger, "LOGMSG==={:?}", msg);
let to = msg.to;
msg.from = self.id;
msg.log_term = self.node.store().hard_state().term;
msg.commit = self.node.store().hard_state().commit;
if let Some(client) = self.peers.get(&to) {
let mut msg_bytes = vec![];
msg.encode(&mut msg_bytes).unwrap();
let req = Request::new(RaftRequest {
tpe: 0,
message: msg_bytes,
});
let req = client.clone().raft(req).await?;
slog::info!(self.logger, "RESP={:?}", req);
}
self.append_entries(&msg.entries).await?;
}
if let Some(committed_entries) = ready.committed_entries.take() {
for entry in committed_entries.clone() {
slog::info!(self.logger, "Committing: {:?}", entry);
if entry.data.is_empty() {
// From new elected leaders.
continue;
}
| internal_raft_message
.merge(Bytes::from(entry.data.clone()))
.unwrap();
if let Err(error) = self.apply(internal_raft_message) {
slog::error!(self.logger, "Unable to apply entry. {:?}", error);
// TODO: return an error to the user
}
}
if let Some(entry) = committed_entries.last() {
self.set_hard_state(entry.index, entry.term)?;
}
}
self.node.advance(ready);
Ok(())
}
pub fn propose(
&mut self,
ctx: Vec<u8>,
entry: Vec<u8>,
) -> Result<(), Box<dyn std::error::Error>> {
Ok(self.node.propose(ctx, entry)?)
}
pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> {
slog::info!(self.logger, "SEND = {:?}", msg);
self.mailbox_sender.send(msg).await.unwrap();
Ok(())
}
pub async fn append_entries(
&mut self,
entries: &[Entry],
) -> Result<(), crate::Error> {
for entry in entries {
if entry.data.is_empty() {
continue;
}
slog::info!(self.logger, "APPEND={:?}", entry);
match EntryType::from_i32(entry.entry_type) {
Some(EntryType::EntryConfChange) => {
let mut cc = ConfChange::default();
cc.merge(Bytes::from(entry.data.clone()))?;
let cs = self.node.apply_conf_change(&cc)?;
self.node.mut_store().set_conf_state(cs);
}
Some(EntryType::EntryNormal) => {
let mut e = Entry::default();
e.merge(Bytes::from(entry.data.clone()))?;
self.node.mut_store().append(&[e])?;
}
Some(EntryType::EntryConfChangeV2) => panic!("Conf2"),
None => panic!(":-("),
}
}
Ok(())
}
pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> {
if let Some(policy_request) = request.policy {
let op = utils::string_to_static_str(policy_request.op.to_lowercase());
// self.db.insert(put.key, put.value)?;
match op {
"add" => {
let cloned_enforcer = self.enforcer.clone();
let p_type = "p".to_string();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.add_named_policy(&p_type, policy).await.unwrap();
});
}
"remove" => {
let cloned_enforcer = self.enforcer.clone();
let policy = policy_request.params;
Box::pin(async move {
let mut lock = cloned_enforcer.write().await;
lock.remove_policy(policy).await.unwrap();
});
}
_ => panic!(":-("),
}
}
Ok(())
}
} | let mut internal_raft_message = InternalRaftMessage::default(); | random_line_split |
script.py | # -*- coding: utf-8 -*-
##############################################################################
# Visión por Computador
# Trabajo 3: Indexación y recuperación de imágenes.
# @author Álvaro Fernández García
##############################################################################
import numpy as np
import cv2
import auxFunc as axf
import matplotlib.pyplot as plt
import pickle
# Muestra una imgen en pantalla. La imagen se recibe como una matriz:
def _showImage(img, title='Imagen'):
cv2.imshow(title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Concatena varias imágenes en una sola:
def PintaMI( | tle):
assert len(vim) > 0
if len(vim) == 1:
finalImg = vim
else:
finalImg = vim[0]
vim.pop(0)
for img in vim:
finalImg = cv2.hconcat((finalImg, img))
_showImage(finalImg, title)
return finalImg
# Muestra en una imagen 10 parches de 24x24 pixeles:
def dibuja10parches(parches):
f, ax = plt.subplots(2,5)
for i,a in enumerate(ax.flatten()):
a.imshow(parches[i], 'gray')
a.set_xticks([])
a.set_yticks([])
plt.tight_layout()
plt.show()
#######################################################################################
# EJERCICIO 1:
#######################################################################################
# 1 .- Emparejamiento de descriptores [4 puntos]
# * Mirar las imágenes en imagenesIR.rar y elegir parejas de imágenes
# que tengan partes de escena comunes. Haciendo uso de una máscara
# binaria o de las funciones extractRegion() y clickAndDraw(), seleccionar
# una región en la primera imagen que esté presente en la segunda imagen.
# Para ello solo hay que fijar los vértices de un polígono que contenga
# a la región.
# * Extraiga los puntos SIFT contenidos en la región seleccionada de la
# primera imagen y calcule las correspondencias con todos los puntos
# SIFT de la segunda imagen (ayuda: use el concepto de máscara con
# el parámetro mask).
# * Pinte las correspondencias encontrados sobre las imágenes.
# * Jugar con distintas parejas de imágenes, valorar las correspondencias
# correctas obtenidas y extraer conclusiones respecto a la utilidad de
# esta aproximación de recuperación de regiones/objetos de interés a
# partir de descriptores de una región.
def Ejercicio1():
# Declaramos los objetos necesarios:
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=True)
# Crear las parejas de imagenes:
parejas = [
(cv2.imread('imagenes/55.png', 1), cv2.imread('imagenes/59.png', 1)),
(cv2.imread('imagenes/229.png', 1), cv2.imread('imagenes/248.png', 1)),
(cv2.imread('imagenes/71.png', 1), cv2.imread('imagenes/88.png', 1))
]
for par in parejas:
img1 = par[0]
img2 = par[1]
# Crear la máscara de la región extraída:
refPts = np.array(axf.extractRegion(img1))
mask = np.zeros((img1.shape[0], img1.shape[1]), np.uint8)
mask = cv2.fillConvexPoly(mask, refPts, (255,255,255))
# Extraer los puntos y descriptores:
kp1, des1 = sift.detectAndCompute(img1, mask)
kp2, des2 = sift.detectAndCompute(img2, None)
# Extraer las correspondencias:
matches = bf.match(des1, des2)
# Dibujarlas:
out = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)
_showImage(out)
#######################################################################################
# EJERCICIO 2:
#######################################################################################
# 2. Recuperación de imágenes [4 puntos]
# • Implementar un modelo de índice invertido + bolsa de palabras para
# las imágenes dadas en imagenesIR.rar usando el vocabulario dado
# en kmeanscenters2000.pkl.
# • Verificar que el modelo construido para cada imagen permite recu-
# perar imágenes de la misma escena cuando la comparamos al resto
# de imágenes de la base de datos.
# • Elegir dos imágenes-pregunta en las se ponga de manifiesto que el
# modelo usado es realmente muy efectivo para extraer sus semejantes y
# elegir otra imagen-pregunta en la que se muestre que el modelo puede
# realmente fallar. Para ello muestre las cinco imágenes más semejantes
# de cada una de las imágenes-pregunta seleccionadas usando como
# medida de distancia el producto escalar normalizado de sus vectores
# de bolsa de palabras.
# • Explicar qué conclusiones obtiene de este experimento.
# Función que construye el índice invertido y la bolsa de palabras:
def Ejercicio2():
# Cargar los centroides y crear el detector y matcher:
dicc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el índice invertido:
indice_invert = dict()
for i in range(dicc.shape[0]):
indice_invert[i] = set()
# Inicializar la bolsa de palabras:
bolsa_palabras = dict()
for i in range(441):
bolsa_palabras[i] = np.zeros(dicc.shape[0], np.int)
# Para cada imagen:
for i in range(441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Para cada match:
for m in matches:
# Actualizar el indice invertido:
indice_invert[m.trainIdx].add(i)
# Actualizar los histogramas de la bolsa de palabras:
bolsa_palabras[i][m.trainIdx] += 1
# Normalizar los histogramas:
for i in range(441):
bolsa_palabras[i] = bolsa_palabras[i] / np.linalg.norm(bolsa_palabras[i])
return indice_invert, bolsa_palabras
# Dada una imagen img, y la bolsa de palabras calculada con la función anterior,
# muestra las 5 imágenes más cercanas:
def get_5_nearest_images(img, bolsa_palabras):
# Paso 1: obtener la bolsa de palabras de la imagen:
# Declaramos los objetos necesarios:
dicc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el histograma:
q = np.zeros(dicc.shape[0])
# Extraer los descriptores de la imagen:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Actualizar el histograma:
for m in matches:
q[m.trainIdx] += 1
# Normalizarlo:
q = q / np.linalg.norm(q)
# Paso 2: Obtener las 5 imágenes más cercanas:
# para ello utilizamos la similaridad:
def sim(I, J):
# Como están normalizados no es necesario dividir entre el producto de las normas:
return (I * J).sum()
# Calcular las similaridades:
similaridades = []
for i in range(len(bolsa_palabras)):
similaridades.append((i, sim(bolsa_palabras[i], q)))
# Ordenamos las similaridades:
similaridades = sorted(similaridades, key = lambda x:x[1], reverse=True)
# Mostrar las 5 imágenes más cercanas: (la posición 0 es la propia imagen, por eso nos la saltamos)
for i in range(1,6):
ima = cv2.imread('imagenes/' + str(similaridades[i][0]) + '.png', 1)
PintaMI([img, ima], "{}ª imagen más cercana: Similaridad = {:.4f}".format(i, similaridades[i][1]))
#######################################################################################
# EJERCICIO 3:
#######################################################################################
# Visualización del vocabulario [3 puntos]
# • Usando las imágenes dadas en imagenesIR.rar se han extraido 600
# regiones de cada imagen de forma directa y se han re-escalado en
# parches de 24x24 píxeles. A partir de ellas se ha construido un
# vocabulario de 5.000 palabras usando k-means. Los ficheros con los datos
# son descriptorsAndpatches2000.pkl (descriptores de las regiones
# y los parches extraídos) y kmeanscenters2000.pkl (vocabulario
# extraído).
# • Elegir al menos dos palabras visuales diferentes y visualizar las
# regiones imagen de los 10 parches más cercanos de cada palabra visual,
# de forma que se muestre el contenido visual que codifican (mejor en
# niveles de gris).
# • Explicar si lo que se ha obtenido es realmente lo esperado en términos
# de cercanía visual de los parches.
def Ejercicio3(vocabulario, words):
# Cargamos los archivos proporcionados:
dicc = vocabulario
des, patches = axf.loadAux('descriptorsAndpatches2000.pkl', True)
# Declaramos el Matcher:
bf = cv2.BFMatcher(crossCheck=False)
# Obtenemos los 10 matches más cercanos:
matches = bf.knnMatch(dicc, des, k=10)
# Mostrar los ejemplos:
# en los dos primeros matches los parches son muy parecidos (casi idénticos)
# en el tercero, hay ligeras variaciones
# en el último son distintos:
for i in words:
print(i)
myPatches = []
for m in matches[i]:
# Convertirlo a escala de grises y hacer el reshape del parche:
img = cv2.cvtColor(patches[m.trainIdx], cv2.COLOR_BGR2GRAY).reshape(24,24)
myPatches.append(img)
dibuja10parches(myPatches)
#######################################################################################
# BONUS:
#######################################################################################
# Ejercicio 2: Creación de un vocabulario [2 puntos]: Calcular desde todas las
# imagenesIR.rar los ficheros dados en el Ejercicio 2 usando los mismos parámetros.
# Aplicar con el nuevo diccionario lo pedido con el Ejercicio 3.
def buildVocabulary():
# Crear el detector SIFT:
sift = cv2.xfeatures2d.SIFT_create(600)
img = cv2.imread('imagenes/0.png', 1)
des = sift.detectAndCompute(img, None)[1]
descriptors = des
# Para cada imagen:
for i in range(1, 441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Añadirlos: cada fila de la matriz es un descriptor:
descriptors = np.vstack((descriptors, des))
# Lo convertimos a float 32 (lo requiere el método kmeans):
descriptors = np.float32(descriptors)
# Realizamos el clustering para extraer el vocabulario:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
compactness, labels, centers = cv2.kmeans(descriptors, 2000, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
# Descomentar esta línea para guardar el vocabulario en un fichero:
# pickle.dump(centers, open("myVocabulary.pkl", "wb"))
return centers
#######################################################################################
# PRUEBA DE LAS FUNCIONES:
#######################################################################################
Ejercicio1()
# Ejercicio 2:
invert, bag = Ejercicio2()
# Casos favorables:
img = cv2.imread('imagenes/89.png',1 )
get_5_nearest_images(img, bag)
img = cv2.imread('imagenes/356.png',1 )
get_5_nearest_images(img, bag)
# Caso desfavorable:
img = cv2.imread('imagenes/78.png',1 )
get_5_nearest_images(img, bag)
voc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
# El segundo parámetro son las palabras a visualizar:
Ejercicio3(voc, (76, 32, 36, 80))
#######################################################################################
# PRUEBA DEL BONUS:
#######################################################################################
# Descomentar está línea y comentar la siguiente para volver a calcular el vocabulario:
# voc = buildVocabulary()
voc = pickle.load(open("myVocabulary.pkl", "rb"))
Ejercicio3(voc, (54, 96, 71, 100))
| vim, ti | identifier_name |
script.py | # -*- coding: utf-8 -*-
##############################################################################
# Visión por Computador
# Trabajo 3: Indexación y recuperación de imágenes.
# @author Álvaro Fernández García
##############################################################################
import numpy as np
import cv2
import auxFunc as axf
import matplotlib.pyplot as plt
import pickle
# Muestra una imgen en pantalla. La imagen se recibe como una matriz:
def _showImage(img, title='Imagen'):
cv2.imshow(title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Concatena varias imágenes en una sola:
def PintaMI(vim, title):
assert len(vim) > 0
if len(vim) == 1:
finalImg = vim
else:
finalImg = vim[0]
vim.pop(0)
for img in vim:
finalImg = cv2.hconcat((finalImg, img))
_showImage(finalImg, title)
return finalImg
# Muestra en una imagen 10 parches de 24x24 pixeles:
def dibuja10parches(parches):
f, ax = plt.subplots(2,5)
for i,a in enumerate(ax.flatten()):
a.imshow(parches[i], 'gray')
a.set_xticks([])
a.set_yticks([])
plt.tight_layout()
plt.show()
#######################################################################################
# EJERCICIO 1:
#######################################################################################
# 1 .- Emparejamiento de descriptores [4 puntos]
# * Mirar las imágenes en imagenesIR.rar y elegir parejas de imágenes
# que tengan partes de escena comunes. Haciendo uso de una máscara
# binaria o de las funciones extractRegion() y clickAndDraw(), seleccionar
# una región en la primera imagen que esté presente en la segunda imagen.
# Para ello solo hay que fijar los vértices de un polígono que contenga
# a la región.
# * Extraiga los puntos SIFT contenidos en la región seleccionada de la
# primera imagen y calcule las correspondencias con todos los puntos
# SIFT de la segunda imagen (ayuda: use el concepto de máscara con
# el parámetro mask).
# * Pinte las correspondencias encontrados sobre las imágenes.
# * Jugar con distintas parejas de imágenes, valorar las correspondencias
# correctas obtenidas y extraer conclusiones respecto a la utilidad de
# esta aproximación de recuperación de regiones/objetos de interés a
# partir de descriptores de una región.
def Ejercicio1():
# Declaramos los objetos necesarios:
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=True)
# Crear las parejas de imagenes:
parejas = [
(cv2.imread('imagenes/55.png', 1), cv2.imread('imagenes/59.png', 1)),
(cv2.imread('imagenes/229.png', 1), cv2.imread('imagenes/248.png', 1)),
(cv2.imread('imagenes/71.png', 1), cv2.imread('imagenes/88.png', 1))
]
for par in parejas:
img1 = par[0]
img2 = par[1]
# Crear la máscara de la región extraída:
refPts = np.array(axf.extractRegion(img1))
mask = np.zeros((img1.shape[0], img1.shape[1]), np.uint8)
mask = cv2.fillConvexPoly(mask, refPts, (255,255,255))
# Extraer los puntos y descriptores:
kp1, des1 = sift.detectAndCompute(img1, mask)
kp2, des2 = sift.detectAndCompute(img2, None)
# Extraer las correspondencias:
matches = bf.match(des1, des2)
# Dibujarlas:
out = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)
_showImage(out)
#######################################################################################
# EJERCICIO 2:
#######################################################################################
# 2. Recuperación de imágenes [4 puntos]
# • Implementar un modelo de índice invertido + bolsa de palabras para
# las imágenes dadas en imagenesIR.rar usando el vocabulario dado
# en kmeanscenters2000.pkl.
# • Verificar que el modelo construido para cada imagen permite recu-
# perar imágenes de la misma escena cuando la comparamos al resto
# de imágenes de la base de datos.
# • Elegir dos imágenes-pregunta en las se ponga de manifiesto que el
# modelo usado es realmente muy efectivo para extraer sus semejantes y
# elegir otra imagen-pregunta en la que se muestre que el modelo puede
# realmente fallar. Para ello muestre las cinco imágenes más semejantes
# de cada una de las imágenes-pregunta seleccionadas usando como
# medida de distancia el producto escalar normalizado de sus vectores
# de bolsa de palabras.
# • Explicar qué conclusiones obtiene de este experimento.
# Función que construye el índice invertido y la bolsa de palabras:
def Ejercicio2():
# Cargar los centroides y crear el detector y matcher:
dicc = axf.loadDictionary('kmeanscenters2000.pkl' | alculada con la función anterior,
# muestra las 5 imágenes más cercanas:
def get_5_nearest_images(img, bolsa_palabras):
# Paso 1: obtener la bolsa de palabras de la imagen:
# Declaramos los objetos necesarios:
dicc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el histograma:
q = np.zeros(dicc.shape[0])
# Extraer los descriptores de la imagen:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Actualizar el histograma:
for m in matches:
q[m.trainIdx] += 1
# Normalizarlo:
q = q / np.linalg.norm(q)
# Paso 2: Obtener las 5 imágenes más cercanas:
# para ello utilizamos la similaridad:
def sim(I, J):
# Como están normalizados no es necesario dividir entre el producto de las normas:
return (I * J).sum()
# Calcular las similaridades:
similaridades = []
for i in range(len(bolsa_palabras)):
similaridades.append((i, sim(bolsa_palabras[i], q)))
# Ordenamos las similaridades:
similaridades = sorted(similaridades, key = lambda x:x[1], reverse=True)
# Mostrar las 5 imágenes más cercanas: (la posición 0 es la propia imagen, por eso nos la saltamos)
for i in range(1,6):
ima = cv2.imread('imagenes/' + str(similaridades[i][0]) + '.png', 1)
PintaMI([img, ima], "{}ª imagen más cercana: Similaridad = {:.4f}".format(i, similaridades[i][1]))
#######################################################################################
# EJERCICIO 3:
#######################################################################################
# Visualización del vocabulario [3 puntos]
# • Usando las imágenes dadas en imagenesIR.rar se han extraido 600
# regiones de cada imagen de forma directa y se han re-escalado en
# parches de 24x24 píxeles. A partir de ellas se ha construido un
# vocabulario de 5.000 palabras usando k-means. Los ficheros con los datos
# son descriptorsAndpatches2000.pkl (descriptores de las regiones
# y los parches extraídos) y kmeanscenters2000.pkl (vocabulario
# extraído).
# • Elegir al menos dos palabras visuales diferentes y visualizar las
# regiones imagen de los 10 parches más cercanos de cada palabra visual,
# de forma que se muestre el contenido visual que codifican (mejor en
# niveles de gris).
# • Explicar si lo que se ha obtenido es realmente lo esperado en términos
# de cercanía visual de los parches.
def Ejercicio3(vocabulario, words):
# Cargamos los archivos proporcionados:
dicc = vocabulario
des, patches = axf.loadAux('descriptorsAndpatches2000.pkl', True)
# Declaramos el Matcher:
bf = cv2.BFMatcher(crossCheck=False)
# Obtenemos los 10 matches más cercanos:
matches = bf.knnMatch(dicc, des, k=10)
# Mostrar los ejemplos:
# en los dos primeros matches los parches son muy parecidos (casi idénticos)
# en el tercero, hay ligeras variaciones
# en el último son distintos:
for i in words:
print(i)
myPatches = []
for m in matches[i]:
# Convertirlo a escala de grises y hacer el reshape del parche:
img = cv2.cvtColor(patches[m.trainIdx], cv2.COLOR_BGR2GRAY).reshape(24,24)
myPatches.append(img)
dibuja10parches(myPatches)
#######################################################################################
# BONUS:
#######################################################################################
# Ejercicio 2: Creación de un vocabulario [2 puntos]: Calcular desde todas las
# imagenesIR.rar los ficheros dados en el Ejercicio 2 usando los mismos parámetros.
# Aplicar con el nuevo diccionario lo pedido con el Ejercicio 3.
def buildVocabulary():
# Crear el detector SIFT:
sift = cv2.xfeatures2d.SIFT_create(600)
img = cv2.imread('imagenes/0.png', 1)
des = sift.detectAndCompute(img, None)[1]
descriptors = des
# Para cada imagen:
for i in range(1, 441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Añadirlos: cada fila de la matriz es un descriptor:
descriptors = np.vstack((descriptors, des))
# Lo convertimos a float 32 (lo requiere el método kmeans):
descriptors = np.float32(descriptors)
# Realizamos el clustering para extraer el vocabulario:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
compactness, labels, centers = cv2.kmeans(descriptors, 2000, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
# Descomentar esta línea para guardar el vocabulario en un fichero:
# pickle.dump(centers, open("myVocabulary.pkl", "wb"))
return centers
#######################################################################################
# PRUEBA DE LAS FUNCIONES:
#######################################################################################
Ejercicio1()
# Ejercicio 2:
invert, bag = Ejercicio2()
# Casos favorables:
img = cv2.imread('imagenes/89.png',1 )
get_5_nearest_images(img, bag)
img = cv2.imread('imagenes/356.png',1 )
get_5_nearest_images(img, bag)
# Caso desfavorable:
img = cv2.imread('imagenes/78.png',1 )
get_5_nearest_images(img, bag)
voc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
# El segundo parámetro son las palabras a visualizar:
Ejercicio3(voc, (76, 32, 36, 80))
#######################################################################################
# PRUEBA DEL BONUS:
#######################################################################################
# Descomentar está línea y comentar la siguiente para volver a calcular el vocabulario:
# voc = buildVocabulary()
voc = pickle.load(open("myVocabulary.pkl", "rb"))
Ejercicio3(voc, (54, 96, 71, 100))
| )[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el índice invertido:
indice_invert = dict()
for i in range(dicc.shape[0]):
indice_invert[i] = set()
# Inicializar la bolsa de palabras:
bolsa_palabras = dict()
for i in range(441):
bolsa_palabras[i] = np.zeros(dicc.shape[0], np.int)
# Para cada imagen:
for i in range(441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Para cada match:
for m in matches:
# Actualizar el indice invertido:
indice_invert[m.trainIdx].add(i)
# Actualizar los histogramas de la bolsa de palabras:
bolsa_palabras[i][m.trainIdx] += 1
# Normalizar los histogramas:
for i in range(441):
bolsa_palabras[i] = bolsa_palabras[i] / np.linalg.norm(bolsa_palabras[i])
return indice_invert, bolsa_palabras
# Dada una imagen img, y la bolsa de palabras c | identifier_body |
script.py | # -*- coding: utf-8 -*-
##############################################################################
# Visión por Computador
# Trabajo 3: Indexación y recuperación de imágenes.
# @author Álvaro Fernández García
##############################################################################
import numpy as np
import cv2
import auxFunc as axf
import matplotlib.pyplot as plt
import pickle
# Muestra una imgen en pantalla. La imagen se recibe como una matriz:
def _showImage(img, title='Imagen'):
cv2.imshow(title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Concatena varias imágenes en una sola:
def PintaMI(vim, title):
assert len(vim) > 0
if len(vim) == 1:
finalImg = vim
else:
finalImg = vim[0]
vim.pop(0)
for img in vim:
finalImg = cv2.hconcat((finalImg, img))
_showImage(finalImg, title)
return finalImg
# Muestra en una imagen 10 parches de 24x24 pixeles:
def dibuja10parches(parches):
f, ax = plt.subplots(2,5)
for i,a in enumerate(ax.flatten()):
a.imshow(parches[i], 'gray')
a.set_xticks([])
a.set_yticks([])
plt.tight_layout()
plt.show()
#######################################################################################
# EJERCICIO 1:
#######################################################################################
# 1 .- Emparejamiento de descriptores [4 puntos]
# * Mirar las imágenes en imagenesIR.rar y elegir parejas de imágenes
# que tengan partes de escena comunes. Haciendo uso de una máscara
# binaria o de las funciones extractRegion() y clickAndDraw(), seleccionar
# una región en la primera imagen que esté presente en la segunda imagen.
# Para ello solo hay que fijar los vértices de un polígono que contenga
# a la región.
# * Extraiga los puntos SIFT contenidos en la región seleccionada de la
# primera imagen y calcule las correspondencias con todos los puntos
# SIFT de la segunda imagen (ayuda: use el concepto de máscara con
# el parámetro mask).
# * Pinte las correspondencias encontrados sobre las imágenes.
# * Jugar con distintas parejas de imágenes, valorar las correspondencias
# correctas obtenidas y extraer conclusiones respecto a la utilidad de
# esta aproximación de recuperación de regiones/objetos de interés a
# partir de descriptores de una región.
def Ejercicio1():
# Declaramos los objetos necesarios:
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=True)
# Crear las parejas de imagenes:
parejas = [
(cv2.imread('imagenes/55.png', 1), cv2.imread('imagenes/59.png', 1)),
(cv2.imread('imagenes/229.png', 1), cv2.imread('imagenes/248.png', 1)),
(cv2.imread('imagenes/71.png', 1), cv2.imread('imagenes/88.png', 1))
]
for par in parejas:
img1 = par[0]
img2 = par[1]
# Crear la máscara de la región extraída:
refPts = np.array(axf.extractRegion(img1))
mask = np.zeros((img1.shape[0], img1.shape[1]), np.uint8)
mask = cv2.fillConvexPoly(mask, refPts, (255,255,255))
# Extraer los puntos y descriptores:
kp1, des1 = sift.detectAndCompute(img1, mask)
kp2, des2 = sift.detectAndCompute(img2, None)
# Extraer las correspondencias:
matches = bf.match(des1, des2)
# Dibujarlas:
out = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)
_showImage(out)
#######################################################################################
# EJERCICIO 2:
#######################################################################################
# 2. Recuperación de imágenes [4 puntos]
# • Implementar un modelo de índice invertido + bolsa de palabras para
# las imágenes dadas en imagenesIR.rar usando el vocabulario dado
# en kmeanscenters2000.pkl.
# • Verificar que el modelo construido para cada imagen permite recu-
# perar imágenes de la misma escena cuando la comparamos al resto
# de imágenes de la base de datos.
# • Elegir dos imágenes-pregunta en las se ponga de manifiesto que el
# modelo usado es realmente muy efectivo para extraer sus semejantes y
# elegir otra imagen-pregunta en la que se muestre que el modelo puede
# realmente fallar. Para ello muestre las cinco imágenes más semejantes
# de cada una de las imágenes-pregunta seleccionadas usando como
# medida de distancia el producto escalar normalizado de sus vectores
# de bolsa de palabras.
# • Explicar qué conclusiones obtiene de este experimento.
# Función que construye el índice invertido y la bolsa de palabras:
def Ejercicio2():
# Cargar los centroides y crear el detector y matcher:
dicc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el índice invertido:
indice_invert = dict()
for i in range(dicc.shape[0]):
indice_invert[i] = set()
# Inicializar la bolsa de palabras:
bolsa_palabras = dict()
for i in range(441):
bolsa_palabras[i] = np.zeros(dicc.shape[0], np.int)
# Para cada imagen:
for i in range(441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Para cada match:
for m in matches:
# Actualizar el indice invertido:
indice_invert[m.trainIdx].add(i)
# Actualizar los histogramas de la bolsa de palabras:
bolsa_palabras[i][m.trainIdx] += 1
# Normalizar los histogramas:
for i in range(441):
bolsa_palabras[i] = bolsa_palabras[i] / np.linalg.norm(bolsa_palabras[i])
return indice_invert, bolsa_palabras
# Dada una imagen img, y la bolsa de palabras calculada con la función anterior,
# muestra las 5 imágenes más cercanas:
def get_5_nearest_images(img, bolsa_palabras):
# Paso 1: obtener la bolsa de palabras de la imagen:
# Declaramos los objetos necesarios:
dicc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el histograma:
q = np.zeros(dicc.shape[0])
# Extraer los descriptores de la imagen:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Actualizar el histograma:
for m in matches:
q[m.trainIdx] += 1
# Normalizarlo:
q = q / np.linalg.norm(q)
# Paso 2: Obtener las 5 imágenes más cercanas:
# para ello utilizamos la similaridad:
def sim(I, J):
# Como están normalizados no es necesario dividir entre el producto de las normas:
return (I * J).sum()
# Calcular las similaridades:
similaridades = []
for i in range(len(bolsa_palabras)):
similaridades.append((i, sim(bolsa_palabras[i], q)))
# Ordenamos las similaridades:
similaridades = sorted(similaridades, key = lambda x:x[1], reverse=True)
# Mostrar las 5 imágenes más cercanas: (la posición 0 es la propia imagen, por eso nos la saltamos)
for i in range(1,6):
ima = cv2.imread('imagenes/' + str(similaridades[i][0]) + '.png', 1)
PintaMI([img, ima], "{}ª imagen más cercana: Similaridad = {:.4f}".format(i, similaridades[i][1]))
#######################################################################################
# EJERCICIO 3:
#######################################################################################
# Visualización del vocabulario [3 puntos]
# • Usando las imágenes dadas en imagenesIR.rar se han extraido 600
# regiones de cada imagen de forma directa y se han re-escalado en
# parches de 24x24 píxeles. A partir de ellas se ha construido un
# vocabulario de 5.000 palabras usando k-means. Los ficheros con los datos
# son descriptorsAndpatches2000.pkl (descriptores de las regiones
# y los parches extraídos) y kmeanscenters2000.pkl (vocabulario
# extraído).
# • Elegir al menos dos palabras visuales diferentes y visualizar las
# regiones imagen de los 10 parches más cercanos de cada palabra visual,
# de forma que se muestre el contenido visual que codifican (mejor en
# niveles de gris).
# • Explicar si lo que se ha obtenido es realmente lo esperado en términos
# de cercanía visual de los parches.
def Ejercicio3(vocabulario, words):
# Cargamos los archivos proporcionados:
dicc = vocabulario
des, patches = axf.loadAux('descriptorsAndpatches2000.pkl', True)
# Declaramos el Matcher:
bf = cv2.BFMatcher(crossCheck=False)
# Obtenemos los 10 matches más cercanos:
matches = bf.knnMatch(dicc, des, k=10)
| for i in words:
print(i)
myPatches = []
for m in matches[i]:
# Convertirlo a escala de grises y hacer el reshape del parche:
img = cv2.cvtColor(patches[m.trainIdx], cv2.COLOR_BGR2GRAY).reshape(24,24)
myPatches.append(img)
dibuja10parches(myPatches)
#######################################################################################
# BONUS:
#######################################################################################
# Ejercicio 2: Creación de un vocabulario [2 puntos]: Calcular desde todas las
# imagenesIR.rar los ficheros dados en el Ejercicio 2 usando los mismos parámetros.
# Aplicar con el nuevo diccionario lo pedido con el Ejercicio 3.
def buildVocabulary():
# Crear el detector SIFT:
sift = cv2.xfeatures2d.SIFT_create(600)
img = cv2.imread('imagenes/0.png', 1)
des = sift.detectAndCompute(img, None)[1]
descriptors = des
# Para cada imagen:
for i in range(1, 441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Añadirlos: cada fila de la matriz es un descriptor:
descriptors = np.vstack((descriptors, des))
# Lo convertimos a float 32 (lo requiere el método kmeans):
descriptors = np.float32(descriptors)
# Realizamos el clustering para extraer el vocabulario:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
compactness, labels, centers = cv2.kmeans(descriptors, 2000, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
# Descomentar esta línea para guardar el vocabulario en un fichero:
# pickle.dump(centers, open("myVocabulary.pkl", "wb"))
return centers
#######################################################################################
# PRUEBA DE LAS FUNCIONES:
#######################################################################################
Ejercicio1()
# Ejercicio 2:
invert, bag = Ejercicio2()
# Casos favorables:
img = cv2.imread('imagenes/89.png',1 )
get_5_nearest_images(img, bag)
img = cv2.imread('imagenes/356.png',1 )
get_5_nearest_images(img, bag)
# Caso desfavorable:
img = cv2.imread('imagenes/78.png',1 )
get_5_nearest_images(img, bag)
voc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
# El segundo parámetro son las palabras a visualizar:
Ejercicio3(voc, (76, 32, 36, 80))
#######################################################################################
# PRUEBA DEL BONUS:
#######################################################################################
# Descomentar está línea y comentar la siguiente para volver a calcular el vocabulario:
# voc = buildVocabulary()
voc = pickle.load(open("myVocabulary.pkl", "rb"))
Ejercicio3(voc, (54, 96, 71, 100)) | # Mostrar los ejemplos:
# en los dos primeros matches los parches son muy parecidos (casi idénticos)
# en el tercero, hay ligeras variaciones
# en el último son distintos: | random_line_split |
script.py | # -*- coding: utf-8 -*-
##############################################################################
# Visión por Computador
# Trabajo 3: Indexación y recuperación de imágenes.
# @author Álvaro Fernández García
##############################################################################
import numpy as np
import cv2
import auxFunc as axf
import matplotlib.pyplot as plt
import pickle
# Muestra una imgen en pantalla. La imagen se recibe como una matriz:
def _showImage(img, title='Imagen'):
cv2.imshow(title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Concatena varias imágenes en una sola:
def PintaMI(vim, title):
assert len(vim) > 0
if len(vim) == 1:
finalImg = vim
else:
finalImg = vim[0]
vim.pop(0)
for img in vim:
finalImg = cv2.hconcat((finalImg, img))
_showImage(finalImg, title)
return finalImg
# Muestra en una imagen 10 parches de 24x24 pixeles:
def dibuja10parches(parches):
f, ax = plt.subplots(2,5)
for i,a in enumerate(ax.flatten()):
a.imshow(parches[i], 'gray')
a.set_xticks([])
a.set_yticks([])
plt.tight_layout()
plt.show()
#######################################################################################
# EJERCICIO 1:
#######################################################################################
# 1 .- Emparejamiento de descriptores [4 puntos]
# * Mirar las imágenes en imagenesIR.rar y elegir parejas de imágenes
# que tengan partes de escena comunes. Haciendo uso de una máscara
# binaria o de las funciones extractRegion() y clickAndDraw(), seleccionar
# una región en la primera imagen que esté presente en la segunda imagen.
# Para ello solo hay que fijar los vértices de un polígono que contenga
# a la región.
# * Extraiga los puntos SIFT contenidos en la región seleccionada de la
# primera imagen y calcule las correspondencias con todos los puntos
# SIFT de la segunda imagen (ayuda: use el concepto de máscara con
# el parámetro mask).
# * Pinte las correspondencias encontrados sobre las imágenes.
# * Jugar con distintas parejas de imágenes, valorar las correspondencias
# correctas obtenidas y extraer conclusiones respecto a la utilidad de
# esta aproximación de recuperación de regiones/objetos de interés a
# partir de descriptores de una región.
def Ejercicio1():
# Declaramos los objetos necesarios:
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=True)
# Crear las parejas de imagenes:
parejas = [
(cv2.imread('imagenes/55.png', 1), cv2.imread('imagenes/59.png', 1)),
(cv2.imread('imagenes/229.png', 1), cv2.imread('imagenes/248.png', 1)),
(cv2.imread('imagenes/71.png', 1), cv2.imread('imagenes/88.png', 1))
]
for par in parejas:
img1 = par[0]
img2 = par[1]
# Crear la máscara de la región extraída:
refPts = np.array(axf.extractRegion(img1))
mask = np.zeros((img1.shape[0], img1.shape[1]), np.uint8)
mask = cv2.fillConvexPoly(mask, refPts, (255,255,255))
# Extraer los puntos y descriptores:
kp1, des1 = sift.detectAndCompute(img1, mask)
kp2, des2 = sift.detectAndCompute(img2, None)
# Extraer las correspondencias:
matches = bf.match(des1, des2)
# Dibujarlas:
out = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)
_showImage(out)
#######################################################################################
# EJERCICIO 2:
#######################################################################################
# 2. Recuperación de imágenes [4 puntos]
# • Implementar un modelo de índice invertido + bolsa de palabras para
# las imágenes dadas en imagenesIR.rar usando el vocabulario dado
# en kmeanscenters2000.pkl.
# • Verificar que el modelo construido para cada imagen permite recu-
# perar imágenes de la misma escena cuando la comparamos al resto
# de imágenes de la base de datos.
# • Elegir dos imágenes-pregunta en las se ponga de manifiesto que el
# modelo usado es realmente muy efectivo para extraer sus semejantes y
# elegir otra imagen-pregunta en la que se muestre que el modelo puede
# realmente fallar. Para ello muestre las cinco imágenes más semejantes
# de cada una de las imágenes-pregunta seleccionadas usando como
# medida de distancia el producto escalar normalizado de sus vectores
# de bolsa de palabras.
# • Explicar qué conclusiones obtiene de este experimento.
# Función que construye el índice invertido y la bolsa de palabras:
def Ejercicio2():
# Cargar los centroides y crear el detector y matcher:
dicc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el índice invertido:
indice_invert = dict()
for i in range(dicc.shape[0]):
indice_invert[i] = set()
# Inicializar la bolsa de palabras:
bolsa_palabras = dict()
for i in range(441):
bolsa_palabras[i] = np.zeros(dicc.shape[0], np.int)
# Para cada imagen:
for i in range(441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Para cada match:
for m in matches:
# Actualizar el indice invertido:
indice_invert[m.trainIdx].add(i)
# Actualizar los histogramas de la bolsa de palabras:
bolsa_palabras[i][m.trainIdx] += 1
# Normalizar los histogramas:
for i in range(441):
bolsa_palabras[i] = bolsa_palabras[i] / np.linalg.norm(bolsa_palabras[i])
return indice_invert, bolsa_palabras
# Dada una imagen img, y la bolsa de palabras calculada con la función anterior,
# muestra las 5 imágenes más cercanas:
def get_5_nearest_images(img, bolsa_palabras):
# Paso 1: obtener la bolsa de palabras de la imagen:
# Declaramos los objetos necesarios:
dicc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher(crossCheck=False)
# Inicializar el histograma:
q = np.zeros(dicc.shape[0])
# Extraer los descriptores de la imagen:
des = sift.detectAndCompute(img, None)[1]
# Normalizar los descriptores:
des_normalized = []
for d in des:
norm = np.linalg.norm(d)
# Ignoramos los descriptores nulos:
if norm != 0:
des_normalized.append(d * (1/norm))
des = np.array(des_normalized)
# Extraer los matches:
matches = bf.match(des, dicc)
# Actualizar el histograma:
for m in matches:
q[m.trainIdx] += 1
# Normalizarlo:
q = q / np.linalg.norm(q)
# Paso 2: Obtener las 5 imágenes más cercanas:
# para ello utilizamos la similaridad:
def sim(I, J):
# Como están normalizados no es necesario dividir entre el producto de las normas:
return (I * J).sum()
# Calcular las similaridades:
similaridades = []
for i in range(len(bolsa_palabras)):
similaridades.append((i, sim(bolsa_palabras[i], q)))
# Ordenamos las similaridades:
similaridades = sorted(similaridades, key = lambda x:x[1], reverse=True)
# Mostrar las 5 imágenes más cercanas: (la posición 0 es la propia imagen, por eso nos la saltamos)
for i in range(1,6):
ima = cv2.imread('imagenes/' + str(similaridades[i][0]) + ' | #####################################
# EJERCICIO 3:
#######################################################################################
# Visualización del vocabulario [3 puntos]
# • Usando las imágenes dadas en imagenesIR.rar se han extraido 600
# regiones de cada imagen de forma directa y se han re-escalado en
# parches de 24x24 píxeles. A partir de ellas se ha construido un
# vocabulario de 5.000 palabras usando k-means. Los ficheros con los datos
# son descriptorsAndpatches2000.pkl (descriptores de las regiones
# y los parches extraídos) y kmeanscenters2000.pkl (vocabulario
# extraído).
# • Elegir al menos dos palabras visuales diferentes y visualizar las
# regiones imagen de los 10 parches más cercanos de cada palabra visual,
# de forma que se muestre el contenido visual que codifican (mejor en
# niveles de gris).
# • Explicar si lo que se ha obtenido es realmente lo esperado en términos
# de cercanía visual de los parches.
def Ejercicio3(vocabulario, words):
# Cargamos los archivos proporcionados:
dicc = vocabulario
des, patches = axf.loadAux('descriptorsAndpatches2000.pkl', True)
# Declaramos el Matcher:
bf = cv2.BFMatcher(crossCheck=False)
# Obtenemos los 10 matches más cercanos:
matches = bf.knnMatch(dicc, des, k=10)
# Mostrar los ejemplos:
# en los dos primeros matches los parches son muy parecidos (casi idénticos)
# en el tercero, hay ligeras variaciones
# en el último son distintos:
for i in words:
print(i)
myPatches = []
for m in matches[i]:
# Convertirlo a escala de grises y hacer el reshape del parche:
img = cv2.cvtColor(patches[m.trainIdx], cv2.COLOR_BGR2GRAY).reshape(24,24)
myPatches.append(img)
dibuja10parches(myPatches)
#######################################################################################
# BONUS:
#######################################################################################
# Ejercicio 2: Creación de un vocabulario [2 puntos]: Calcular desde todas las
# imagenesIR.rar los ficheros dados en el Ejercicio 2 usando los mismos parámetros.
# Aplicar con el nuevo diccionario lo pedido con el Ejercicio 3.
def buildVocabulary():
# Crear el detector SIFT:
sift = cv2.xfeatures2d.SIFT_create(600)
img = cv2.imread('imagenes/0.png', 1)
des = sift.detectAndCompute(img, None)[1]
descriptors = des
# Para cada imagen:
for i in range(1, 441):
# Leemos la imagen
img_name = 'imagenes/' + str(i) + '.png'
img = cv2.imread(img_name, 1)
# Obtener los descriptores:
des = sift.detectAndCompute(img, None)[1]
# Añadirlos: cada fila de la matriz es un descriptor:
descriptors = np.vstack((descriptors, des))
# Lo convertimos a float 32 (lo requiere el método kmeans):
descriptors = np.float32(descriptors)
# Realizamos el clustering para extraer el vocabulario:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
compactness, labels, centers = cv2.kmeans(descriptors, 2000, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
# Descomentar esta línea para guardar el vocabulario en un fichero:
# pickle.dump(centers, open("myVocabulary.pkl", "wb"))
return centers
#######################################################################################
# PRUEBA DE LAS FUNCIONES:
#######################################################################################
Ejercicio1()
# Ejercicio 2:
invert, bag = Ejercicio2()
# Casos favorables:
img = cv2.imread('imagenes/89.png',1 )
get_5_nearest_images(img, bag)
img = cv2.imread('imagenes/356.png',1 )
get_5_nearest_images(img, bag)
# Caso desfavorable:
img = cv2.imread('imagenes/78.png',1 )
get_5_nearest_images(img, bag)
voc = axf.loadDictionary('kmeanscenters2000.pkl')[2]
# El segundo parámetro son las palabras a visualizar:
Ejercicio3(voc, (76, 32, 36, 80))
#######################################################################################
# PRUEBA DEL BONUS:
#######################################################################################
# Descomentar está línea y comentar la siguiente para volver a calcular el vocabulario:
# voc = buildVocabulary()
voc = pickle.load(open("myVocabulary.pkl", "rb"))
Ejercicio3(voc, (54, 96, 71, 100))
| .png', 1)
PintaMI([img, ima], "{}ª imagen más cercana: Similaridad = {:.4f}".format(i, similaridades[i][1]))
################################################## | conditional_block |
index_file_deleter.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::segment_infos::{
generation_from_segments_file_name, parse_generation, parse_segment_name, SegmentInfos,
CODEC_FILE_PATTERN, CODEC_UPDATE_DV_PATTERN, CODEC_UPDATE_FNM_PATTERN,
INDEX_FILE_OLD_SEGMENT_GEN, INDEX_FILE_PENDING_SEGMENTS, INDEX_FILE_SEGMENTS,
};
use core::codec::Codec;
use core::index::writer::KeepOnlyLastCommitDeletionPolicy;
use core::store::directory::{Directory, LockValidatingDirectoryWrapper};
use regex::Regex;
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::mem;
use std::sync::{Arc, Mutex, RwLock};
use error::{ErrorKind, Result};
use std::time::{SystemTime, UNIX_EPOCH};
/// This class keeps track of each SegmentInfos instance that
/// is still "live", either because it corresponds to a
/// segments_N file in the Directory (a "commit", i.e. a
/// committed SegmentInfos) or because it's an in-memory
/// SegmentInfos that a writer is actively updating but has
/// not yet committed. This class uses simple reference
/// counting to map the live SegmentInfos instances to
/// individual files in the Directory.
///
/// The same directory file may be referenced by more than
/// one IndexCommit, i.e. more than one SegmentInfos.
/// Therefore we count how many commits reference each file.
/// When all the commits referencing a certain file have been
/// deleted, the refcount for that file becomes zero, and the
/// file is deleted.
///
/// A separate deletion policy interface
/// (IndexDeletionPolicy) is consulted on creation (onInit)
/// and once per commit (onCommit), to decide when a commit
/// should be removed.
///
/// It is the business of the IndexDeletionPolicy to choose
/// when to delete commit points. The actual mechanics of
/// file deletion, retrying, etc, derived from the deletion
/// of commit points is the business of the IndexFileDeleter.
///
/// The current default deletion policy is {@link
/// KeepOnlyLastCommitDeletionPolicy}, which removes all
/// prior commits when a new commit has completed. This
/// matches the behavior before 2.2.
///
/// Note that you must hold the write.lock before
/// instantiating this class. It opens segments_N file(s)
/// directly with no retry logic.
pub struct IndexFileDeleter<D: Directory> {
/// Reference count for all files in the index. Counts
/// how many existing commits reference a file.
ref_counts: Arc<RwLock<HashMap<String, RefCount>>>,
/// Holds all commits (segments_N) currently in the index.
/// this will have just 1 commit if you are using the default
/// delete policy (KeepOnlyLastCommitDeletionPolicy). Other policies
/// may leave commit points live for longer in which case this list
/// would be longer than 1.
commits: Vec<CommitPoint>,
/// Holds files we had inc_ref'd from the previous non-commit checkpoint:
last_files: HashSet<String>,
policy: KeepOnlyLastCommitDeletionPolicy,
delayed_dv_update_files: Arc<Mutex<Vec<(u64, Vec<String>)>>>,
dv_pattern: Regex,
fnm_pattern: Regex,
directory: Arc<LockValidatingDirectoryWrapper<D>>,
inited: bool,
}
impl<D: Directory> IndexFileDeleter<D> {
pub fn new(directory: Arc<LockValidatingDirectoryWrapper<D>>) -> Self {
IndexFileDeleter {
ref_counts: Arc::new(RwLock::new(HashMap::new())),
commits: vec![],
last_files: HashSet::new(),
policy: KeepOnlyLastCommitDeletionPolicy {},
delayed_dv_update_files: Arc::new(Mutex::new(Vec::new())),
dv_pattern: Regex::new(CODEC_UPDATE_DV_PATTERN).unwrap(),
fnm_pattern: Regex::new(CODEC_UPDATE_FNM_PATTERN).unwrap(),
directory,
inited: false,
}
}
pub fn init<C: Codec>(
&mut self,
directory_orig: Arc<D>,
files: &[String],
segment_infos: &mut SegmentInfos<D, C>,
initial_index_exists: bool,
) -> Result<bool> {
let mut current_commit_point_idx: Option<usize> = None;
if let Some(ref current_segments_file) = segment_infos.segment_file_name() {
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in files {
if pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS)
{
// Add this file to ref_counts with initial count 0.
{
if !self.ref_counts.read()?.contains_key(filename) {
self.ref_counts
.write()?
.insert(filename.to_string(), RefCount::default());
}
}
if filename.starts_with(INDEX_FILE_SEGMENTS)
&& filename != INDEX_FILE_OLD_SEGMENT_GEN
{
// This is a commit (segments or segments_N), and
// it's valid (<= the max gen). Load it, then
// incref all files it refers to:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, filename)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
if sis.generation == segment_infos.generation {
current_commit_point_idx = Some(self.commits.len() - 1);
}
self.inc_ref_files(&sis.files(true));
}
}
}
if current_commit_point_idx.is_none() && initial_index_exists {
// We did not in fact see the segments_N file
// corresponding to the segmentInfos that was passed
// in. Yet, it must exist, because our caller holds
// the write lock. This can happen when the directory
// listing was stale (eg when index accessed via NFS
// client with stale directory listing cache). So we
// try now to explicitly open this commit point:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, current_segments_file)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
current_commit_point_idx = Some(self.commits.len() - 1);
self.inc_ref_files(&sis.files(true));
}
}
// We keep commits list in sorted order (oldest to newest):
self.commits.sort();
// refCounts only includes "normal" filenames (does not include write.lock)
{
let ref_counts = self.ref_counts.read()?;
let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect();
Self::inflate_gens(segment_infos, files)?;
}
// Now delete anything with ref count at 0. These are
// presumably abandoned files eg due to crash of
// IndexWriter.
{
let mut to_delete = HashSet::new();
for (filename, rc) in &*self.ref_counts.read()? {
if rc.count == 0 {
// A segments_N file should never have ref count 0 on init
if filename.starts_with(INDEX_FILE_SEGMENTS) {
bail!(ErrorKind::IllegalState(format!(
"file '{}' has ref_count=0, shouldn't happen on init",
filename
)));
}
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)?;
}
// Finally, give policy a chance to remove things on
// startup:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_init(commits)?;
}
// Always protect the incoming segmentInfos since
// sometime it may not be the most recent commit
self.checkpoint(segment_infos, false)?;
let mut starting_commit_deleted = false;
if let Some(idx) = current_commit_point_idx {
if self.commits[idx].deleted {
starting_commit_deleted = true;
}
}
self.delete_commits()?;
self.inited = true;
Ok(starting_commit_deleted)
}
/// Set all gens beyond what we currently see in the directory, to avoid double-write
/// in cases where the previous IndexWriter did not gracefully close/rollback (e.g.
/// os/machine crashed or lost power).
fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> {
let mut max_segment_gen = i64::min_value();
let mut max_segment_name = i32::min_value();
// Confusingly, this is the union of live_docs, field infos, doc values
// (and maybe others, in the future) gens. This is somewhat messy,
// since it means DV updates will suddenly write to the next gen after
// live docs' gen, for example, but we don't have the APIs to ask the
// codec which file is which:
let mut max_per_segment_gen = HashMap::new();
for filename in files {
if filename == INDEX_FILE_OLD_SEGMENT_GEN {
// do nothing
} else if filename.starts_with(INDEX_FILE_SEGMENTS) {
// trash file: we have to handle this since we allow anything
// starting with 'segments' here
if let Ok(gen) = generation_from_segments_file_name(filename) {
max_segment_gen = max_segment_gen.max(gen);
}
} else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) {
// the first 8 bytes is "pending_", so the slice operation is safe
if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) {
max_segment_gen = max_segment_gen.max(gen);
}
} else {
let segment_name = parse_segment_name(filename);
debug_assert!(segment_name.starts_with('_'));
if filename.to_lowercase().ends_with(".tmp") {
// A temp file: don't try to look at its gen
continue;
}
max_segment_name =
max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?);
let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x);
if let Ok(gen) = parse_generation(filename) {
cur_gen = cur_gen.max(gen);
}
max_per_segment_gen.insert(segment_name.to_string(), cur_gen);
}
}
// Generation is advanced before write:
let next_write_gen = max_segment_gen.max(infos.generation);
infos.set_next_write_generation(next_write_gen)?;
if infos.counter < max_segment_name + 1 {
infos.counter = max_segment_name
}
for info in &mut infos.segments {
let gen = max_per_segment_gen[&info.info.name];
if info.next_write_del_gen() < gen + 1 {
info.set_next_write_del_gen(gen + 1);
}
if info.next_write_field_infos_gen() < gen + 1 {
info.set_next_write_field_infos_gen(gen + 1);
}
if info.next_write_doc_values_gen() < gen + 1 {
info.set_next_write_doc_values_gen(gen + 1);
}
}
Ok(())
}
/// For definition of "check point" see IndexWriter comments:
/// "Clarification: Check Points (and commits)".
///
/// Writer calls this when it has made a "consistent
/// change" to the index, meaning new files are written to
/// the index and the in-memory SegmentInfos have been
/// modified to point to those files.
///
/// This may or may not be a commit (segments_N may or may
/// not have been written).
///
/// We simply incref the files referenced by the new
/// SegmentInfos and decref the files we had previously
/// seen (if any).
///
/// If this is a commit, we also call the policy to give it
/// a chance to remove other commits. If any commits are
/// removed, we decref their files as well.
pub fn checkpoint<C: Codec>(
&mut self,
segment_infos: &SegmentInfos<D, C>,
is_commit: bool,
) -> Result<()> {
// incref the files:
self.inc_ref_files(&segment_infos.files(is_commit));
if is_commit {
// Append to our commits list:
let p = CommitPoint::new(
segment_infos.generation,
segment_infos.segment_file_name().unwrap_or("".to_string()),
segment_infos.files(true),
segment_infos.has_dv_updates(),
);
self.commits.push(p);
// Tell policy so it can remove commits:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_commit(commits)?;
}
// DecRef file for commits that were deleted by the policy
self.delete_commits()
} else {
let res = self.dec_ref_files(&self.last_files);
self.last_files.clear();
res?;
// Save files so we can decr on next checkpoint/commit:
self.last_files.extend(segment_infos.files(false));
Ok(())
}
}
pub fn exists(&self, filename: &str) -> bool {
if !self.ref_counts.read().unwrap().contains_key(filename) {
false
} else {
self.ensure_ref_count(filename);
self.ref_counts.read().unwrap()[filename].count > 0
}
}
fn ensure_ref_count(&self, file_name: &str) {
let mut ref_counts = self.ref_counts.write().unwrap();
if !ref_counts.contains_key(file_name) {
ref_counts.insert(file_name.to_string(), RefCount::default());
}
}
pub fn inc_ref_files(&self, files: &HashSet<String>) {
for file in files {
self.ensure_ref_count(file);
self.ref_counts
.write()
.unwrap()
.get_mut(file)
.unwrap()
.inc_ref();
}
}
/// Decrefs all provided files, even on exception; throws first exception hit, if any.
pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, false)
}
fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()> {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, true)
}
pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) {
if let Err(e) = self.dec_ref_files(files) {
warn!("dec_ref_files_no_error failed with '{:?}'", e);
}
}
/// Returns true if the file should now be deleted.
fn dec_ref(&self, filename: &str) -> bool {
self.ensure_ref_count(filename);
let mut ref_counts = self.ref_counts.write().unwrap();
if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 {
// This file is no longer referenced by any past
// commit points nor by the in-memory SegmentInfos:
ref_counts.remove(filename);
true
} else {
false
}
}
/// Remove the CommitPoints in the commitsToDelete List by
/// DecRef'ing all files from each SegmentInfos.
fn delete_commits(&mut self) -> Result<()> {
let mut res = Ok(());
// First decref all files that had been referred to by
// the now-deleted commits:
for commit in &self.commits {
if commit.deleted {
res = self.dec_ref_files(&commit.files);
}
}
// NOTE: does nothing if not err
if res.is_err() {
return res;
}
// Now compact commits to remove deleted ones (preserving the sort):
let size = self.commits.len();
let mut read_from = 0;
let mut write_to = 0;
while read_from < size {
if !self.commits[read_from].deleted {
if write_to != read_from {
self.commits.swap(read_from, write_to);
}
write_to += 1;
}
read_from += 1;
}
self.commits.truncate(write_to);
Ok(())
}
fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> {
// We make two passes, first deleting any segments_N files, second
// deleting the rest. We do this so that if we throw exc or JVM
// crashes during deletions, even when not on Windows, we don't
// leave the index in an "apparently corrupt" state:
let mut copys = vec![];
for file in files {
copys.push(file);
if !file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
if do_commit_filter {
self.filter_dv_update_files(&mut copys);
}
for file in copys {
if file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
Ok(())
}
fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) {
let dv_update_files: Vec<String> = candidates
.drain_filter(|f| -> bool {
self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f)
})
.map(|f| f.clone())
.collect();
let to_deletes: Vec<Vec<String>>;
{
let mut l = self.delayed_dv_update_files.lock();
let old_dv_update_files = l.as_mut().unwrap();
let tm_now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
to_deletes = old_dv_update_files
.drain_filter(|(x, _)| -> bool { *x < tm_now })
.map(|(_, y)| y)
.collect();
old_dv_update_files.push((tm_now + 60, dv_update_files));
}
for files in to_deletes {
for file in files {
self.delete_file(&file).unwrap_or(());
}
}
}
fn delete_file(&self, filename: &str) -> Result<()> {
// panic!("wrong deleted files");
self.directory.delete_file(filename)
}
/// Deletes the specified files, but only if they are new
/// (have not yes been incref'd).
pub fn delete_new_files(&self, files: &HashSet<String>) -> Result<()> {
let mut filtered = HashSet::with_capacity(files.len());
let ref_counts = self.ref_counts.read().unwrap();
for file in files {
// NOTE: it's very unusual yet possible for the
// refCount to be present and 0: it can happen if you
// open IW on a crashed index, and it removes a bunch
// of unref'd files, and then you add new docs / do
// merging, and it reuses that segment name.
// TestCrash.testCrashAfterReopen can hit this:
if !ref_counts.contains_key(file) || ref_counts[file].count == 0 {
filtered.insert(file.clone());
}
}
self.delete_files(&filtered, false)
}
/// Writer calls this when it has hit an error and had to
/// roll back, to tell us that there may now be
/// unreferenced files in the filesystem. So we re-list
/// the filesystem and delete such files. If segmentName
/// is non-null, we will only delete files corresponding to
/// that segment.
pub fn refresh(&mut self) -> Result<()> {
debug_assert!(self.inited);
let files = self.directory.list_all()?;
let mut to_delete = HashSet::new();
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in &files {
if !self.ref_counts.read()?.contains_key(filename)
&& (pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS))
{
// Unreferenced file, so remove it
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)
}
pub fn close(&mut self) -> Result<()> {
if !self.last_files.is_empty() {
let files = mem::replace(&mut self.last_files, HashSet::new());
self.dec_ref_files(&files)?;
}
Ok(())
}
}
struct RefCount {
inited: bool,
count: u32,
}
impl Default for RefCount {
fn default() -> Self {
RefCount {
inited: false,
count: 0,
}
}
}
impl RefCount {
fn inc_ref(&mut self) -> u32 {
if !self.inited {
self.inited = true;
} else {
debug_assert!(self.count > 0);
}
self.count += 1;
self.count
}
fn dec_ref(&mut self) -> u32 {
debug_assert!(self.count > 0);
self.count -= 1;
self.count
}
}
impl fmt::Display for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
impl fmt::Debug for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
/// Expert: represents a single commit into an index as seen by the
/// {@link IndexDeletionPolicy} or {@link IndexReader}.
///
/// Changes to the content of an index are made visible
/// only after the writer who made that change commits by
/// writing a new segments file
/// (`segments_N</code`). This point in time, when the
/// action of writing of a new segments file to the directory
/// is completed, is an index commit.
///
/// Each index commit point has a unique segments file
/// associated with it. The segments file associated with a
/// later index commit point would have a larger N.
///
/// Holds details for each commit point. This class is also passed to
/// the deletion policy. Note: this class has a natural ordering that
/// is inconsistent with equals.
pub struct CommitPoint {
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
deleted: bool,
}
impl CommitPoint {
fn new(
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
) -> Self {
CommitPoint {
generation,
segment_file_name,
files,
has_dv_updates,
deleted: false,
}
}
/// Get the segments file (`segments_N`) associated with this commit point
pub fn segments_file_name(&self) -> &str {
&self.segment_file_name
}
/// Delete this commit point. This only applies when using
/// the commit point in the context of IndexWriter's
/// IndexDeletionPolicy.
///
/// Upon calling this, the writer is notified that this commit
/// point should be deleted.
///
/// Decision that a commit-point should be deleted is taken by the
/// `IndexDeletionPolicy` in effect and therefore this should only
/// be called by its `IndexDeletionPolicy#onInit on_init()` or
/// `IndexDeletionPolicy#onCommit on_commit()` methods.
pub fn delete(&mut self) -> Result<()> {
self.deleted = true;
Ok(()) | }
impl Ord for CommitPoint {
fn cmp(&self, other: &Self) -> Ordering {
self.generation.cmp(&other.generation)
}
}
impl PartialOrd for CommitPoint {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for CommitPoint {}
impl PartialEq for CommitPoint {
fn eq(&self, other: &Self) -> bool {
self.segment_file_name == other.segment_file_name && self.generation == other.generation
}
} | }
pub fn has_dv_updates(&self) -> bool {
self.has_dv_updates
} | random_line_split |
index_file_deleter.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::segment_infos::{
generation_from_segments_file_name, parse_generation, parse_segment_name, SegmentInfos,
CODEC_FILE_PATTERN, CODEC_UPDATE_DV_PATTERN, CODEC_UPDATE_FNM_PATTERN,
INDEX_FILE_OLD_SEGMENT_GEN, INDEX_FILE_PENDING_SEGMENTS, INDEX_FILE_SEGMENTS,
};
use core::codec::Codec;
use core::index::writer::KeepOnlyLastCommitDeletionPolicy;
use core::store::directory::{Directory, LockValidatingDirectoryWrapper};
use regex::Regex;
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::mem;
use std::sync::{Arc, Mutex, RwLock};
use error::{ErrorKind, Result};
use std::time::{SystemTime, UNIX_EPOCH};
/// This class keeps track of each SegmentInfos instance that
/// is still "live", either because it corresponds to a
/// segments_N file in the Directory (a "commit", i.e. a
/// committed SegmentInfos) or because it's an in-memory
/// SegmentInfos that a writer is actively updating but has
/// not yet committed. This class uses simple reference
/// counting to map the live SegmentInfos instances to
/// individual files in the Directory.
///
/// The same directory file may be referenced by more than
/// one IndexCommit, i.e. more than one SegmentInfos.
/// Therefore we count how many commits reference each file.
/// When all the commits referencing a certain file have been
/// deleted, the refcount for that file becomes zero, and the
/// file is deleted.
///
/// A separate deletion policy interface
/// (IndexDeletionPolicy) is consulted on creation (onInit)
/// and once per commit (onCommit), to decide when a commit
/// should be removed.
///
/// It is the business of the IndexDeletionPolicy to choose
/// when to delete commit points. The actual mechanics of
/// file deletion, retrying, etc, derived from the deletion
/// of commit points is the business of the IndexFileDeleter.
///
/// The current default deletion policy is {@link
/// KeepOnlyLastCommitDeletionPolicy}, which removes all
/// prior commits when a new commit has completed. This
/// matches the behavior before 2.2.
///
/// Note that you must hold the write.lock before
/// instantiating this class. It opens segments_N file(s)
/// directly with no retry logic.
pub struct IndexFileDeleter<D: Directory> {
/// Reference count for all files in the index. Counts
/// how many existing commits reference a file.
ref_counts: Arc<RwLock<HashMap<String, RefCount>>>,
/// Holds all commits (segments_N) currently in the index.
/// this will have just 1 commit if you are using the default
/// delete policy (KeepOnlyLastCommitDeletionPolicy). Other policies
/// may leave commit points live for longer in which case this list
/// would be longer than 1.
commits: Vec<CommitPoint>,
/// Holds files we had inc_ref'd from the previous non-commit checkpoint:
last_files: HashSet<String>,
policy: KeepOnlyLastCommitDeletionPolicy,
delayed_dv_update_files: Arc<Mutex<Vec<(u64, Vec<String>)>>>,
dv_pattern: Regex,
fnm_pattern: Regex,
directory: Arc<LockValidatingDirectoryWrapper<D>>,
inited: bool,
}
impl<D: Directory> IndexFileDeleter<D> {
pub fn new(directory: Arc<LockValidatingDirectoryWrapper<D>>) -> Self {
IndexFileDeleter {
ref_counts: Arc::new(RwLock::new(HashMap::new())),
commits: vec![],
last_files: HashSet::new(),
policy: KeepOnlyLastCommitDeletionPolicy {},
delayed_dv_update_files: Arc::new(Mutex::new(Vec::new())),
dv_pattern: Regex::new(CODEC_UPDATE_DV_PATTERN).unwrap(),
fnm_pattern: Regex::new(CODEC_UPDATE_FNM_PATTERN).unwrap(),
directory,
inited: false,
}
}
pub fn init<C: Codec>(
&mut self,
directory_orig: Arc<D>,
files: &[String],
segment_infos: &mut SegmentInfos<D, C>,
initial_index_exists: bool,
) -> Result<bool> {
let mut current_commit_point_idx: Option<usize> = None;
if let Some(ref current_segments_file) = segment_infos.segment_file_name() {
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in files {
if pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS)
{
// Add this file to ref_counts with initial count 0.
{
if !self.ref_counts.read()?.contains_key(filename) {
self.ref_counts
.write()?
.insert(filename.to_string(), RefCount::default());
}
}
if filename.starts_with(INDEX_FILE_SEGMENTS)
&& filename != INDEX_FILE_OLD_SEGMENT_GEN
{
// This is a commit (segments or segments_N), and
// it's valid (<= the max gen). Load it, then
// incref all files it refers to:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, filename)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
if sis.generation == segment_infos.generation {
current_commit_point_idx = Some(self.commits.len() - 1);
}
self.inc_ref_files(&sis.files(true));
}
}
}
if current_commit_point_idx.is_none() && initial_index_exists {
// We did not in fact see the segments_N file
// corresponding to the segmentInfos that was passed
// in. Yet, it must exist, because our caller holds
// the write lock. This can happen when the directory
// listing was stale (eg when index accessed via NFS
// client with stale directory listing cache). So we
// try now to explicitly open this commit point:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, current_segments_file)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
current_commit_point_idx = Some(self.commits.len() - 1);
self.inc_ref_files(&sis.files(true));
}
}
// We keep commits list in sorted order (oldest to newest):
self.commits.sort();
// refCounts only includes "normal" filenames (does not include write.lock)
{
let ref_counts = self.ref_counts.read()?;
let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect();
Self::inflate_gens(segment_infos, files)?;
}
// Now delete anything with ref count at 0. These are
// presumably abandoned files eg due to crash of
// IndexWriter.
{
let mut to_delete = HashSet::new();
for (filename, rc) in &*self.ref_counts.read()? {
if rc.count == 0 {
// A segments_N file should never have ref count 0 on init
if filename.starts_with(INDEX_FILE_SEGMENTS) {
bail!(ErrorKind::IllegalState(format!(
"file '{}' has ref_count=0, shouldn't happen on init",
filename
)));
}
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)?;
}
// Finally, give policy a chance to remove things on
// startup:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_init(commits)?;
}
// Always protect the incoming segmentInfos since
// sometime it may not be the most recent commit
self.checkpoint(segment_infos, false)?;
let mut starting_commit_deleted = false;
if let Some(idx) = current_commit_point_idx {
if self.commits[idx].deleted {
starting_commit_deleted = true;
}
}
self.delete_commits()?;
self.inited = true;
Ok(starting_commit_deleted)
}
/// Set all gens beyond what we currently see in the directory, to avoid double-write
/// in cases where the previous IndexWriter did not gracefully close/rollback (e.g.
/// os/machine crashed or lost power).
fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> {
let mut max_segment_gen = i64::min_value();
let mut max_segment_name = i32::min_value();
// Confusingly, this is the union of live_docs, field infos, doc values
// (and maybe others, in the future) gens. This is somewhat messy,
// since it means DV updates will suddenly write to the next gen after
// live docs' gen, for example, but we don't have the APIs to ask the
// codec which file is which:
let mut max_per_segment_gen = HashMap::new();
for filename in files {
if filename == INDEX_FILE_OLD_SEGMENT_GEN {
// do nothing
} else if filename.starts_with(INDEX_FILE_SEGMENTS) {
// trash file: we have to handle this since we allow anything
// starting with 'segments' here
if let Ok(gen) = generation_from_segments_file_name(filename) {
max_segment_gen = max_segment_gen.max(gen);
}
} else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) {
// the first 8 bytes is "pending_", so the slice operation is safe
if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) {
max_segment_gen = max_segment_gen.max(gen);
}
} else {
let segment_name = parse_segment_name(filename);
debug_assert!(segment_name.starts_with('_'));
if filename.to_lowercase().ends_with(".tmp") {
// A temp file: don't try to look at its gen
continue;
}
max_segment_name =
max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?);
let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x);
if let Ok(gen) = parse_generation(filename) {
cur_gen = cur_gen.max(gen);
}
max_per_segment_gen.insert(segment_name.to_string(), cur_gen);
}
}
// Generation is advanced before write:
let next_write_gen = max_segment_gen.max(infos.generation);
infos.set_next_write_generation(next_write_gen)?;
if infos.counter < max_segment_name + 1 {
infos.counter = max_segment_name
}
for info in &mut infos.segments {
let gen = max_per_segment_gen[&info.info.name];
if info.next_write_del_gen() < gen + 1 {
info.set_next_write_del_gen(gen + 1);
}
if info.next_write_field_infos_gen() < gen + 1 {
info.set_next_write_field_infos_gen(gen + 1);
}
if info.next_write_doc_values_gen() < gen + 1 {
info.set_next_write_doc_values_gen(gen + 1);
}
}
Ok(())
}
/// For definition of "check point" see IndexWriter comments:
/// "Clarification: Check Points (and commits)".
///
/// Writer calls this when it has made a "consistent
/// change" to the index, meaning new files are written to
/// the index and the in-memory SegmentInfos have been
/// modified to point to those files.
///
/// This may or may not be a commit (segments_N may or may
/// not have been written).
///
/// We simply incref the files referenced by the new
/// SegmentInfos and decref the files we had previously
/// seen (if any).
///
/// If this is a commit, we also call the policy to give it
/// a chance to remove other commits. If any commits are
/// removed, we decref their files as well.
pub fn checkpoint<C: Codec>(
&mut self,
segment_infos: &SegmentInfos<D, C>,
is_commit: bool,
) -> Result<()> {
// incref the files:
self.inc_ref_files(&segment_infos.files(is_commit));
if is_commit {
// Append to our commits list:
let p = CommitPoint::new(
segment_infos.generation,
segment_infos.segment_file_name().unwrap_or("".to_string()),
segment_infos.files(true),
segment_infos.has_dv_updates(),
);
self.commits.push(p);
// Tell policy so it can remove commits:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_commit(commits)?;
}
// DecRef file for commits that were deleted by the policy
self.delete_commits()
} else {
let res = self.dec_ref_files(&self.last_files);
self.last_files.clear();
res?;
// Save files so we can decr on next checkpoint/commit:
self.last_files.extend(segment_infos.files(false));
Ok(())
}
}
pub fn exists(&self, filename: &str) -> bool {
if !self.ref_counts.read().unwrap().contains_key(filename) {
false
} else {
self.ensure_ref_count(filename);
self.ref_counts.read().unwrap()[filename].count > 0
}
}
fn ensure_ref_count(&self, file_name: &str) {
let mut ref_counts = self.ref_counts.write().unwrap();
if !ref_counts.contains_key(file_name) {
ref_counts.insert(file_name.to_string(), RefCount::default());
}
}
pub fn inc_ref_files(&self, files: &HashSet<String>) {
for file in files {
self.ensure_ref_count(file);
self.ref_counts
.write()
.unwrap()
.get_mut(file)
.unwrap()
.inc_ref();
}
}
/// Decrefs all provided files, even on exception; throws first exception hit, if any.
pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, false)
}
fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()> |
pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) {
if let Err(e) = self.dec_ref_files(files) {
warn!("dec_ref_files_no_error failed with '{:?}'", e);
}
}
/// Returns true if the file should now be deleted.
fn dec_ref(&self, filename: &str) -> bool {
self.ensure_ref_count(filename);
let mut ref_counts = self.ref_counts.write().unwrap();
if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 {
// This file is no longer referenced by any past
// commit points nor by the in-memory SegmentInfos:
ref_counts.remove(filename);
true
} else {
false
}
}
/// Remove the CommitPoints in the commitsToDelete List by
/// DecRef'ing all files from each SegmentInfos.
fn delete_commits(&mut self) -> Result<()> {
let mut res = Ok(());
// First decref all files that had been referred to by
// the now-deleted commits:
for commit in &self.commits {
if commit.deleted {
res = self.dec_ref_files(&commit.files);
}
}
// NOTE: does nothing if not err
if res.is_err() {
return res;
}
// Now compact commits to remove deleted ones (preserving the sort):
let size = self.commits.len();
let mut read_from = 0;
let mut write_to = 0;
while read_from < size {
if !self.commits[read_from].deleted {
if write_to != read_from {
self.commits.swap(read_from, write_to);
}
write_to += 1;
}
read_from += 1;
}
self.commits.truncate(write_to);
Ok(())
}
fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> {
// We make two passes, first deleting any segments_N files, second
// deleting the rest. We do this so that if we throw exc or JVM
// crashes during deletions, even when not on Windows, we don't
// leave the index in an "apparently corrupt" state:
let mut copys = vec![];
for file in files {
copys.push(file);
if !file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
if do_commit_filter {
self.filter_dv_update_files(&mut copys);
}
for file in copys {
if file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
Ok(())
}
fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) {
let dv_update_files: Vec<String> = candidates
.drain_filter(|f| -> bool {
self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f)
})
.map(|f| f.clone())
.collect();
let to_deletes: Vec<Vec<String>>;
{
let mut l = self.delayed_dv_update_files.lock();
let old_dv_update_files = l.as_mut().unwrap();
let tm_now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
to_deletes = old_dv_update_files
.drain_filter(|(x, _)| -> bool { *x < tm_now })
.map(|(_, y)| y)
.collect();
old_dv_update_files.push((tm_now + 60, dv_update_files));
}
for files in to_deletes {
for file in files {
self.delete_file(&file).unwrap_or(());
}
}
}
fn delete_file(&self, filename: &str) -> Result<()> {
// panic!("wrong deleted files");
self.directory.delete_file(filename)
}
/// Deletes the specified files, but only if they are new
/// (have not yes been incref'd).
pub fn delete_new_files(&self, files: &HashSet<String>) -> Result<()> {
let mut filtered = HashSet::with_capacity(files.len());
let ref_counts = self.ref_counts.read().unwrap();
for file in files {
// NOTE: it's very unusual yet possible for the
// refCount to be present and 0: it can happen if you
// open IW on a crashed index, and it removes a bunch
// of unref'd files, and then you add new docs / do
// merging, and it reuses that segment name.
// TestCrash.testCrashAfterReopen can hit this:
if !ref_counts.contains_key(file) || ref_counts[file].count == 0 {
filtered.insert(file.clone());
}
}
self.delete_files(&filtered, false)
}
/// Writer calls this when it has hit an error and had to
/// roll back, to tell us that there may now be
/// unreferenced files in the filesystem. So we re-list
/// the filesystem and delete such files. If segmentName
/// is non-null, we will only delete files corresponding to
/// that segment.
pub fn refresh(&mut self) -> Result<()> {
debug_assert!(self.inited);
let files = self.directory.list_all()?;
let mut to_delete = HashSet::new();
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in &files {
if !self.ref_counts.read()?.contains_key(filename)
&& (pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS))
{
// Unreferenced file, so remove it
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)
}
pub fn close(&mut self) -> Result<()> {
if !self.last_files.is_empty() {
let files = mem::replace(&mut self.last_files, HashSet::new());
self.dec_ref_files(&files)?;
}
Ok(())
}
}
struct RefCount {
inited: bool,
count: u32,
}
impl Default for RefCount {
fn default() -> Self {
RefCount {
inited: false,
count: 0,
}
}
}
impl RefCount {
fn inc_ref(&mut self) -> u32 {
if !self.inited {
self.inited = true;
} else {
debug_assert!(self.count > 0);
}
self.count += 1;
self.count
}
fn dec_ref(&mut self) -> u32 {
debug_assert!(self.count > 0);
self.count -= 1;
self.count
}
}
impl fmt::Display for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
impl fmt::Debug for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
/// Expert: represents a single commit into an index as seen by the
/// {@link IndexDeletionPolicy} or {@link IndexReader}.
///
/// Changes to the content of an index are made visible
/// only after the writer who made that change commits by
/// writing a new segments file
/// (`segments_N</code`). This point in time, when the
/// action of writing of a new segments file to the directory
/// is completed, is an index commit.
///
/// Each index commit point has a unique segments file
/// associated with it. The segments file associated with a
/// later index commit point would have a larger N.
///
/// Holds details for each commit point. This class is also passed to
/// the deletion policy. Note: this class has a natural ordering that
/// is inconsistent with equals.
pub struct CommitPoint {
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
deleted: bool,
}
impl CommitPoint {
fn new(
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
) -> Self {
CommitPoint {
generation,
segment_file_name,
files,
has_dv_updates,
deleted: false,
}
}
/// Get the segments file (`segments_N`) associated with this commit point
pub fn segments_file_name(&self) -> &str {
&self.segment_file_name
}
/// Delete this commit point. This only applies when using
/// the commit point in the context of IndexWriter's
/// IndexDeletionPolicy.
///
/// Upon calling this, the writer is notified that this commit
/// point should be deleted.
///
/// Decision that a commit-point should be deleted is taken by the
/// `IndexDeletionPolicy` in effect and therefore this should only
/// be called by its `IndexDeletionPolicy#onInit on_init()` or
/// `IndexDeletionPolicy#onCommit on_commit()` methods.
pub fn delete(&mut self) -> Result<()> {
self.deleted = true;
Ok(())
}
pub fn has_dv_updates(&self) -> bool {
self.has_dv_updates
}
}
impl Ord for CommitPoint {
fn cmp(&self, other: &Self) -> Ordering {
self.generation.cmp(&other.generation)
}
}
impl PartialOrd for CommitPoint {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for CommitPoint {}
impl PartialEq for CommitPoint {
fn eq(&self, other: &Self) -> bool {
self.segment_file_name == other.segment_file_name && self.generation == other.generation
}
}
| {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, true)
} | identifier_body |
index_file_deleter.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::segment_infos::{
generation_from_segments_file_name, parse_generation, parse_segment_name, SegmentInfos,
CODEC_FILE_PATTERN, CODEC_UPDATE_DV_PATTERN, CODEC_UPDATE_FNM_PATTERN,
INDEX_FILE_OLD_SEGMENT_GEN, INDEX_FILE_PENDING_SEGMENTS, INDEX_FILE_SEGMENTS,
};
use core::codec::Codec;
use core::index::writer::KeepOnlyLastCommitDeletionPolicy;
use core::store::directory::{Directory, LockValidatingDirectoryWrapper};
use regex::Regex;
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::mem;
use std::sync::{Arc, Mutex, RwLock};
use error::{ErrorKind, Result};
use std::time::{SystemTime, UNIX_EPOCH};
/// This class keeps track of each SegmentInfos instance that
/// is still "live", either because it corresponds to a
/// segments_N file in the Directory (a "commit", i.e. a
/// committed SegmentInfos) or because it's an in-memory
/// SegmentInfos that a writer is actively updating but has
/// not yet committed. This class uses simple reference
/// counting to map the live SegmentInfos instances to
/// individual files in the Directory.
///
/// The same directory file may be referenced by more than
/// one IndexCommit, i.e. more than one SegmentInfos.
/// Therefore we count how many commits reference each file.
/// When all the commits referencing a certain file have been
/// deleted, the refcount for that file becomes zero, and the
/// file is deleted.
///
/// A separate deletion policy interface
/// (IndexDeletionPolicy) is consulted on creation (onInit)
/// and once per commit (onCommit), to decide when a commit
/// should be removed.
///
/// It is the business of the IndexDeletionPolicy to choose
/// when to delete commit points. The actual mechanics of
/// file deletion, retrying, etc, derived from the deletion
/// of commit points is the business of the IndexFileDeleter.
///
/// The current default deletion policy is {@link
/// KeepOnlyLastCommitDeletionPolicy}, which removes all
/// prior commits when a new commit has completed. This
/// matches the behavior before 2.2.
///
/// Note that you must hold the write.lock before
/// instantiating this class. It opens segments_N file(s)
/// directly with no retry logic.
pub struct IndexFileDeleter<D: Directory> {
/// Reference count for all files in the index. Counts
/// how many existing commits reference a file.
ref_counts: Arc<RwLock<HashMap<String, RefCount>>>,
/// Holds all commits (segments_N) currently in the index.
/// this will have just 1 commit if you are using the default
/// delete policy (KeepOnlyLastCommitDeletionPolicy). Other policies
/// may leave commit points live for longer in which case this list
/// would be longer than 1.
commits: Vec<CommitPoint>,
/// Holds files we had inc_ref'd from the previous non-commit checkpoint:
last_files: HashSet<String>,
policy: KeepOnlyLastCommitDeletionPolicy,
delayed_dv_update_files: Arc<Mutex<Vec<(u64, Vec<String>)>>>,
dv_pattern: Regex,
fnm_pattern: Regex,
directory: Arc<LockValidatingDirectoryWrapper<D>>,
inited: bool,
}
impl<D: Directory> IndexFileDeleter<D> {
pub fn new(directory: Arc<LockValidatingDirectoryWrapper<D>>) -> Self {
IndexFileDeleter {
ref_counts: Arc::new(RwLock::new(HashMap::new())),
commits: vec![],
last_files: HashSet::new(),
policy: KeepOnlyLastCommitDeletionPolicy {},
delayed_dv_update_files: Arc::new(Mutex::new(Vec::new())),
dv_pattern: Regex::new(CODEC_UPDATE_DV_PATTERN).unwrap(),
fnm_pattern: Regex::new(CODEC_UPDATE_FNM_PATTERN).unwrap(),
directory,
inited: false,
}
}
pub fn init<C: Codec>(
&mut self,
directory_orig: Arc<D>,
files: &[String],
segment_infos: &mut SegmentInfos<D, C>,
initial_index_exists: bool,
) -> Result<bool> {
let mut current_commit_point_idx: Option<usize> = None;
if let Some(ref current_segments_file) = segment_infos.segment_file_name() {
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in files {
if pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS)
{
// Add this file to ref_counts with initial count 0.
{
if !self.ref_counts.read()?.contains_key(filename) {
self.ref_counts
.write()?
.insert(filename.to_string(), RefCount::default());
}
}
if filename.starts_with(INDEX_FILE_SEGMENTS)
&& filename != INDEX_FILE_OLD_SEGMENT_GEN
{
// This is a commit (segments or segments_N), and
// it's valid (<= the max gen). Load it, then
// incref all files it refers to:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, filename)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
if sis.generation == segment_infos.generation {
current_commit_point_idx = Some(self.commits.len() - 1);
}
self.inc_ref_files(&sis.files(true));
}
}
}
if current_commit_point_idx.is_none() && initial_index_exists {
// We did not in fact see the segments_N file
// corresponding to the segmentInfos that was passed
// in. Yet, it must exist, because our caller holds
// the write lock. This can happen when the directory
// listing was stale (eg when index accessed via NFS
// client with stale directory listing cache). So we
// try now to explicitly open this commit point:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, current_segments_file)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
current_commit_point_idx = Some(self.commits.len() - 1);
self.inc_ref_files(&sis.files(true));
}
}
// We keep commits list in sorted order (oldest to newest):
self.commits.sort();
// refCounts only includes "normal" filenames (does not include write.lock)
{
let ref_counts = self.ref_counts.read()?;
let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect();
Self::inflate_gens(segment_infos, files)?;
}
// Now delete anything with ref count at 0. These are
// presumably abandoned files eg due to crash of
// IndexWriter.
{
let mut to_delete = HashSet::new();
for (filename, rc) in &*self.ref_counts.read()? {
if rc.count == 0 {
// A segments_N file should never have ref count 0 on init
if filename.starts_with(INDEX_FILE_SEGMENTS) {
bail!(ErrorKind::IllegalState(format!(
"file '{}' has ref_count=0, shouldn't happen on init",
filename
)));
}
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)?;
}
// Finally, give policy a chance to remove things on
// startup:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_init(commits)?;
}
// Always protect the incoming segmentInfos since
// sometime it may not be the most recent commit
self.checkpoint(segment_infos, false)?;
let mut starting_commit_deleted = false;
if let Some(idx) = current_commit_point_idx {
if self.commits[idx].deleted {
starting_commit_deleted = true;
}
}
self.delete_commits()?;
self.inited = true;
Ok(starting_commit_deleted)
}
/// Set all gens beyond what we currently see in the directory, to avoid double-write
/// in cases where the previous IndexWriter did not gracefully close/rollback (e.g.
/// os/machine crashed or lost power).
fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> {
let mut max_segment_gen = i64::min_value();
let mut max_segment_name = i32::min_value();
// Confusingly, this is the union of live_docs, field infos, doc values
// (and maybe others, in the future) gens. This is somewhat messy,
// since it means DV updates will suddenly write to the next gen after
// live docs' gen, for example, but we don't have the APIs to ask the
// codec which file is which:
let mut max_per_segment_gen = HashMap::new();
for filename in files {
if filename == INDEX_FILE_OLD_SEGMENT_GEN {
// do nothing
} else if filename.starts_with(INDEX_FILE_SEGMENTS) {
// trash file: we have to handle this since we allow anything
// starting with 'segments' here
if let Ok(gen) = generation_from_segments_file_name(filename) {
max_segment_gen = max_segment_gen.max(gen);
}
} else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) {
// the first 8 bytes is "pending_", so the slice operation is safe
if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) {
max_segment_gen = max_segment_gen.max(gen);
}
} else {
let segment_name = parse_segment_name(filename);
debug_assert!(segment_name.starts_with('_'));
if filename.to_lowercase().ends_with(".tmp") {
// A temp file: don't try to look at its gen
continue;
}
max_segment_name =
max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?);
let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x);
if let Ok(gen) = parse_generation(filename) {
cur_gen = cur_gen.max(gen);
}
max_per_segment_gen.insert(segment_name.to_string(), cur_gen);
}
}
// Generation is advanced before write:
let next_write_gen = max_segment_gen.max(infos.generation);
infos.set_next_write_generation(next_write_gen)?;
if infos.counter < max_segment_name + 1 {
infos.counter = max_segment_name
}
for info in &mut infos.segments {
let gen = max_per_segment_gen[&info.info.name];
if info.next_write_del_gen() < gen + 1 {
info.set_next_write_del_gen(gen + 1);
}
if info.next_write_field_infos_gen() < gen + 1 {
info.set_next_write_field_infos_gen(gen + 1);
}
if info.next_write_doc_values_gen() < gen + 1 {
info.set_next_write_doc_values_gen(gen + 1);
}
}
Ok(())
}
/// For definition of "check point" see IndexWriter comments:
/// "Clarification: Check Points (and commits)".
///
/// Writer calls this when it has made a "consistent
/// change" to the index, meaning new files are written to
/// the index and the in-memory SegmentInfos have been
/// modified to point to those files.
///
/// This may or may not be a commit (segments_N may or may
/// not have been written).
///
/// We simply incref the files referenced by the new
/// SegmentInfos and decref the files we had previously
/// seen (if any).
///
/// If this is a commit, we also call the policy to give it
/// a chance to remove other commits. If any commits are
/// removed, we decref their files as well.
pub fn checkpoint<C: Codec>(
&mut self,
segment_infos: &SegmentInfos<D, C>,
is_commit: bool,
) -> Result<()> {
// incref the files:
self.inc_ref_files(&segment_infos.files(is_commit));
if is_commit {
// Append to our commits list:
let p = CommitPoint::new(
segment_infos.generation,
segment_infos.segment_file_name().unwrap_or("".to_string()),
segment_infos.files(true),
segment_infos.has_dv_updates(),
);
self.commits.push(p);
// Tell policy so it can remove commits:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_commit(commits)?;
}
// DecRef file for commits that were deleted by the policy
self.delete_commits()
} else {
let res = self.dec_ref_files(&self.last_files);
self.last_files.clear();
res?;
// Save files so we can decr on next checkpoint/commit:
self.last_files.extend(segment_infos.files(false));
Ok(())
}
}
pub fn exists(&self, filename: &str) -> bool {
if !self.ref_counts.read().unwrap().contains_key(filename) {
false
} else {
self.ensure_ref_count(filename);
self.ref_counts.read().unwrap()[filename].count > 0
}
}
fn ensure_ref_count(&self, file_name: &str) {
let mut ref_counts = self.ref_counts.write().unwrap();
if !ref_counts.contains_key(file_name) {
ref_counts.insert(file_name.to_string(), RefCount::default());
}
}
pub fn inc_ref_files(&self, files: &HashSet<String>) {
for file in files {
self.ensure_ref_count(file);
self.ref_counts
.write()
.unwrap()
.get_mut(file)
.unwrap()
.inc_ref();
}
}
/// Decrefs all provided files, even on exception; throws first exception hit, if any.
pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, false)
}
fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()> {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, true)
}
pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) {
if let Err(e) = self.dec_ref_files(files) {
warn!("dec_ref_files_no_error failed with '{:?}'", e);
}
}
/// Returns true if the file should now be deleted.
fn dec_ref(&self, filename: &str) -> bool {
self.ensure_ref_count(filename);
let mut ref_counts = self.ref_counts.write().unwrap();
if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 {
// This file is no longer referenced by any past
// commit points nor by the in-memory SegmentInfos:
ref_counts.remove(filename);
true
} else {
false
}
}
/// Remove the CommitPoints in the commitsToDelete List by
/// DecRef'ing all files from each SegmentInfos.
fn delete_commits(&mut self) -> Result<()> {
let mut res = Ok(());
// First decref all files that had been referred to by
// the now-deleted commits:
for commit in &self.commits {
if commit.deleted {
res = self.dec_ref_files(&commit.files);
}
}
// NOTE: does nothing if not err
if res.is_err() {
return res;
}
// Now compact commits to remove deleted ones (preserving the sort):
let size = self.commits.len();
let mut read_from = 0;
let mut write_to = 0;
while read_from < size {
if !self.commits[read_from].deleted {
if write_to != read_from {
self.commits.swap(read_from, write_to);
}
write_to += 1;
}
read_from += 1;
}
self.commits.truncate(write_to);
Ok(())
}
fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> {
// We make two passes, first deleting any segments_N files, second
// deleting the rest. We do this so that if we throw exc or JVM
// crashes during deletions, even when not on Windows, we don't
// leave the index in an "apparently corrupt" state:
let mut copys = vec![];
for file in files {
copys.push(file);
if !file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
if do_commit_filter {
self.filter_dv_update_files(&mut copys);
}
for file in copys {
if file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
Ok(())
}
fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) {
let dv_update_files: Vec<String> = candidates
.drain_filter(|f| -> bool {
self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f)
})
.map(|f| f.clone())
.collect();
let to_deletes: Vec<Vec<String>>;
{
let mut l = self.delayed_dv_update_files.lock();
let old_dv_update_files = l.as_mut().unwrap();
let tm_now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
to_deletes = old_dv_update_files
.drain_filter(|(x, _)| -> bool { *x < tm_now })
.map(|(_, y)| y)
.collect();
old_dv_update_files.push((tm_now + 60, dv_update_files));
}
for files in to_deletes {
for file in files {
self.delete_file(&file).unwrap_or(());
}
}
}
fn delete_file(&self, filename: &str) -> Result<()> {
// panic!("wrong deleted files");
self.directory.delete_file(filename)
}
/// Deletes the specified files, but only if they are new
/// (have not yes been incref'd).
pub fn delete_new_files(&self, files: &HashSet<String>) -> Result<()> {
let mut filtered = HashSet::with_capacity(files.len());
let ref_counts = self.ref_counts.read().unwrap();
for file in files {
// NOTE: it's very unusual yet possible for the
// refCount to be present and 0: it can happen if you
// open IW on a crashed index, and it removes a bunch
// of unref'd files, and then you add new docs / do
// merging, and it reuses that segment name.
// TestCrash.testCrashAfterReopen can hit this:
if !ref_counts.contains_key(file) || ref_counts[file].count == 0 {
filtered.insert(file.clone());
}
}
self.delete_files(&filtered, false)
}
/// Writer calls this when it has hit an error and had to
/// roll back, to tell us that there may now be
/// unreferenced files in the filesystem. So we re-list
/// the filesystem and delete such files. If segmentName
/// is non-null, we will only delete files corresponding to
/// that segment.
pub fn refresh(&mut self) -> Result<()> {
debug_assert!(self.inited);
let files = self.directory.list_all()?;
let mut to_delete = HashSet::new();
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in &files {
if !self.ref_counts.read()?.contains_key(filename)
&& (pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS))
{
// Unreferenced file, so remove it
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)
}
pub fn close(&mut self) -> Result<()> {
if !self.last_files.is_empty() {
let files = mem::replace(&mut self.last_files, HashSet::new());
self.dec_ref_files(&files)?;
}
Ok(())
}
}
struct RefCount {
inited: bool,
count: u32,
}
impl Default for RefCount {
fn default() -> Self {
RefCount {
inited: false,
count: 0,
}
}
}
impl RefCount {
fn inc_ref(&mut self) -> u32 {
if !self.inited {
self.inited = true;
} else {
debug_assert!(self.count > 0);
}
self.count += 1;
self.count
}
fn dec_ref(&mut self) -> u32 {
debug_assert!(self.count > 0);
self.count -= 1;
self.count
}
}
impl fmt::Display for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
impl fmt::Debug for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
/// Expert: represents a single commit into an index as seen by the
/// {@link IndexDeletionPolicy} or {@link IndexReader}.
///
/// Changes to the content of an index are made visible
/// only after the writer who made that change commits by
/// writing a new segments file
/// (`segments_N</code`). This point in time, when the
/// action of writing of a new segments file to the directory
/// is completed, is an index commit.
///
/// Each index commit point has a unique segments file
/// associated with it. The segments file associated with a
/// later index commit point would have a larger N.
///
/// Holds details for each commit point. This class is also passed to
/// the deletion policy. Note: this class has a natural ordering that
/// is inconsistent with equals.
pub struct CommitPoint {
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
deleted: bool,
}
impl CommitPoint {
fn new(
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
) -> Self {
CommitPoint {
generation,
segment_file_name,
files,
has_dv_updates,
deleted: false,
}
}
/// Get the segments file (`segments_N`) associated with this commit point
pub fn segments_file_name(&self) -> &str {
&self.segment_file_name
}
/// Delete this commit point. This only applies when using
/// the commit point in the context of IndexWriter's
/// IndexDeletionPolicy.
///
/// Upon calling this, the writer is notified that this commit
/// point should be deleted.
///
/// Decision that a commit-point should be deleted is taken by the
/// `IndexDeletionPolicy` in effect and therefore this should only
/// be called by its `IndexDeletionPolicy#onInit on_init()` or
/// `IndexDeletionPolicy#onCommit on_commit()` methods.
pub fn | (&mut self) -> Result<()> {
self.deleted = true;
Ok(())
}
pub fn has_dv_updates(&self) -> bool {
self.has_dv_updates
}
}
impl Ord for CommitPoint {
fn cmp(&self, other: &Self) -> Ordering {
self.generation.cmp(&other.generation)
}
}
impl PartialOrd for CommitPoint {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for CommitPoint {}
impl PartialEq for CommitPoint {
fn eq(&self, other: &Self) -> bool {
self.segment_file_name == other.segment_file_name && self.generation == other.generation
}
}
| delete | identifier_name |
index_file_deleter.rs | // Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use core::codec::segment_infos::{
generation_from_segments_file_name, parse_generation, parse_segment_name, SegmentInfos,
CODEC_FILE_PATTERN, CODEC_UPDATE_DV_PATTERN, CODEC_UPDATE_FNM_PATTERN,
INDEX_FILE_OLD_SEGMENT_GEN, INDEX_FILE_PENDING_SEGMENTS, INDEX_FILE_SEGMENTS,
};
use core::codec::Codec;
use core::index::writer::KeepOnlyLastCommitDeletionPolicy;
use core::store::directory::{Directory, LockValidatingDirectoryWrapper};
use regex::Regex;
use std::cmp::Ordering;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::mem;
use std::sync::{Arc, Mutex, RwLock};
use error::{ErrorKind, Result};
use std::time::{SystemTime, UNIX_EPOCH};
/// This class keeps track of each SegmentInfos instance that
/// is still "live", either because it corresponds to a
/// segments_N file in the Directory (a "commit", i.e. a
/// committed SegmentInfos) or because it's an in-memory
/// SegmentInfos that a writer is actively updating but has
/// not yet committed. This class uses simple reference
/// counting to map the live SegmentInfos instances to
/// individual files in the Directory.
///
/// The same directory file may be referenced by more than
/// one IndexCommit, i.e. more than one SegmentInfos.
/// Therefore we count how many commits reference each file.
/// When all the commits referencing a certain file have been
/// deleted, the refcount for that file becomes zero, and the
/// file is deleted.
///
/// A separate deletion policy interface
/// (IndexDeletionPolicy) is consulted on creation (onInit)
/// and once per commit (onCommit), to decide when a commit
/// should be removed.
///
/// It is the business of the IndexDeletionPolicy to choose
/// when to delete commit points. The actual mechanics of
/// file deletion, retrying, etc, derived from the deletion
/// of commit points is the business of the IndexFileDeleter.
///
/// The current default deletion policy is {@link
/// KeepOnlyLastCommitDeletionPolicy}, which removes all
/// prior commits when a new commit has completed. This
/// matches the behavior before 2.2.
///
/// Note that you must hold the write.lock before
/// instantiating this class. It opens segments_N file(s)
/// directly with no retry logic.
pub struct IndexFileDeleter<D: Directory> {
/// Reference count for all files in the index. Counts
/// how many existing commits reference a file.
ref_counts: Arc<RwLock<HashMap<String, RefCount>>>,
/// Holds all commits (segments_N) currently in the index.
/// this will have just 1 commit if you are using the default
/// delete policy (KeepOnlyLastCommitDeletionPolicy). Other policies
/// may leave commit points live for longer in which case this list
/// would be longer than 1.
commits: Vec<CommitPoint>,
/// Holds files we had inc_ref'd from the previous non-commit checkpoint:
last_files: HashSet<String>,
policy: KeepOnlyLastCommitDeletionPolicy,
delayed_dv_update_files: Arc<Mutex<Vec<(u64, Vec<String>)>>>,
dv_pattern: Regex,
fnm_pattern: Regex,
directory: Arc<LockValidatingDirectoryWrapper<D>>,
inited: bool,
}
impl<D: Directory> IndexFileDeleter<D> {
pub fn new(directory: Arc<LockValidatingDirectoryWrapper<D>>) -> Self {
IndexFileDeleter {
ref_counts: Arc::new(RwLock::new(HashMap::new())),
commits: vec![],
last_files: HashSet::new(),
policy: KeepOnlyLastCommitDeletionPolicy {},
delayed_dv_update_files: Arc::new(Mutex::new(Vec::new())),
dv_pattern: Regex::new(CODEC_UPDATE_DV_PATTERN).unwrap(),
fnm_pattern: Regex::new(CODEC_UPDATE_FNM_PATTERN).unwrap(),
directory,
inited: false,
}
}
pub fn init<C: Codec>(
&mut self,
directory_orig: Arc<D>,
files: &[String],
segment_infos: &mut SegmentInfos<D, C>,
initial_index_exists: bool,
) -> Result<bool> {
let mut current_commit_point_idx: Option<usize> = None;
if let Some(ref current_segments_file) = segment_infos.segment_file_name() {
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in files {
if pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS)
{
// Add this file to ref_counts with initial count 0.
{
if !self.ref_counts.read()?.contains_key(filename) {
self.ref_counts
.write()?
.insert(filename.to_string(), RefCount::default());
}
}
if filename.starts_with(INDEX_FILE_SEGMENTS)
&& filename != INDEX_FILE_OLD_SEGMENT_GEN
{
// This is a commit (segments or segments_N), and
// it's valid (<= the max gen). Load it, then
// incref all files it refers to:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, filename)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
if sis.generation == segment_infos.generation {
current_commit_point_idx = Some(self.commits.len() - 1);
}
self.inc_ref_files(&sis.files(true));
}
}
}
if current_commit_point_idx.is_none() && initial_index_exists {
// We did not in fact see the segments_N file
// corresponding to the segmentInfos that was passed
// in. Yet, it must exist, because our caller holds
// the write lock. This can happen when the directory
// listing was stale (eg when index accessed via NFS
// client with stale directory listing cache). So we
// try now to explicitly open this commit point:
let sis: SegmentInfos<D, C> =
SegmentInfos::read_commit(&directory_orig, current_segments_file)?;
let commit_point = CommitPoint::new(
sis.generation,
sis.segment_file_name().unwrap_or("".to_string()),
sis.files(true),
sis.has_dv_updates(),
);
self.commits.push(commit_point);
current_commit_point_idx = Some(self.commits.len() - 1);
self.inc_ref_files(&sis.files(true));
}
}
// We keep commits list in sorted order (oldest to newest):
self.commits.sort();
// refCounts only includes "normal" filenames (does not include write.lock)
{
let ref_counts = self.ref_counts.read()?;
let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect();
Self::inflate_gens(segment_infos, files)?;
}
// Now delete anything with ref count at 0. These are
// presumably abandoned files eg due to crash of
// IndexWriter.
{
let mut to_delete = HashSet::new();
for (filename, rc) in &*self.ref_counts.read()? {
if rc.count == 0 {
// A segments_N file should never have ref count 0 on init
if filename.starts_with(INDEX_FILE_SEGMENTS) {
bail!(ErrorKind::IllegalState(format!(
"file '{}' has ref_count=0, shouldn't happen on init",
filename
)));
}
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)?;
}
// Finally, give policy a chance to remove things on
// startup:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_init(commits)?;
}
// Always protect the incoming segmentInfos since
// sometime it may not be the most recent commit
self.checkpoint(segment_infos, false)?;
let mut starting_commit_deleted = false;
if let Some(idx) = current_commit_point_idx {
if self.commits[idx].deleted |
}
self.delete_commits()?;
self.inited = true;
Ok(starting_commit_deleted)
}
/// Set all gens beyond what we currently see in the directory, to avoid double-write
/// in cases where the previous IndexWriter did not gracefully close/rollback (e.g.
/// os/machine crashed or lost power).
fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> {
let mut max_segment_gen = i64::min_value();
let mut max_segment_name = i32::min_value();
// Confusingly, this is the union of live_docs, field infos, doc values
// (and maybe others, in the future) gens. This is somewhat messy,
// since it means DV updates will suddenly write to the next gen after
// live docs' gen, for example, but we don't have the APIs to ask the
// codec which file is which:
let mut max_per_segment_gen = HashMap::new();
for filename in files {
if filename == INDEX_FILE_OLD_SEGMENT_GEN {
// do nothing
} else if filename.starts_with(INDEX_FILE_SEGMENTS) {
// trash file: we have to handle this since we allow anything
// starting with 'segments' here
if let Ok(gen) = generation_from_segments_file_name(filename) {
max_segment_gen = max_segment_gen.max(gen);
}
} else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) {
// the first 8 bytes is "pending_", so the slice operation is safe
if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) {
max_segment_gen = max_segment_gen.max(gen);
}
} else {
let segment_name = parse_segment_name(filename);
debug_assert!(segment_name.starts_with('_'));
if filename.to_lowercase().ends_with(".tmp") {
// A temp file: don't try to look at its gen
continue;
}
max_segment_name =
max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?);
let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x);
if let Ok(gen) = parse_generation(filename) {
cur_gen = cur_gen.max(gen);
}
max_per_segment_gen.insert(segment_name.to_string(), cur_gen);
}
}
// Generation is advanced before write:
let next_write_gen = max_segment_gen.max(infos.generation);
infos.set_next_write_generation(next_write_gen)?;
if infos.counter < max_segment_name + 1 {
infos.counter = max_segment_name
}
for info in &mut infos.segments {
let gen = max_per_segment_gen[&info.info.name];
if info.next_write_del_gen() < gen + 1 {
info.set_next_write_del_gen(gen + 1);
}
if info.next_write_field_infos_gen() < gen + 1 {
info.set_next_write_field_infos_gen(gen + 1);
}
if info.next_write_doc_values_gen() < gen + 1 {
info.set_next_write_doc_values_gen(gen + 1);
}
}
Ok(())
}
/// For definition of "check point" see IndexWriter comments:
/// "Clarification: Check Points (and commits)".
///
/// Writer calls this when it has made a "consistent
/// change" to the index, meaning new files are written to
/// the index and the in-memory SegmentInfos have been
/// modified to point to those files.
///
/// This may or may not be a commit (segments_N may or may
/// not have been written).
///
/// We simply incref the files referenced by the new
/// SegmentInfos and decref the files we had previously
/// seen (if any).
///
/// If this is a commit, we also call the policy to give it
/// a chance to remove other commits. If any commits are
/// removed, we decref their files as well.
pub fn checkpoint<C: Codec>(
&mut self,
segment_infos: &SegmentInfos<D, C>,
is_commit: bool,
) -> Result<()> {
// incref the files:
self.inc_ref_files(&segment_infos.files(is_commit));
if is_commit {
// Append to our commits list:
let p = CommitPoint::new(
segment_infos.generation,
segment_infos.segment_file_name().unwrap_or("".to_string()),
segment_infos.files(true),
segment_infos.has_dv_updates(),
);
self.commits.push(p);
// Tell policy so it can remove commits:
{
let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len());
for i in &mut self.commits {
commits.push(i);
}
self.policy.on_commit(commits)?;
}
// DecRef file for commits that were deleted by the policy
self.delete_commits()
} else {
let res = self.dec_ref_files(&self.last_files);
self.last_files.clear();
res?;
// Save files so we can decr on next checkpoint/commit:
self.last_files.extend(segment_infos.files(false));
Ok(())
}
}
pub fn exists(&self, filename: &str) -> bool {
if !self.ref_counts.read().unwrap().contains_key(filename) {
false
} else {
self.ensure_ref_count(filename);
self.ref_counts.read().unwrap()[filename].count > 0
}
}
fn ensure_ref_count(&self, file_name: &str) {
let mut ref_counts = self.ref_counts.write().unwrap();
if !ref_counts.contains_key(file_name) {
ref_counts.insert(file_name.to_string(), RefCount::default());
}
}
pub fn inc_ref_files(&self, files: &HashSet<String>) {
for file in files {
self.ensure_ref_count(file);
self.ref_counts
.write()
.unwrap()
.get_mut(file)
.unwrap()
.inc_ref();
}
}
/// Decrefs all provided files, even on exception; throws first exception hit, if any.
pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, false)
}
fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()> {
let mut to_delete = HashSet::new();
for f in files {
if self.dec_ref(f) {
to_delete.insert(f.clone());
}
}
self.delete_files(&to_delete, true)
}
pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) {
if let Err(e) = self.dec_ref_files(files) {
warn!("dec_ref_files_no_error failed with '{:?}'", e);
}
}
/// Returns true if the file should now be deleted.
fn dec_ref(&self, filename: &str) -> bool {
self.ensure_ref_count(filename);
let mut ref_counts = self.ref_counts.write().unwrap();
if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 {
// This file is no longer referenced by any past
// commit points nor by the in-memory SegmentInfos:
ref_counts.remove(filename);
true
} else {
false
}
}
/// Remove the CommitPoints in the commitsToDelete List by
/// DecRef'ing all files from each SegmentInfos.
fn delete_commits(&mut self) -> Result<()> {
let mut res = Ok(());
// First decref all files that had been referred to by
// the now-deleted commits:
for commit in &self.commits {
if commit.deleted {
res = self.dec_ref_files(&commit.files);
}
}
// NOTE: does nothing if not err
if res.is_err() {
return res;
}
// Now compact commits to remove deleted ones (preserving the sort):
let size = self.commits.len();
let mut read_from = 0;
let mut write_to = 0;
while read_from < size {
if !self.commits[read_from].deleted {
if write_to != read_from {
self.commits.swap(read_from, write_to);
}
write_to += 1;
}
read_from += 1;
}
self.commits.truncate(write_to);
Ok(())
}
fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> {
// We make two passes, first deleting any segments_N files, second
// deleting the rest. We do this so that if we throw exc or JVM
// crashes during deletions, even when not on Windows, we don't
// leave the index in an "apparently corrupt" state:
let mut copys = vec![];
for file in files {
copys.push(file);
if !file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
if do_commit_filter {
self.filter_dv_update_files(&mut copys);
}
for file in copys {
if file.starts_with(INDEX_FILE_SEGMENTS) {
continue;
}
self.delete_file(file)?;
}
Ok(())
}
fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) {
let dv_update_files: Vec<String> = candidates
.drain_filter(|f| -> bool {
self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f)
})
.map(|f| f.clone())
.collect();
let to_deletes: Vec<Vec<String>>;
{
let mut l = self.delayed_dv_update_files.lock();
let old_dv_update_files = l.as_mut().unwrap();
let tm_now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
to_deletes = old_dv_update_files
.drain_filter(|(x, _)| -> bool { *x < tm_now })
.map(|(_, y)| y)
.collect();
old_dv_update_files.push((tm_now + 60, dv_update_files));
}
for files in to_deletes {
for file in files {
self.delete_file(&file).unwrap_or(());
}
}
}
fn delete_file(&self, filename: &str) -> Result<()> {
// panic!("wrong deleted files");
self.directory.delete_file(filename)
}
/// Deletes the specified files, but only if they are new
/// (have not yes been incref'd).
pub fn delete_new_files(&self, files: &HashSet<String>) -> Result<()> {
let mut filtered = HashSet::with_capacity(files.len());
let ref_counts = self.ref_counts.read().unwrap();
for file in files {
// NOTE: it's very unusual yet possible for the
// refCount to be present and 0: it can happen if you
// open IW on a crashed index, and it removes a bunch
// of unref'd files, and then you add new docs / do
// merging, and it reuses that segment name.
// TestCrash.testCrashAfterReopen can hit this:
if !ref_counts.contains_key(file) || ref_counts[file].count == 0 {
filtered.insert(file.clone());
}
}
self.delete_files(&filtered, false)
}
/// Writer calls this when it has hit an error and had to
/// roll back, to tell us that there may now be
/// unreferenced files in the filesystem. So we re-list
/// the filesystem and delete such files. If segmentName
/// is non-null, we will only delete files corresponding to
/// that segment.
pub fn refresh(&mut self) -> Result<()> {
debug_assert!(self.inited);
let files = self.directory.list_all()?;
let mut to_delete = HashSet::new();
let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap();
for filename in &files {
if !self.ref_counts.read()?.contains_key(filename)
&& (pattern.is_match(filename)
|| filename.starts_with(INDEX_FILE_SEGMENTS)
|| filename.starts_with(INDEX_FILE_PENDING_SEGMENTS))
{
// Unreferenced file, so remove it
to_delete.insert(filename.clone());
}
}
self.delete_files(&to_delete, false)
}
pub fn close(&mut self) -> Result<()> {
if !self.last_files.is_empty() {
let files = mem::replace(&mut self.last_files, HashSet::new());
self.dec_ref_files(&files)?;
}
Ok(())
}
}
struct RefCount {
inited: bool,
count: u32,
}
impl Default for RefCount {
fn default() -> Self {
RefCount {
inited: false,
count: 0,
}
}
}
impl RefCount {
fn inc_ref(&mut self) -> u32 {
if !self.inited {
self.inited = true;
} else {
debug_assert!(self.count > 0);
}
self.count += 1;
self.count
}
fn dec_ref(&mut self) -> u32 {
debug_assert!(self.count > 0);
self.count -= 1;
self.count
}
}
impl fmt::Display for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
impl fmt::Debug for RefCount {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.count)
}
}
/// Expert: represents a single commit into an index as seen by the
/// {@link IndexDeletionPolicy} or {@link IndexReader}.
///
/// Changes to the content of an index are made visible
/// only after the writer who made that change commits by
/// writing a new segments file
/// (`segments_N</code`). This point in time, when the
/// action of writing of a new segments file to the directory
/// is completed, is an index commit.
///
/// Each index commit point has a unique segments file
/// associated with it. The segments file associated with a
/// later index commit point would have a larger N.
///
/// Holds details for each commit point. This class is also passed to
/// the deletion policy. Note: this class has a natural ordering that
/// is inconsistent with equals.
pub struct CommitPoint {
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
deleted: bool,
}
impl CommitPoint {
fn new(
generation: i64,
segment_file_name: String,
files: HashSet<String>,
has_dv_updates: bool,
) -> Self {
CommitPoint {
generation,
segment_file_name,
files,
has_dv_updates,
deleted: false,
}
}
/// Get the segments file (`segments_N`) associated with this commit point
pub fn segments_file_name(&self) -> &str {
&self.segment_file_name
}
/// Delete this commit point. This only applies when using
/// the commit point in the context of IndexWriter's
/// IndexDeletionPolicy.
///
/// Upon calling this, the writer is notified that this commit
/// point should be deleted.
///
/// Decision that a commit-point should be deleted is taken by the
/// `IndexDeletionPolicy` in effect and therefore this should only
/// be called by its `IndexDeletionPolicy#onInit on_init()` or
/// `IndexDeletionPolicy#onCommit on_commit()` methods.
pub fn delete(&mut self) -> Result<()> {
self.deleted = true;
Ok(())
}
pub fn has_dv_updates(&self) -> bool {
self.has_dv_updates
}
}
impl Ord for CommitPoint {
fn cmp(&self, other: &Self) -> Ordering {
self.generation.cmp(&other.generation)
}
}
impl PartialOrd for CommitPoint {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for CommitPoint {}
impl PartialEq for CommitPoint {
fn eq(&self, other: &Self) -> bool {
self.segment_file_name == other.segment_file_name && self.generation == other.generation
}
}
| {
starting_commit_deleted = true;
} | conditional_block |
goclass.go | // Copyright 2014 Elliott Stoneham and The TARDIS Go Authors
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package haxe
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
"unicode"
"github.com/tardisgo/tardisgo/pogo"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/types"
)
// Start the main Go class in haxe
func (langType) GoClassStart() string {
// the code below makes the Go class globally visible in JS as window.Go in the browser or exports.Go in nodejs
//TODO consider how to make Go/Haxe libs available across all platforms
return `
#if js
@:expose("Go")
#end
class Go
{
public static function Platform():String { // codes returned the same as used by Haxe
#if flash
return "flash";
#elseif js
return "js";
#elseif cpp
return "cpp";
#elseif java
return "java";
#elseif cs
return "cs";
#elseif python
#error "SORRY: the python target is not yet ready for general use"
return "python";
#elseif php
return "php";
#elseif neko
return "neko";
#else
#error "Only the js, flash, cpp (C++), java, cs (C#), php, python and neko Haxe targets are supported as a Go platform"
#end
}
`
}
// end the main Go class
func (l langType) GoClassEnd(pkg *ssa.Package) string {
// init function
main := "public static var doneInit:Bool=false;\n" // flag to run this routine only once
main += "\npublic static function init() : Void {\ndoneInit=true;\nvar gr:Int=Scheduler.makeGoroutine();\n" // first goroutine number is always 0
main += `if(gr!=0) throw "non-zero goroutine number in init";` + "\n" // first goroutine number is always 0, NOTE using throw as panic not setup
main += "var _sfgr=new Go_haxegoruntime_init(gr,[]).run();\n" //haxegoruntime.init() NOTE can't use .hx() to call from Haxe as that would call this fn
main += `Go.haxegoruntime_ZZiLLen.store_uint32('字'.length);` // value required by haxegoruntime to know what type of strings we have
main += "while(_sfgr._incomplete) Scheduler.runAll();\n"
main += "var _sf=new Go_" + l.LangName(pkg.Object.Path(), "init") + `(gr,[]).run();` + "\n" //NOTE can't use .hx() to call from Haxe as that would call this fn
main += "while(_sf._incomplete) Scheduler.runAll();\n"
main += ""
main += "Scheduler.doneInit=true;\n"
main += "}\n"
// Haxe main function, only called in a go-only environment
main += "\npublic static function main() : Void {\n"
main += "Go_" + l.LangName(pkg.Object.Path(), "main") + `.hx();` + "\n"
main += "}\n"
pos := "public static function CPos(pos:Int):String {\nvar prefix:String=\"\";\n"
pos += fmt.Sprintf(`if (pos==%d) return "(pogo.NoPosHash)";`, pogo.NoPosHash) + "\n"
pos += "if (pos<0) { pos = -pos; prefix= \"near \";}\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
if p != len(pogo.PosHashFileList)-1 {
pos += "else "
}
pos += fmt.Sprintf(`if(pos>%d) return prefix+"%s:"+Std.string(pos-%d);`,
pogo.PosHashFileList[p].BasePosHash,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "else return \"(invalid pogo.PosHash:\"+Std.string(pos)+\")\";\n}\n"
if pogo.DebugFlag {
pos += "\npublic static function getStartCPos(s:String):Int {\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return %d;`,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "\treturn -1;\n}\n"
pos += "\npublic static function getGlobal(s:String):String {\n"
globs := pogo.GlobalList()
for _, g := range globs {
goName := strings.Replace(g.Package+"."+g.Member, "\\", "\\\\", -1)
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return "%s = "+%s.toString();`,
goName, goName, l.LangName(g.Package, g.Member)) + "\n"
}
pos += "\treturn \"Couldn't find global: \"+s;\n}\n"
}
return main + pos + "} // end Go class"
}
func haxeStringConst(sconst string, position string) string {
s, err := strconv.Unquote(sconst)
if err != nil {
pogo.LogError(position, "Haxe", errors.New(err.Error()+" : "+sconst))
return ""
}
ret0 := ""
hadEsc := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' && !hadEsc {
ret0 += string(c)
} else {
ret0 += fmt.Sprintf("\\x%02X", c)
hadEsc = true
}
}
ret0 = `"` + ret0 + `"`
ret := ``
compound := ""
hadStr := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' {
compound += string(c)
} else {
if hadStr {
ret += "+"
}
if compound != "" {
compound = `"` + compound + `"+`
}
ret += fmt.Sprintf("%sString.fromCharCode(%d)", compound, c)
compound = ""
hadStr = true
}
}
if hadStr {
if compound != "" {
ret += fmt.Sprintf("+\"%s\"", compound)
}
} else {
ret += fmt.Sprintf("\"%s\"", compound)
}
if ret0 == ret {
return ret
}
return ` #if (cpp || neko || php) ` + ret0 + ` #else ` + ret + " #end "
}
func constFloat64(lit ssa.Const, bits int, position string) string {
var f float64
var f32 float32
//sigBits := uint(53)
//if bits == 32 {
// sigBits = 24
//}
f, _ /*f64ok*/ = exact.Float64Val(lit.Value)
f32, _ /*f32ok*/ = exact.Float32Val(lit.Value)
if bits == 32 {
f = float64(f32)
}
haxeVal := pogo.FloatVal(lit.Value, bits, position)
switch {
case math.IsInf(f, +1):
haxeVal = "Math.POSITIVE_INFINITY"
case math.IsInf(f, -1):
haxeVal = "Math.NEGATIVE_INFINITY"
case math.IsNaN(f): // must come after infinity checks
haxeVal = "Math.NaN"
//case f == 0 && math.Signbit(f): // -0 is zero, but it has a -ve sign
// //println("DEBUG -0") // TODO this code never seems to get executed
// haxeVal = "({var f:Float=0; f*=-1; f;})"
default:
// there is a problem with haxe constant processing for some floats
// try to be as exact as the host can be ... but also concise
//if float64(int64(f)) != f { // not a simple integer
/*
frac, exp := math.Frexp(f)
intPart := int64(frac * float64(uint64(1)<<sigBits))
expPart := exp - int(sigBits)
if float64(intPart) == frac*float64(uint64(1)<<sigBits) &&
expPart >= -1022 && expPart <= 1023 {
//it is an integer in the correct range
haxeVal = fmt.Sprintf("(%d*Math.pow(2,%d))", intPart, expPart) // NOTE: need the Math.pow to avoid haxe constant folding
}
*/
/*
val := exact.MakeFloat64(frac)
num := exact.Num(val)
den := exact.Denom(val)
n64i, nok := exact.Int64Val(num)
d64i, dok := exact.Int64Val(den)
res := float64(n64i) * math.Pow(2, float64(exp)) / float64(d64i)
if !math.IsNaN(res) && !math.IsInf(res, +1) && !math.IsInf(res, -1) { //drop through
if nok && dok {
nh, nl := pogo.IntVal(num, position)
dh, dl := pogo.IntVal(den, position)
n := fmt.Sprintf("%d", nl)
if n64i < 0 {
n = "(" + n + ")"
}
if nh != 0 && nh != -1 {
n = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(nh), uint32(nl))
}
if float64(d64i) == math.Pow(2, float64(exp)) {
haxeVal = n // divisor and multiplier the same
} else {
d := fmt.Sprintf("%d", dl)
if dh != 0 && dh != -1 {
d = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(dh), uint32(dl))
}
if n64i == 1 {
n = "" // no point multiplying by 1
} else {
n = n + "*"
}
if d64i == 1 {
d = "" // no point in dividing by 1
} else {
d = "/" + d
}
haxeVal = fmt.Sprintf("(%sMath.pow(2,%d)%s)", n, exp, d) // NOTE: need the Math.pow to avoid haxe constant folding
}
}
}
*/
//}
}
return haxeVal
/*
bits64 := *(*uint64)(unsafe.Pointer(&f))
bitVal := exact.MakeUint64(bits64)
h, l := pogo.IntVal(bitVal, position)
bitStr := fmt.Sprintf("GOint64.make(0x%x,0x%x)", uint32(h), uint32(l))
return "Force.float64const(" + bitStr + "," + haxeVal + ")"
*/
}
func (langType) Const(lit ssa.Const, position string) (typ, val string) {
if lit.Value == nil {
return "Dynamic", "null"
}
lit.Name()
switch lit.Value.Kind() {
case exact.Bool:
return "Bool", lit.Value.String()
case exact.String:
// TODO check if conversion of some string constant declarations are required
switch lit.Type().Underlying().(type) {
case *types.Basic:
return "String", haxeStringConst(lit.Value.String(), position)
case *types.Slice:
return "Slice", "Force.toUTF8slice(this._goroutine," + haxeStringConst(lit.Value.String(), position) + ")"
default:
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown string type"))
}
case exact.Float:
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
}
case exact.Int:
h, l := pogo.IntVal(lit.Value, position)
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Int64:
return "GOint64", fmt.Sprintf("Force.toInt64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Uint64:
return "GOint64", fmt.Sprintf("Force.toUint64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
default:
if h != 0 && h != -1 {
pogo.LogWarning(position, "Haxe", fmt.Errorf("integer constant value > 32 bits : %v", lit.Value))
}
ret := ""
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Uint, types.Uint32, types.Uintptr:
q := uint32(l)
ret = fmt.Sprintf(
" #if js untyped __js__(\"0x%x\") #elseif php untyped __php__(\"0x%x\") #else 0x%x #end ",
q, q, q)
case types.Uint16:
q := uint16(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Uint8: // types.Byte
q := uint8(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Int, types.Int32, types.UntypedRune, types.UntypedInt: // types.Rune
if l < 0 {
ret = fmt.Sprintf("(%d)", int32(l))
} else {
ret = fmt.Sprintf("%d", int32(l))
}
case types.Int16:
if l < 0 {
ret = fmt.Sprintf("(%d)", int16(l))
} else {
ret = fmt.Sprintf("%d", int16(l))
}
case types.Int8:
if l < 0 {
ret = fmt.Sprintf("(%d)", int8(l))
} else {
ret = fmt.Sprintf("%d", int8(l))
}
case types.UnsafePointer:
if l == 0 {
return "Pointer", "null"
}
pogo.LogError(position, "Haxe", fmt.Errorf("unsafe pointers cannot be initialized in TARDISgo/Haxe to a non-zero value: %v", l))
default:
panic("haxe.Const() unhandled integer constant for: " +
lit.Type().Underlying().(*types.Basic).String())
}
return "Int", ret
}
case exact.Unknown: // not sure we should ever get here!
return "Dynamic", "null"
case exact.Complex:
realV, _ := exact.Float64Val(exact.Real(lit.Value))
imagV, _ := exact.Float64Val(exact.Imag(lit.Value))
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", float32(realV), float32(imagV))
default:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", realV, imagV)
}
}
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown constant type: %v", lit.Value.Kind()))
return "", ""
}
// only public Literals are created here, so that they can be used by Haxe callers of the Go code
func (l langType) Na | ackageName, objectName string, lit ssa.Const, position string) string {
typ, rhs := l.Const(lit, position+":"+packageName+"."+objectName)
return fmt.Sprintf("public static var %s:%s = %s;%s",
l.LangName(packageName, objectName), typ, rhs, l.Comment(position))
}
func (l langType) Global(packageName, objectName string, glob ssa.Global, position string, isPublic bool) string {
pub := "public " // all globals have to be public in Haxe terms
//gTyp := glob.Type().Underlying().(*types.Pointer).Elem().Underlying() // globals are always pointers to an underlying element
/*
ptrTyp := "Pointer"
//ltDesc := "Dynamic" // these values suitable for *types.Struct
ltInit := "null"
switch gTyp.(type) {
case *types.Basic, *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Signature:
ptrTyp = "Pointer"
//ltDesc = l.LangType(gTyp, false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Array:
ptrTyp = "Pointer"
//ltDesc = "Array<" + l.LangType(gTyp.(*types.Array).Elem().Underlying(), false, position) + ">"
ltInit = l.LangType(gTyp, true, position)
case *types.Slice:
ptrTyp = "Pointer"
//ltDesc = "Slice" // was: l.LangType(gTyp.(*types.Slice).Elem().Underlying(), false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Struct:
ptrTyp = "Pointer"
//ltDesc = "Dynamic" // TODO improve!
ltInit = l.LangType(gTyp, true, position)
}
init := "new " + ptrTyp + "(" + ltInit + ")" // initialize basic types only
*/
//return fmt.Sprintf("%sstatic %s %s",
// pub, haxeVar(l.LangName(packageName, objectName), ptrTyp, init, position, "Global()"),
// l.Comment(position))
obj := allocNewObject(glob.Type().Underlying().(*types.Pointer))
return fmt.Sprintf("%sstatic var %s:Pointer=new Pointer(%s); %s",
pub, l.LangName(packageName, objectName), obj, l.Comment(position))
}
| medConst(p | identifier_name |
goclass.go | // Copyright 2014 Elliott Stoneham and The TARDIS Go Authors
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package haxe
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
"unicode"
"github.com/tardisgo/tardisgo/pogo"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/types"
)
// Start the main Go class in haxe
func (langType) GoClassStart() string {
// the code below makes the Go class globally visible in JS as window.Go in the browser or exports.Go in nodejs
//TODO consider how to make Go/Haxe libs available across all platforms
return `
#if js
@:expose("Go")
#end
class Go
{
public static function Platform():String { // codes returned the same as used by Haxe
#if flash
return "flash";
#elseif js
return "js";
#elseif cpp
return "cpp";
#elseif java
return "java";
#elseif cs
return "cs";
#elseif python
#error "SORRY: the python target is not yet ready for general use"
return "python";
#elseif php
return "php";
#elseif neko
return "neko";
#else
#error "Only the js, flash, cpp (C++), java, cs (C#), php, python and neko Haxe targets are supported as a Go platform"
#end
}
`
}
// end the main Go class
func (l langType) GoClassEnd(pkg *ssa.Package) string {
// init function
main := "public static var doneInit:Bool=false;\n" // flag to run this routine only once
main += "\npublic static function init() : Void {\ndoneInit=true;\nvar gr:Int=Scheduler.makeGoroutine();\n" // first goroutine number is always 0
main += `if(gr!=0) throw "non-zero goroutine number in init";` + "\n" // first goroutine number is always 0, NOTE using throw as panic not setup
main += "var _sfgr=new Go_haxegoruntime_init(gr,[]).run();\n" //haxegoruntime.init() NOTE can't use .hx() to call from Haxe as that would call this fn
main += `Go.haxegoruntime_ZZiLLen.store_uint32('字'.length);` // value required by haxegoruntime to know what type of strings we have
main += "while(_sfgr._incomplete) Scheduler.runAll();\n"
main += "var _sf=new Go_" + l.LangName(pkg.Object.Path(), "init") + `(gr,[]).run();` + "\n" //NOTE can't use .hx() to call from Haxe as that would call this fn
main += "while(_sf._incomplete) Scheduler.runAll();\n"
main += ""
main += "Scheduler.doneInit=true;\n"
main += "}\n"
// Haxe main function, only called in a go-only environment
main += "\npublic static function main() : Void {\n"
main += "Go_" + l.LangName(pkg.Object.Path(), "main") + `.hx();` + "\n"
main += "}\n"
pos := "public static function CPos(pos:Int):String {\nvar prefix:String=\"\";\n"
pos += fmt.Sprintf(`if (pos==%d) return "(pogo.NoPosHash)";`, pogo.NoPosHash) + "\n"
pos += "if (pos<0) { pos = -pos; prefix= \"near \";}\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
if p != len(pogo.PosHashFileList)-1 {
| pos += fmt.Sprintf(`if(pos>%d) return prefix+"%s:"+Std.string(pos-%d);`,
pogo.PosHashFileList[p].BasePosHash,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "else return \"(invalid pogo.PosHash:\"+Std.string(pos)+\")\";\n}\n"
if pogo.DebugFlag {
pos += "\npublic static function getStartCPos(s:String):Int {\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return %d;`,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "\treturn -1;\n}\n"
pos += "\npublic static function getGlobal(s:String):String {\n"
globs := pogo.GlobalList()
for _, g := range globs {
goName := strings.Replace(g.Package+"."+g.Member, "\\", "\\\\", -1)
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return "%s = "+%s.toString();`,
goName, goName, l.LangName(g.Package, g.Member)) + "\n"
}
pos += "\treturn \"Couldn't find global: \"+s;\n}\n"
}
return main + pos + "} // end Go class"
}
func haxeStringConst(sconst string, position string) string {
s, err := strconv.Unquote(sconst)
if err != nil {
pogo.LogError(position, "Haxe", errors.New(err.Error()+" : "+sconst))
return ""
}
ret0 := ""
hadEsc := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' && !hadEsc {
ret0 += string(c)
} else {
ret0 += fmt.Sprintf("\\x%02X", c)
hadEsc = true
}
}
ret0 = `"` + ret0 + `"`
ret := ``
compound := ""
hadStr := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' {
compound += string(c)
} else {
if hadStr {
ret += "+"
}
if compound != "" {
compound = `"` + compound + `"+`
}
ret += fmt.Sprintf("%sString.fromCharCode(%d)", compound, c)
compound = ""
hadStr = true
}
}
if hadStr {
if compound != "" {
ret += fmt.Sprintf("+\"%s\"", compound)
}
} else {
ret += fmt.Sprintf("\"%s\"", compound)
}
if ret0 == ret {
return ret
}
return ` #if (cpp || neko || php) ` + ret0 + ` #else ` + ret + " #end "
}
func constFloat64(lit ssa.Const, bits int, position string) string {
var f float64
var f32 float32
//sigBits := uint(53)
//if bits == 32 {
// sigBits = 24
//}
f, _ /*f64ok*/ = exact.Float64Val(lit.Value)
f32, _ /*f32ok*/ = exact.Float32Val(lit.Value)
if bits == 32 {
f = float64(f32)
}
haxeVal := pogo.FloatVal(lit.Value, bits, position)
switch {
case math.IsInf(f, +1):
haxeVal = "Math.POSITIVE_INFINITY"
case math.IsInf(f, -1):
haxeVal = "Math.NEGATIVE_INFINITY"
case math.IsNaN(f): // must come after infinity checks
haxeVal = "Math.NaN"
//case f == 0 && math.Signbit(f): // -0 is zero, but it has a -ve sign
// //println("DEBUG -0") // TODO this code never seems to get executed
// haxeVal = "({var f:Float=0; f*=-1; f;})"
default:
// there is a problem with haxe constant processing for some floats
// try to be as exact as the host can be ... but also concise
//if float64(int64(f)) != f { // not a simple integer
/*
frac, exp := math.Frexp(f)
intPart := int64(frac * float64(uint64(1)<<sigBits))
expPart := exp - int(sigBits)
if float64(intPart) == frac*float64(uint64(1)<<sigBits) &&
expPart >= -1022 && expPart <= 1023 {
//it is an integer in the correct range
haxeVal = fmt.Sprintf("(%d*Math.pow(2,%d))", intPart, expPart) // NOTE: need the Math.pow to avoid haxe constant folding
}
*/
/*
val := exact.MakeFloat64(frac)
num := exact.Num(val)
den := exact.Denom(val)
n64i, nok := exact.Int64Val(num)
d64i, dok := exact.Int64Val(den)
res := float64(n64i) * math.Pow(2, float64(exp)) / float64(d64i)
if !math.IsNaN(res) && !math.IsInf(res, +1) && !math.IsInf(res, -1) { //drop through
if nok && dok {
nh, nl := pogo.IntVal(num, position)
dh, dl := pogo.IntVal(den, position)
n := fmt.Sprintf("%d", nl)
if n64i < 0 {
n = "(" + n + ")"
}
if nh != 0 && nh != -1 {
n = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(nh), uint32(nl))
}
if float64(d64i) == math.Pow(2, float64(exp)) {
haxeVal = n // divisor and multiplier the same
} else {
d := fmt.Sprintf("%d", dl)
if dh != 0 && dh != -1 {
d = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(dh), uint32(dl))
}
if n64i == 1 {
n = "" // no point multiplying by 1
} else {
n = n + "*"
}
if d64i == 1 {
d = "" // no point in dividing by 1
} else {
d = "/" + d
}
haxeVal = fmt.Sprintf("(%sMath.pow(2,%d)%s)", n, exp, d) // NOTE: need the Math.pow to avoid haxe constant folding
}
}
}
*/
//}
}
return haxeVal
/*
bits64 := *(*uint64)(unsafe.Pointer(&f))
bitVal := exact.MakeUint64(bits64)
h, l := pogo.IntVal(bitVal, position)
bitStr := fmt.Sprintf("GOint64.make(0x%x,0x%x)", uint32(h), uint32(l))
return "Force.float64const(" + bitStr + "," + haxeVal + ")"
*/
}
func (langType) Const(lit ssa.Const, position string) (typ, val string) {
if lit.Value == nil {
return "Dynamic", "null"
}
lit.Name()
switch lit.Value.Kind() {
case exact.Bool:
return "Bool", lit.Value.String()
case exact.String:
// TODO check if conversion of some string constant declarations are required
switch lit.Type().Underlying().(type) {
case *types.Basic:
return "String", haxeStringConst(lit.Value.String(), position)
case *types.Slice:
return "Slice", "Force.toUTF8slice(this._goroutine," + haxeStringConst(lit.Value.String(), position) + ")"
default:
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown string type"))
}
case exact.Float:
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
}
case exact.Int:
h, l := pogo.IntVal(lit.Value, position)
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Int64:
return "GOint64", fmt.Sprintf("Force.toInt64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Uint64:
return "GOint64", fmt.Sprintf("Force.toUint64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
default:
if h != 0 && h != -1 {
pogo.LogWarning(position, "Haxe", fmt.Errorf("integer constant value > 32 bits : %v", lit.Value))
}
ret := ""
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Uint, types.Uint32, types.Uintptr:
q := uint32(l)
ret = fmt.Sprintf(
" #if js untyped __js__(\"0x%x\") #elseif php untyped __php__(\"0x%x\") #else 0x%x #end ",
q, q, q)
case types.Uint16:
q := uint16(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Uint8: // types.Byte
q := uint8(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Int, types.Int32, types.UntypedRune, types.UntypedInt: // types.Rune
if l < 0 {
ret = fmt.Sprintf("(%d)", int32(l))
} else {
ret = fmt.Sprintf("%d", int32(l))
}
case types.Int16:
if l < 0 {
ret = fmt.Sprintf("(%d)", int16(l))
} else {
ret = fmt.Sprintf("%d", int16(l))
}
case types.Int8:
if l < 0 {
ret = fmt.Sprintf("(%d)", int8(l))
} else {
ret = fmt.Sprintf("%d", int8(l))
}
case types.UnsafePointer:
if l == 0 {
return "Pointer", "null"
}
pogo.LogError(position, "Haxe", fmt.Errorf("unsafe pointers cannot be initialized in TARDISgo/Haxe to a non-zero value: %v", l))
default:
panic("haxe.Const() unhandled integer constant for: " +
lit.Type().Underlying().(*types.Basic).String())
}
return "Int", ret
}
case exact.Unknown: // not sure we should ever get here!
return "Dynamic", "null"
case exact.Complex:
realV, _ := exact.Float64Val(exact.Real(lit.Value))
imagV, _ := exact.Float64Val(exact.Imag(lit.Value))
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", float32(realV), float32(imagV))
default:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", realV, imagV)
}
}
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown constant type: %v", lit.Value.Kind()))
return "", ""
}
// only public Literals are created here, so that they can be used by Haxe callers of the Go code
func (l langType) NamedConst(packageName, objectName string, lit ssa.Const, position string) string {
typ, rhs := l.Const(lit, position+":"+packageName+"."+objectName)
return fmt.Sprintf("public static var %s:%s = %s;%s",
l.LangName(packageName, objectName), typ, rhs, l.Comment(position))
}
func (l langType) Global(packageName, objectName string, glob ssa.Global, position string, isPublic bool) string {
pub := "public " // all globals have to be public in Haxe terms
//gTyp := glob.Type().Underlying().(*types.Pointer).Elem().Underlying() // globals are always pointers to an underlying element
/*
ptrTyp := "Pointer"
//ltDesc := "Dynamic" // these values suitable for *types.Struct
ltInit := "null"
switch gTyp.(type) {
case *types.Basic, *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Signature:
ptrTyp = "Pointer"
//ltDesc = l.LangType(gTyp, false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Array:
ptrTyp = "Pointer"
//ltDesc = "Array<" + l.LangType(gTyp.(*types.Array).Elem().Underlying(), false, position) + ">"
ltInit = l.LangType(gTyp, true, position)
case *types.Slice:
ptrTyp = "Pointer"
//ltDesc = "Slice" // was: l.LangType(gTyp.(*types.Slice).Elem().Underlying(), false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Struct:
ptrTyp = "Pointer"
//ltDesc = "Dynamic" // TODO improve!
ltInit = l.LangType(gTyp, true, position)
}
init := "new " + ptrTyp + "(" + ltInit + ")" // initialize basic types only
*/
//return fmt.Sprintf("%sstatic %s %s",
// pub, haxeVar(l.LangName(packageName, objectName), ptrTyp, init, position, "Global()"),
// l.Comment(position))
obj := allocNewObject(glob.Type().Underlying().(*types.Pointer))
return fmt.Sprintf("%sstatic var %s:Pointer=new Pointer(%s); %s",
pub, l.LangName(packageName, objectName), obj, l.Comment(position))
}
| pos += "else "
}
| conditional_block |
goclass.go | // Copyright 2014 Elliott Stoneham and The TARDIS Go Authors
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package haxe
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
"unicode"
"github.com/tardisgo/tardisgo/pogo"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/types"
)
// Start the main Go class in haxe
func (langType) GoClassStart() string {
// the code below makes the Go class globally visible in JS as window.Go in the browser or exports.Go in nodejs
//TODO consider how to make Go/Haxe libs available across all platforms
return `
#if js
@:expose("Go")
#end
class Go
{
public static function Platform():String { // codes returned the same as used by Haxe
#if flash
return "flash";
#elseif js
return "js";
#elseif cpp
return "cpp";
#elseif java
return "java";
#elseif cs
return "cs";
#elseif python
#error "SORRY: the python target is not yet ready for general use"
return "python";
#elseif php
return "php";
#elseif neko
return "neko";
#else
#error "Only the js, flash, cpp (C++), java, cs (C#), php, python and neko Haxe targets are supported as a Go platform"
#end
}
`
}
// end the main Go class
func (l langType) GoClassEnd(pkg *ssa.Package) string {
// init function
main := "public static var doneInit:Bool=false;\n" // flag to run this routine only once
main += "\npublic static function init() : Void {\ndoneInit=true;\nvar gr:Int=Scheduler.makeGoroutine();\n" // first goroutine number is always 0
main += `if(gr!=0) throw "non-zero goroutine number in init";` + "\n" // first goroutine number is always 0, NOTE using throw as panic not setup
main += "var _sfgr=new Go_haxegoruntime_init(gr,[]).run();\n" //haxegoruntime.init() NOTE can't use .hx() to call from Haxe as that would call this fn
main += `Go.haxegoruntime_ZZiLLen.store_uint32('字'.length);` // value required by haxegoruntime to know what type of strings we have
main += "while(_sfgr._incomplete) Scheduler.runAll();\n"
main += "var _sf=new Go_" + l.LangName(pkg.Object.Path(), "init") + `(gr,[]).run();` + "\n" //NOTE can't use .hx() to call from Haxe as that would call this fn
main += "while(_sf._incomplete) Scheduler.runAll();\n"
main += ""
main += "Scheduler.doneInit=true;\n"
main += "}\n"
// Haxe main function, only called in a go-only environment
main += "\npublic static function main() : Void {\n"
main += "Go_" + l.LangName(pkg.Object.Path(), "main") + `.hx();` + "\n"
main += "}\n"
pos := "public static function CPos(pos:Int):String {\nvar prefix:String=\"\";\n"
pos += fmt.Sprintf(`if (pos==%d) return "(pogo.NoPosHash)";`, pogo.NoPosHash) + "\n"
pos += "if (pos<0) { pos = -pos; prefix= \"near \";}\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
if p != len(pogo.PosHashFileList)-1 {
pos += "else "
}
pos += fmt.Sprintf(`if(pos>%d) return prefix+"%s:"+Std.string(pos-%d);`,
pogo.PosHashFileList[p].BasePosHash,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "else return \"(invalid pogo.PosHash:\"+Std.string(pos)+\")\";\n}\n"
if pogo.DebugFlag {
pos += "\npublic static function getStartCPos(s:String):Int {\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return %d;`,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "\treturn -1;\n}\n"
pos += "\npublic static function getGlobal(s:String):String {\n"
globs := pogo.GlobalList()
for _, g := range globs {
goName := strings.Replace(g.Package+"."+g.Member, "\\", "\\\\", -1)
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return "%s = "+%s.toString();`,
goName, goName, l.LangName(g.Package, g.Member)) + "\n"
}
pos += "\treturn \"Couldn't find global: \"+s;\n}\n"
}
return main + pos + "} // end Go class"
}
func haxeStringConst(sconst string, position string) string {
s, err := strconv.Unquote(sconst)
if err != nil {
pogo.LogError(position, "Haxe", errors.New(err.Error()+" : "+sconst))
return ""
}
ret0 := ""
hadEsc := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' && !hadEsc {
ret0 += string(c)
} else {
ret0 += fmt.Sprintf("\\x%02X", c)
hadEsc = true
}
}
ret0 = `"` + ret0 + `"`
ret := ``
compound := ""
hadStr := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' {
compound += string(c)
} else {
if hadStr {
ret += "+"
}
if compound != "" {
compound = `"` + compound + `"+`
}
ret += fmt.Sprintf("%sString.fromCharCode(%d)", compound, c)
compound = ""
hadStr = true
}
}
if hadStr {
if compound != "" {
ret += fmt.Sprintf("+\"%s\"", compound)
}
} else {
ret += fmt.Sprintf("\"%s\"", compound)
}
if ret0 == ret {
return ret
}
return ` #if (cpp || neko || php) ` + ret0 + ` #else ` + ret + " #end "
}
func constFloat64(lit ssa.Const, bits int, position string) string {
var f float64
var f32 float32
//sigBits := uint(53)
//if bits == 32 {
// sigBits = 24
//}
f, _ /*f64ok*/ = exact.Float64Val(lit.Value)
f32, _ /*f32ok*/ = exact.Float32Val(lit.Value)
if bits == 32 {
f = float64(f32)
}
haxeVal := pogo.FloatVal(lit.Value, bits, position)
switch {
case math.IsInf(f, +1):
haxeVal = "Math.POSITIVE_INFINITY"
case math.IsInf(f, -1):
haxeVal = "Math.NEGATIVE_INFINITY"
case math.IsNaN(f): // must come after infinity checks
haxeVal = "Math.NaN"
//case f == 0 && math.Signbit(f): // -0 is zero, but it has a -ve sign
// //println("DEBUG -0") // TODO this code never seems to get executed
// haxeVal = "({var f:Float=0; f*=-1; f;})"
default:
// there is a problem with haxe constant processing for some floats
// try to be as exact as the host can be ... but also concise
//if float64(int64(f)) != f { // not a simple integer
/*
frac, exp := math.Frexp(f)
intPart := int64(frac * float64(uint64(1)<<sigBits))
expPart := exp - int(sigBits)
if float64(intPart) == frac*float64(uint64(1)<<sigBits) &&
expPart >= -1022 && expPart <= 1023 {
//it is an integer in the correct range
haxeVal = fmt.Sprintf("(%d*Math.pow(2,%d))", intPart, expPart) // NOTE: need the Math.pow to avoid haxe constant folding
}
*/
/*
val := exact.MakeFloat64(frac)
num := exact.Num(val)
den := exact.Denom(val)
n64i, nok := exact.Int64Val(num)
d64i, dok := exact.Int64Val(den)
res := float64(n64i) * math.Pow(2, float64(exp)) / float64(d64i)
if !math.IsNaN(res) && !math.IsInf(res, +1) && !math.IsInf(res, -1) { //drop through
if nok && dok {
nh, nl := pogo.IntVal(num, position)
dh, dl := pogo.IntVal(den, position)
n := fmt.Sprintf("%d", nl)
if n64i < 0 {
n = "(" + n + ")"
}
if nh != 0 && nh != -1 {
n = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(nh), uint32(nl))
}
if float64(d64i) == math.Pow(2, float64(exp)) {
haxeVal = n // divisor and multiplier the same
} else {
d := fmt.Sprintf("%d", dl)
if dh != 0 && dh != -1 {
d = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(dh), uint32(dl))
}
if n64i == 1 {
n = "" // no point multiplying by 1
} else {
n = n + "*"
}
if d64i == 1 {
d = "" // no point in dividing by 1
} else {
d = "/" + d
}
haxeVal = fmt.Sprintf("(%sMath.pow(2,%d)%s)", n, exp, d) // NOTE: need the Math.pow to avoid haxe constant folding
}
}
}
*/
//}
}
return haxeVal
/*
bits64 := *(*uint64)(unsafe.Pointer(&f))
bitVal := exact.MakeUint64(bits64)
h, l := pogo.IntVal(bitVal, position)
bitStr := fmt.Sprintf("GOint64.make(0x%x,0x%x)", uint32(h), uint32(l))
return "Force.float64const(" + bitStr + "," + haxeVal + ")"
*/
}
func (langType) Const(lit ssa.Const, position string) (typ, val string) {
if lit.Value == nil {
return "Dynamic", "null"
}
lit.Name()
switch lit.Value.Kind() {
case exact.Bool:
return "Bool", lit.Value.String()
case exact.String:
// TODO check if conversion of some string constant declarations are required
switch lit.Type().Underlying().(type) {
case *types.Basic:
return "String", haxeStringConst(lit.Value.String(), position)
case *types.Slice:
return "Slice", "Force.toUTF8slice(this._goroutine," + haxeStringConst(lit.Value.String(), position) + ")"
default:
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown string type"))
}
case exact.Float:
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
}
case exact.Int:
h, l := pogo.IntVal(lit.Value, position)
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Int64:
return "GOint64", fmt.Sprintf("Force.toInt64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Uint64:
return "GOint64", fmt.Sprintf("Force.toUint64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
default:
if h != 0 && h != -1 {
pogo.LogWarning(position, "Haxe", fmt.Errorf("integer constant value > 32 bits : %v", lit.Value))
}
ret := ""
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Uint, types.Uint32, types.Uintptr:
q := uint32(l)
ret = fmt.Sprintf(
" #if js untyped __js__(\"0x%x\") #elseif php untyped __php__(\"0x%x\") #else 0x%x #end ",
q, q, q)
case types.Uint16:
q := uint16(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Uint8: // types.Byte
q := uint8(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Int, types.Int32, types.UntypedRune, types.UntypedInt: // types.Rune
if l < 0 {
ret = fmt.Sprintf("(%d)", int32(l))
} else {
ret = fmt.Sprintf("%d", int32(l))
}
case types.Int16:
if l < 0 {
ret = fmt.Sprintf("(%d)", int16(l))
} else {
ret = fmt.Sprintf("%d", int16(l))
}
case types.Int8:
if l < 0 {
ret = fmt.Sprintf("(%d)", int8(l))
} else {
ret = fmt.Sprintf("%d", int8(l))
}
case types.UnsafePointer:
if l == 0 {
return "Pointer", "null"
}
pogo.LogError(position, "Haxe", fmt.Errorf("unsafe pointers cannot be initialized in TARDISgo/Haxe to a non-zero value: %v", l))
default:
panic("haxe.Const() unhandled integer constant for: " +
lit.Type().Underlying().(*types.Basic).String())
}
return "Int", ret
}
case exact.Unknown: // not sure we should ever get here!
return "Dynamic", "null"
case exact.Complex:
realV, _ := exact.Float64Val(exact.Real(lit.Value))
imagV, _ := exact.Float64Val(exact.Imag(lit.Value))
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", float32(realV), float32(imagV))
default:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", realV, imagV)
}
}
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown constant type: %v", lit.Value.Kind()))
return "", ""
}
// only public Literals are created here, so that they can be used by Haxe callers of the Go code
func (l langType) NamedConst(packageName, objectName string, lit ssa.Const, position string) string {
typ, rhs := l.Const(lit, position+":"+packageName+"."+objectName)
return fmt.Sprintf("public static var %s:%s = %s;%s",
l.LangName(packageName, objectName), typ, rhs, l.Comment(position))
}
func (l langType) Global(packageName, objectName string, glob ssa.Global, position string, isPublic bool) string {
| pub := "public " // all globals have to be public in Haxe terms
//gTyp := glob.Type().Underlying().(*types.Pointer).Elem().Underlying() // globals are always pointers to an underlying element
/*
ptrTyp := "Pointer"
//ltDesc := "Dynamic" // these values suitable for *types.Struct
ltInit := "null"
switch gTyp.(type) {
case *types.Basic, *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Signature:
ptrTyp = "Pointer"
//ltDesc = l.LangType(gTyp, false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Array:
ptrTyp = "Pointer"
//ltDesc = "Array<" + l.LangType(gTyp.(*types.Array).Elem().Underlying(), false, position) + ">"
ltInit = l.LangType(gTyp, true, position)
case *types.Slice:
ptrTyp = "Pointer"
//ltDesc = "Slice" // was: l.LangType(gTyp.(*types.Slice).Elem().Underlying(), false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Struct:
ptrTyp = "Pointer"
//ltDesc = "Dynamic" // TODO improve!
ltInit = l.LangType(gTyp, true, position)
}
init := "new " + ptrTyp + "(" + ltInit + ")" // initialize basic types only
*/
//return fmt.Sprintf("%sstatic %s %s",
// pub, haxeVar(l.LangName(packageName, objectName), ptrTyp, init, position, "Global()"),
// l.Comment(position))
obj := allocNewObject(glob.Type().Underlying().(*types.Pointer))
return fmt.Sprintf("%sstatic var %s:Pointer=new Pointer(%s); %s",
pub, l.LangName(packageName, objectName), obj, l.Comment(position))
}
| identifier_body | |
goclass.go | // Copyright 2014 Elliott Stoneham and The TARDIS Go Authors
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package haxe
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
"unicode"
"github.com/tardisgo/tardisgo/pogo"
"golang.org/x/tools/go/exact"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/types"
)
// Start the main Go class in haxe
func (langType) GoClassStart() string {
// the code below makes the Go class globally visible in JS as window.Go in the browser or exports.Go in nodejs
//TODO consider how to make Go/Haxe libs available across all platforms
return `
#if js
@:expose("Go")
#end
class Go
{
public static function Platform():String { // codes returned the same as used by Haxe
#if flash
return "flash";
#elseif js
return "js";
#elseif cpp
return "cpp";
#elseif java
return "java";
#elseif cs
return "cs";
#elseif python
#error "SORRY: the python target is not yet ready for general use"
return "python";
#elseif php
return "php";
#elseif neko
return "neko";
#else
#error "Only the js, flash, cpp (C++), java, cs (C#), php, python and neko Haxe targets are supported as a Go platform"
#end
}
`
}
// end the main Go class
func (l langType) GoClassEnd(pkg *ssa.Package) string {
// init function
main := "public static var doneInit:Bool=false;\n" // flag to run this routine only once
main += "\npublic static function init() : Void {\ndoneInit=true;\nvar gr:Int=Scheduler.makeGoroutine();\n" // first goroutine number is always 0
main += `if(gr!=0) throw "non-zero goroutine number in init";` + "\n" // first goroutine number is always 0, NOTE using throw as panic not setup
main += "var _sfgr=new Go_haxegoruntime_init(gr,[]).run();\n" //haxegoruntime.init() NOTE can't use .hx() to call from Haxe as that would call this fn
main += `Go.haxegoruntime_ZZiLLen.store_uint32('字'.length);` // value required by haxegoruntime to know what type of strings we have
main += "while(_sfgr._incomplete) Scheduler.runAll();\n"
main += "var _sf=new Go_" + l.LangName(pkg.Object.Path(), "init") + `(gr,[]).run();` + "\n" //NOTE can't use .hx() to call from Haxe as that would call this fn
main += "while(_sf._incomplete) Scheduler.runAll();\n"
main += ""
main += "Scheduler.doneInit=true;\n"
main += "}\n"
// Haxe main function, only called in a go-only environment
main += "\npublic static function main() : Void {\n"
main += "Go_" + l.LangName(pkg.Object.Path(), "main") + `.hx();` + "\n"
main += "}\n"
pos := "public static function CPos(pos:Int):String {\nvar prefix:String=\"\";\n"
pos += fmt.Sprintf(`if (pos==%d) return "(pogo.NoPosHash)";`, pogo.NoPosHash) + "\n"
pos += "if (pos<0) { pos = -pos; prefix= \"near \";}\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
if p != len(pogo.PosHashFileList)-1 {
pos += "else "
}
pos += fmt.Sprintf(`if(pos>%d) return prefix+"%s:"+Std.string(pos-%d);`,
pogo.PosHashFileList[p].BasePosHash,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "else return \"(invalid pogo.PosHash:\"+Std.string(pos)+\")\";\n}\n"
if pogo.DebugFlag {
pos += "\npublic static function getStartCPos(s:String):Int {\n"
for p := len(pogo.PosHashFileList) - 1; p >= 0; p-- {
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return %d;`,
strings.Replace(pogo.PosHashFileList[p].FileName, "\\", "\\\\", -1),
pogo.PosHashFileList[p].BasePosHash) + "\n"
}
pos += "\treturn -1;\n}\n"
pos += "\npublic static function getGlobal(s:String):String {\n"
globs := pogo.GlobalList()
for _, g := range globs {
goName := strings.Replace(g.Package+"."+g.Member, "\\", "\\\\", -1)
pos += "\t" + fmt.Sprintf(`if("%s".indexOf(s)!=-1) return "%s = "+%s.toString();`,
goName, goName, l.LangName(g.Package, g.Member)) + "\n"
}
pos += "\treturn \"Couldn't find global: \"+s;\n}\n"
}
return main + pos + "} // end Go class"
}
func haxeStringConst(sconst string, position string) string {
s, err := strconv.Unquote(sconst)
if err != nil {
pogo.LogError(position, "Haxe", errors.New(err.Error()+" : "+sconst))
return ""
}
ret0 := ""
hadEsc := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' && !hadEsc {
ret0 += string(c)
} else {
ret0 += fmt.Sprintf("\\x%02X", c)
hadEsc = true
}
}
ret0 = `"` + ret0 + `"`
ret := ``
compound := ""
hadStr := false
for i := 0; i < len(s); i++ {
c := rune(s[i])
if unicode.IsPrint(c) && c < unicode.MaxASCII && c != '"' && c != '`' && c != '\\' {
compound += string(c)
} else {
if hadStr {
ret += "+"
}
if compound != "" {
compound = `"` + compound + `"+`
}
ret += fmt.Sprintf("%sString.fromCharCode(%d)", compound, c)
compound = ""
hadStr = true
}
}
if hadStr {
if compound != "" {
ret += fmt.Sprintf("+\"%s\"", compound)
}
} else {
ret += fmt.Sprintf("\"%s\"", compound)
}
if ret0 == ret {
return ret
}
return ` #if (cpp || neko || php) ` + ret0 + ` #else ` + ret + " #end "
}
func constFloat64(lit ssa.Const, bits int, position string) string {
var f float64
var f32 float32
//sigBits := uint(53)
//if bits == 32 {
// sigBits = 24
//}
f, _ /*f64ok*/ = exact.Float64Val(lit.Value)
f32, _ /*f32ok*/ = exact.Float32Val(lit.Value)
if bits == 32 {
f = float64(f32)
}
haxeVal := pogo.FloatVal(lit.Value, bits, position)
switch {
case math.IsInf(f, +1):
haxeVal = "Math.POSITIVE_INFINITY"
case math.IsInf(f, -1):
haxeVal = "Math.NEGATIVE_INFINITY"
case math.IsNaN(f): // must come after infinity checks
haxeVal = "Math.NaN"
//case f == 0 && math.Signbit(f): // -0 is zero, but it has a -ve sign
// //println("DEBUG -0") // TODO this code never seems to get executed
// haxeVal = "({var f:Float=0; f*=-1; f;})"
default:
// there is a problem with haxe constant processing for some floats
// try to be as exact as the host can be ... but also concise
//if float64(int64(f)) != f { // not a simple integer
/*
frac, exp := math.Frexp(f)
intPart := int64(frac * float64(uint64(1)<<sigBits))
expPart := exp - int(sigBits) | */
/*
val := exact.MakeFloat64(frac)
num := exact.Num(val)
den := exact.Denom(val)
n64i, nok := exact.Int64Val(num)
d64i, dok := exact.Int64Val(den)
res := float64(n64i) * math.Pow(2, float64(exp)) / float64(d64i)
if !math.IsNaN(res) && !math.IsInf(res, +1) && !math.IsInf(res, -1) { //drop through
if nok && dok {
nh, nl := pogo.IntVal(num, position)
dh, dl := pogo.IntVal(den, position)
n := fmt.Sprintf("%d", nl)
if n64i < 0 {
n = "(" + n + ")"
}
if nh != 0 && nh != -1 {
n = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(nh), uint32(nl))
}
if float64(d64i) == math.Pow(2, float64(exp)) {
haxeVal = n // divisor and multiplier the same
} else {
d := fmt.Sprintf("%d", dl)
if dh != 0 && dh != -1 {
d = fmt.Sprintf("GOint64.toFloat(Force.toInt64(GOint64.make(0x%x,0x%x)))", uint32(dh), uint32(dl))
}
if n64i == 1 {
n = "" // no point multiplying by 1
} else {
n = n + "*"
}
if d64i == 1 {
d = "" // no point in dividing by 1
} else {
d = "/" + d
}
haxeVal = fmt.Sprintf("(%sMath.pow(2,%d)%s)", n, exp, d) // NOTE: need the Math.pow to avoid haxe constant folding
}
}
}
*/
//}
}
return haxeVal
/*
bits64 := *(*uint64)(unsafe.Pointer(&f))
bitVal := exact.MakeUint64(bits64)
h, l := pogo.IntVal(bitVal, position)
bitStr := fmt.Sprintf("GOint64.make(0x%x,0x%x)", uint32(h), uint32(l))
return "Force.float64const(" + bitStr + "," + haxeVal + ")"
*/
}
func (langType) Const(lit ssa.Const, position string) (typ, val string) {
if lit.Value == nil {
return "Dynamic", "null"
}
lit.Name()
switch lit.Value.Kind() {
case exact.Bool:
return "Bool", lit.Value.String()
case exact.String:
// TODO check if conversion of some string constant declarations are required
switch lit.Type().Underlying().(type) {
case *types.Basic:
return "String", haxeStringConst(lit.Value.String(), position)
case *types.Slice:
return "Slice", "Force.toUTF8slice(this._goroutine," + haxeStringConst(lit.Value.String(), position) + ")"
default:
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown string type"))
}
case exact.Float:
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
}
case exact.Int:
h, l := pogo.IntVal(lit.Value, position)
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Int64:
return "GOint64", fmt.Sprintf("Force.toInt64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Uint64:
return "GOint64", fmt.Sprintf("Force.toUint64(GOint64.make(0x%x,0x%x))", uint32(h), uint32(l))
case types.Float32:
return "Float", constFloat64(lit, 32, position)
case types.Float64, types.UntypedFloat:
return "Float", constFloat64(lit, 64, position)
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 32, position))
case types.Complex128:
return "Complex", fmt.Sprintf("new Complex(%s,0)", pogo.FloatVal(lit.Value, 64, position))
default:
if h != 0 && h != -1 {
pogo.LogWarning(position, "Haxe", fmt.Errorf("integer constant value > 32 bits : %v", lit.Value))
}
ret := ""
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Uint, types.Uint32, types.Uintptr:
q := uint32(l)
ret = fmt.Sprintf(
" #if js untyped __js__(\"0x%x\") #elseif php untyped __php__(\"0x%x\") #else 0x%x #end ",
q, q, q)
case types.Uint16:
q := uint16(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Uint8: // types.Byte
q := uint8(l)
ret = fmt.Sprintf(" 0x%x ", q)
case types.Int, types.Int32, types.UntypedRune, types.UntypedInt: // types.Rune
if l < 0 {
ret = fmt.Sprintf("(%d)", int32(l))
} else {
ret = fmt.Sprintf("%d", int32(l))
}
case types.Int16:
if l < 0 {
ret = fmt.Sprintf("(%d)", int16(l))
} else {
ret = fmt.Sprintf("%d", int16(l))
}
case types.Int8:
if l < 0 {
ret = fmt.Sprintf("(%d)", int8(l))
} else {
ret = fmt.Sprintf("%d", int8(l))
}
case types.UnsafePointer:
if l == 0 {
return "Pointer", "null"
}
pogo.LogError(position, "Haxe", fmt.Errorf("unsafe pointers cannot be initialized in TARDISgo/Haxe to a non-zero value: %v", l))
default:
panic("haxe.Const() unhandled integer constant for: " +
lit.Type().Underlying().(*types.Basic).String())
}
return "Int", ret
}
case exact.Unknown: // not sure we should ever get here!
return "Dynamic", "null"
case exact.Complex:
realV, _ := exact.Float64Val(exact.Real(lit.Value))
imagV, _ := exact.Float64Val(exact.Imag(lit.Value))
switch lit.Type().Underlying().(*types.Basic).Kind() {
case types.Complex64:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", float32(realV), float32(imagV))
default:
return "Complex", fmt.Sprintf("new Complex(%g,%g)", realV, imagV)
}
}
pogo.LogError(position, "Haxe", fmt.Errorf("haxe.Const() internal error, unknown constant type: %v", lit.Value.Kind()))
return "", ""
}
// only public Literals are created here, so that they can be used by Haxe callers of the Go code
func (l langType) NamedConst(packageName, objectName string, lit ssa.Const, position string) string {
typ, rhs := l.Const(lit, position+":"+packageName+"."+objectName)
return fmt.Sprintf("public static var %s:%s = %s;%s",
l.LangName(packageName, objectName), typ, rhs, l.Comment(position))
}
func (l langType) Global(packageName, objectName string, glob ssa.Global, position string, isPublic bool) string {
pub := "public " // all globals have to be public in Haxe terms
//gTyp := glob.Type().Underlying().(*types.Pointer).Elem().Underlying() // globals are always pointers to an underlying element
/*
ptrTyp := "Pointer"
//ltDesc := "Dynamic" // these values suitable for *types.Struct
ltInit := "null"
switch gTyp.(type) {
case *types.Basic, *types.Pointer, *types.Interface, *types.Chan, *types.Map, *types.Signature:
ptrTyp = "Pointer"
//ltDesc = l.LangType(gTyp, false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Array:
ptrTyp = "Pointer"
//ltDesc = "Array<" + l.LangType(gTyp.(*types.Array).Elem().Underlying(), false, position) + ">"
ltInit = l.LangType(gTyp, true, position)
case *types.Slice:
ptrTyp = "Pointer"
//ltDesc = "Slice" // was: l.LangType(gTyp.(*types.Slice).Elem().Underlying(), false, position)
ltInit = l.LangType(gTyp, true, position)
case *types.Struct:
ptrTyp = "Pointer"
//ltDesc = "Dynamic" // TODO improve!
ltInit = l.LangType(gTyp, true, position)
}
init := "new " + ptrTyp + "(" + ltInit + ")" // initialize basic types only
*/
//return fmt.Sprintf("%sstatic %s %s",
// pub, haxeVar(l.LangName(packageName, objectName), ptrTyp, init, position, "Global()"),
// l.Comment(position))
obj := allocNewObject(glob.Type().Underlying().(*types.Pointer))
return fmt.Sprintf("%sstatic var %s:Pointer=new Pointer(%s); %s",
pub, l.LangName(packageName, objectName), obj, l.Comment(position))
} | if float64(intPart) == frac*float64(uint64(1)<<sigBits) &&
expPart >= -1022 && expPart <= 1023 {
//it is an integer in the correct range
haxeVal = fmt.Sprintf("(%d*Math.pow(2,%d))", intPart, expPart) // NOTE: need the Math.pow to avoid haxe constant folding
} | random_line_split |
ssh.rs | //! The "ssh-rsa" and "ssh-ed25519" recipient types, which allow reusing existing SSH keys
//! for encryption with age-encryption.org/v1.
//!
//! These recipient types should only be used for compatibility with existing keys, and
//! native X25519 keys should be preferred otherwise.
//!
//! Note that these recipient types are not anonymous: the encrypted message will include
//! a short 32-bit ID of the public key.
use aes::{Aes128, Aes192, Aes256};
use aes_gcm::{AeadCore, Aes256Gcm};
use age_core::secrecy::{ExposeSecret, SecretString};
use bcrypt_pbkdf::bcrypt_pbkdf;
use cipher::Unsigned;
use sha2::{Digest, Sha256};
use crate::error::DecryptError;
pub(crate) mod identity;
pub(crate) mod recipient;
pub use identity::{Identity, UnsupportedKey};
pub use recipient::{ParseRecipientKeyError, Recipient};
pub(crate) const SSH_RSA_KEY_PREFIX: &str = "ssh-rsa";
pub(crate) const SSH_ED25519_KEY_PREFIX: &str = "ssh-ed25519";
pub(super) const SSH_RSA_RECIPIENT_TAG: &str = "ssh-rsa";
const SSH_RSA_OAEP_LABEL: &str = "age-encryption.org/v1/ssh-rsa";
pub(super) const SSH_ED25519_RECIPIENT_TAG: &str = "ssh-ed25519";
const SSH_ED25519_RECIPIENT_KEY_LABEL: &[u8] = b"age-encryption.org/v1/ssh-ed25519";
const TAG_LEN_BYTES: usize = 4;
type Aes256CbcDec = cbc::Decryptor<Aes256>;
type Aes128Ctr = ctr::Ctr64BE<Aes128>;
type Aes192Ctr = ctr::Ctr64BE<Aes192>;
type Aes256Ctr = ctr::Ctr64BE<Aes256>;
fn ssh_tag(pubkey: &[u8]) -> [u8; TAG_LEN_BYTES] {
let tag_bytes = Sha256::digest(pubkey);
let mut tag = [0; TAG_LEN_BYTES];
tag.copy_from_slice(&tag_bytes[..TAG_LEN_BYTES]);
tag
}
/// OpenSSH-supported ciphers.
#[allow(clippy::enum_variant_names)]
#[derive(Clone, Copy, Debug)]
enum OpenSshCipher {
Aes256Cbc,
Aes128Ctr,
Aes192Ctr,
Aes256Ctr,
Aes256Gcm,
}
impl OpenSshCipher {
/// Returns the length of the authenticating part of the cipher (the tag of an AEAD).
fn auth_len(self) -> usize {
match self {
OpenSshCipher::Aes256Cbc
| OpenSshCipher::Aes128Ctr
| OpenSshCipher::Aes192Ctr
| OpenSshCipher::Aes256Ctr => 0,
OpenSshCipher::Aes256Gcm => <Aes256Gcm as AeadCore>::TagSize::USIZE,
}
}
fn | (
self,
kdf: &OpenSshKdf,
p: SecretString,
ct: &[u8],
) -> Result<Vec<u8>, DecryptError> {
match self {
OpenSshCipher::Aes256Cbc => decrypt::aes_cbc::<Aes256CbcDec>(kdf, p, ct),
OpenSshCipher::Aes128Ctr => Ok(decrypt::aes_ctr::<Aes128Ctr>(kdf, p, ct)),
OpenSshCipher::Aes192Ctr => Ok(decrypt::aes_ctr::<Aes192Ctr>(kdf, p, ct)),
OpenSshCipher::Aes256Ctr => Ok(decrypt::aes_ctr::<Aes256Ctr>(kdf, p, ct)),
OpenSshCipher::Aes256Gcm => decrypt::aes_gcm::<Aes256Gcm>(kdf, p, ct),
}
}
}
/// OpenSSH-supported KDFs.
#[derive(Clone, Debug)]
enum OpenSshKdf {
Bcrypt { salt: Vec<u8>, rounds: u32 },
}
impl OpenSshKdf {
fn derive(&self, passphrase: SecretString, out_len: usize) -> Vec<u8> {
match self {
OpenSshKdf::Bcrypt { salt, rounds } => {
let mut output = vec![0; out_len];
bcrypt_pbkdf(passphrase.expose_secret(), salt, *rounds, &mut output)
.expect("parameters are valid");
output
}
}
}
}
/// An encrypted SSH private key.
#[derive(Clone)]
pub struct EncryptedKey {
ssh_key: Vec<u8>,
cipher: OpenSshCipher,
kdf: OpenSshKdf,
encrypted: Vec<u8>,
filename: Option<String>,
}
impl EncryptedKey {
/// Decrypts this private key.
pub fn decrypt(
&self,
passphrase: SecretString,
) -> Result<identity::UnencryptedKey, DecryptError> {
let decrypted = self
.cipher
.decrypt(&self.kdf, passphrase, &self.encrypted)?;
let mut parser = read_ssh::openssh_unencrypted_privkey(&self.ssh_key);
match parser(&decrypted)
.map(|(_, sk)| sk)
.map_err(|_| DecryptError::KeyDecryptionFailed)?
{
Identity::Unencrypted(key) => Ok(key),
Identity::Unsupported(_) => Err(DecryptError::KeyDecryptionFailed),
Identity::Encrypted(_) => unreachable!(),
}
}
}
mod decrypt {
use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit, StreamCipher};
use aes_gcm::aead::{AeadMut, KeyInit};
use age_core::secrecy::SecretString;
use cipher::generic_array::{ArrayLength, GenericArray};
use super::OpenSshKdf;
use crate::error::DecryptError;
fn derive_key_material<KeySize: ArrayLength<u8>, IvSize: ArrayLength<u8>>(
kdf: &OpenSshKdf,
passphrase: SecretString,
) -> (GenericArray<u8, KeySize>, GenericArray<u8, IvSize>) {
let kdf_output = kdf.derive(passphrase, KeySize::USIZE + IvSize::USIZE);
let (key, iv) = kdf_output.split_at(KeySize::USIZE);
(
GenericArray::from_exact_iter(key.iter().copied()).expect("key is correct length"),
GenericArray::from_exact_iter(iv.iter().copied()).expect("iv is correct length"),
)
}
pub(super) fn aes_cbc<C: BlockDecryptMut + KeyIvInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase);
let cipher = C::new(&key, &iv);
cipher
.decrypt_padded_vec_mut::<NoPadding>(ciphertext)
.map_err(|_| DecryptError::KeyDecryptionFailed)
}
pub(super) fn aes_ctr<C: StreamCipher + KeyIvInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Vec<u8> {
let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase);
let mut cipher = C::new(&key, &iv);
let mut plaintext = ciphertext.to_vec();
cipher.apply_keystream(&mut plaintext);
plaintext
}
pub(super) fn aes_gcm<C: AeadMut + KeyInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let (key, nonce) = derive_key_material::<C::KeySize, C::NonceSize>(kdf, passphrase);
let mut cipher = C::new(&key);
cipher
.decrypt(&nonce, ciphertext)
.map_err(|_| DecryptError::KeyDecryptionFailed)
}
}
mod read_ssh {
use age_core::secrecy::Secret;
use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint};
use nom::{
branch::alt,
bytes::complete::{tag, take},
combinator::{flat_map, map, map_opt, map_parser, map_res, recognize, rest, verify},
multi::{length_data, length_value},
number::complete::be_u32,
sequence::{delimited, pair, preceded, terminated, tuple},
IResult,
};
use num_traits::Zero;
use rsa::BigUint;
use super::{
identity::{UnencryptedKey, UnsupportedKey},
EncryptedKey, Identity, OpenSshCipher, OpenSshKdf, SSH_ED25519_KEY_PREFIX,
SSH_RSA_KEY_PREFIX,
};
/// The SSH `string` [data type](https://tools.ietf.org/html/rfc4251#section-5).
pub(crate) fn string(input: &[u8]) -> IResult<&[u8], &[u8]> {
length_data(be_u32)(input)
}
/// Recognizes an SSH `string` matching a tag.
#[allow(clippy::needless_lifetimes)] // false positive
pub fn string_tag<'a>(value: &'a str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], &'a [u8]> {
move |input: &[u8]| length_value(be_u32, tag(value))(input)
}
/// The SSH `mpint` data type, restricted to non-negative integers.
///
/// From [RFC 4251](https://tools.ietf.org/html/rfc4251#section-5):
/// ```text
/// Represents multiple precision integers in two's complement format,
/// stored as a string, 8 bits per byte, MSB first. Negative numbers
/// have the value 1 as the most significant bit of the first byte of
/// the data partition. If the most significant bit would be set for
/// a positive number, the number MUST be preceded by a zero byte.
/// Unnecessary leading bytes with the value 0 or 255 MUST NOT be
/// included. The value zero MUST be stored as a string with zero
/// bytes of data.
/// ```
fn mpint(input: &[u8]) -> IResult<&[u8], BigUint> {
map_opt(string, |bytes| {
if bytes.is_empty() {
Some(BigUint::zero())
} else {
// Enforce canonicity
let mut non_zero_bytes = bytes;
while non_zero_bytes[0] == 0 {
non_zero_bytes = &non_zero_bytes[1..];
}
if non_zero_bytes.is_empty() {
// Non-canonical zero
return None;
}
if non_zero_bytes.len() + (non_zero_bytes[0] >> 7) as usize != bytes.len() {
// Negative number or non-canonical positive number
return None;
}
Some(BigUint::from_bytes_be(bytes))
}
})(input)
}
enum CipherResult {
Supported(OpenSshCipher),
Unsupported(String),
}
/// Parse a cipher and KDF.
fn encryption_header(input: &[u8]) -> IResult<&[u8], Option<(CipherResult, OpenSshKdf)>> {
alt((
// If either cipher or KDF is None, both must be.
map(
tuple((string_tag("none"), string_tag("none"), string_tag(""))),
|_| None,
),
map(
tuple((
alt((
map(string_tag("aes256-cbc"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Cbc)
}),
map(string_tag("aes128-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes128Ctr)
}),
map(string_tag("aes192-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes192Ctr)
}),
map(string_tag("aes256-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Ctr)
}),
map(string_tag("aes256-gcm@openssh.com"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Gcm)
}),
map(string, |s| {
CipherResult::Unsupported(String::from_utf8_lossy(s).into_owned())
}),
)),
map_opt(
preceded(
string_tag("bcrypt"),
map_parser(string, tuple((string, be_u32))),
),
|(salt, rounds)| {
if salt.is_empty() || rounds == 0 {
// Invalid parameters
None
} else {
Some(OpenSshKdf::Bcrypt {
salt: salt.into(),
rounds,
})
}
},
),
)),
Some,
),
))(input)
}
/// Parses the comment from an OpenSSH privkey and verifies its deterministic padding.
fn comment_and_padding(input: &[u8]) -> IResult<&[u8], &[u8]> {
terminated(
// Comment
string,
// Deterministic padding
verify(rest, |padding: &[u8]| {
padding.iter().enumerate().all(|(i, b)| *b == (i + 1) as u8)
}),
)(input)
}
/// Internal OpenSSH encoding of an RSA private key.
///
/// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3187-L3198)
fn openssh_rsa_privkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPrivateKey> {
delimited(
string_tag(SSH_RSA_KEY_PREFIX),
map_res(
tuple((mpint, mpint, mpint, mpint, mpint, mpint)),
|(n, e, d, _iqmp, p, q)| rsa::RsaPrivateKey::from_components(n, e, d, vec![p, q]),
),
comment_and_padding,
)(input)
}
/// Internal OpenSSH encoding of an Ed25519 private key.
///
/// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3277-L3283)
fn openssh_ed25519_privkey(input: &[u8]) -> IResult<&[u8], Secret<[u8; 64]>> {
delimited(
string_tag(SSH_ED25519_KEY_PREFIX),
map_opt(tuple((string, string)), |(pubkey_bytes, privkey_bytes)| {
if privkey_bytes.len() == 64 && pubkey_bytes == &privkey_bytes[32..64] {
let mut privkey = [0; 64];
privkey.copy_from_slice(privkey_bytes);
Some(Secret::new(privkey))
} else {
None
}
}),
comment_and_padding,
)(input)
}
/// Unencrypted, padded list of private keys.
///
/// From the [specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key):
/// ```text
/// uint32 checkint
/// uint32 checkint
/// string privatekey1
/// string comment1
/// string privatekey2
/// string comment2
/// ...
/// string privatekeyN
/// string commentN
/// char 1
/// char 2
/// char 3
/// ...
/// char padlen % 255
/// ```
///
/// Note however that the `string` type for the private keys is wrong; it should be
/// an opaque type, or the composite type `(string, byte[])`.
///
/// We only support a single key, like OpenSSH.
#[allow(clippy::needless_lifetimes)]
pub(super) fn openssh_unencrypted_privkey<'a>(
ssh_key: &[u8],
) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> {
// We need to own, move, and clone these in order to keep them alive.
let ssh_key_rsa = ssh_key.to_vec();
let ssh_key_ed25519 = ssh_key.to_vec();
preceded(
// Repeated checkint, intended for verifying correct decryption.
// Don't copy this idea into a new protocol; use an AEAD instead.
map_opt(pair(take(4usize), take(4usize)), |(c1, c2)| {
if c1 == c2 {
Some(c1)
} else {
None
}
}),
alt((
map(openssh_rsa_privkey, move |sk| {
UnencryptedKey::SshRsa(ssh_key_rsa.clone(), Box::new(sk)).into()
}),
map(openssh_ed25519_privkey, move |privkey| {
UnencryptedKey::SshEd25519(ssh_key_ed25519.clone(), privkey).into()
}),
map(string, |key_type| {
UnsupportedKey::Type(String::from_utf8_lossy(key_type).to_string()).into()
}),
)),
)
}
/// An OpenSSH-formatted private key.
///
/// - [Specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key)
pub(super) fn openssh_privkey(input: &[u8]) -> IResult<&[u8], Identity> {
flat_map(
pair(
preceded(tag(b"openssh-key-v1\x00"), encryption_header),
preceded(
// We only support a single key, like OpenSSH:
// https://github.com/openssh/openssh-portable/blob/4103a3ec/sshkey.c#L4171
tag(b"\x00\x00\x00\x01"),
string, // The public key in SSH format
),
),
openssh_privkey_inner,
)(input)
}
/// Encrypted, padded list of private keys.
fn openssh_privkey_inner<'a>(
(encryption, ssh_key): (Option<(CipherResult, OpenSshKdf)>, &'a [u8]),
) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> {
// `PROTOCOL.key` specifies that the encrypted list of private keys is encoded as
// a `string`, but this is incorrect when AEAD ciphertexts are used. For what I
// can only assume are backwards-compatibility reasons, the `string` part encodes
// the ciphertext without tag, and the tag is just appended to the encoding. So
// you can only parse the full data structure by interpreting the encryption
// header.
let expected_remainder = encryption.as_ref().map_or(0, |(cipher_res, _)| {
if let CipherResult::Supported(cipher) = cipher_res {
cipher.auth_len()
} else {
0
}
});
move |input: &[u8]| match &encryption {
None => map_parser(string, openssh_unencrypted_privkey(ssh_key))(input),
Some((cipher_res, kdf)) => map(
map_parser(
recognize(pair(string, take(expected_remainder))),
preceded(be_u32, rest),
),
|private| match cipher_res {
CipherResult::Supported(cipher) => EncryptedKey {
ssh_key: ssh_key.to_vec(),
cipher: *cipher,
kdf: kdf.clone(),
encrypted: private.to_vec(),
filename: None,
}
.into(),
CipherResult::Unsupported(cipher) => {
UnsupportedKey::EncryptedSsh(cipher.clone()).into()
}
},
)(input),
}
}
/// An SSH-encoded RSA public key.
///
/// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6):
/// ```text
/// string "ssh-rsa"
/// mpint e
/// mpint n
/// ```
pub(super) fn rsa_pubkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPublicKey> {
preceded(
string_tag(SSH_RSA_KEY_PREFIX),
map_res(tuple((mpint, mpint)), |(exponent, modulus)| {
rsa::RsaPublicKey::new(modulus, exponent)
}),
)(input)
}
/// An SSH-encoded Ed25519 public key.
///
/// From [draft-ietf-curdle-ssh-ed25519-02](https://tools.ietf.org/html/draft-ietf-curdle-ssh-ed25519-02#section-4):
/// ```text
/// string "ssh-ed25519"
/// string key
/// ```
pub(super) fn ed25519_pubkey(input: &[u8]) -> IResult<&[u8], EdwardsPoint> {
preceded(
string_tag(SSH_ED25519_KEY_PREFIX),
map_opt(string, |buf| {
if buf.len() == 32 {
CompressedEdwardsY::from_slice(buf).decompress()
} else {
None
}
}),
)(input)
}
}
mod write_ssh {
use cookie_factory::{bytes::be_u32, combinator::slice, sequence::tuple, SerializeFn};
use num_traits::identities::Zero;
use rsa::{traits::PublicKeyParts, BigUint};
use std::io::Write;
use super::SSH_RSA_KEY_PREFIX;
/// Writes the SSH `string` data type.
fn string<S: AsRef<[u8]>, W: Write>(value: S) -> impl SerializeFn<W> {
tuple((be_u32(value.as_ref().len() as u32), slice(value)))
}
/// Writes the SSH `mpint` data type.
fn mpint<W: Write>(value: &BigUint) -> impl SerializeFn<W> {
let mut bytes = value.to_bytes_be();
// From RFC 4251 section 5:
// If the most significant bit would be set for a positive number,
// the number MUST be preceded by a zero byte. Unnecessary leading
// bytes with the value 0 or 255 MUST NOT be included. The value
// zero MUST be stored as a string with zero bytes of data.
if value.is_zero() {
// BigUint represents zero as vec![0]
bytes = vec![];
} else if bytes[0] >> 7 != 0 {
bytes.insert(0, 0);
}
string(bytes)
}
/// Writes an SSH-encoded RSA public key.
///
/// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6):
/// ```text
/// string "ssh-rsa"
/// mpint e
/// mpint n
/// ```
pub(super) fn rsa_pubkey<W: Write>(pubkey: &rsa::RsaPublicKey) -> impl SerializeFn<W> {
tuple((
string(SSH_RSA_KEY_PREFIX),
mpint(pubkey.e()),
mpint(pubkey.n()),
))
}
}
| decrypt | identifier_name |
ssh.rs | //! The "ssh-rsa" and "ssh-ed25519" recipient types, which allow reusing existing SSH keys
//! for encryption with age-encryption.org/v1.
//!
//! These recipient types should only be used for compatibility with existing keys, and
//! native X25519 keys should be preferred otherwise.
//!
//! Note that these recipient types are not anonymous: the encrypted message will include
//! a short 32-bit ID of the public key.
use aes::{Aes128, Aes192, Aes256};
use aes_gcm::{AeadCore, Aes256Gcm};
use age_core::secrecy::{ExposeSecret, SecretString};
use bcrypt_pbkdf::bcrypt_pbkdf;
use cipher::Unsigned;
use sha2::{Digest, Sha256};
use crate::error::DecryptError;
pub(crate) mod identity;
pub(crate) mod recipient;
pub use identity::{Identity, UnsupportedKey};
pub use recipient::{ParseRecipientKeyError, Recipient};
pub(crate) const SSH_RSA_KEY_PREFIX: &str = "ssh-rsa";
pub(crate) const SSH_ED25519_KEY_PREFIX: &str = "ssh-ed25519";
pub(super) const SSH_RSA_RECIPIENT_TAG: &str = "ssh-rsa";
const SSH_RSA_OAEP_LABEL: &str = "age-encryption.org/v1/ssh-rsa";
pub(super) const SSH_ED25519_RECIPIENT_TAG: &str = "ssh-ed25519";
const SSH_ED25519_RECIPIENT_KEY_LABEL: &[u8] = b"age-encryption.org/v1/ssh-ed25519";
const TAG_LEN_BYTES: usize = 4;
type Aes256CbcDec = cbc::Decryptor<Aes256>;
type Aes128Ctr = ctr::Ctr64BE<Aes128>;
type Aes192Ctr = ctr::Ctr64BE<Aes192>;
type Aes256Ctr = ctr::Ctr64BE<Aes256>;
fn ssh_tag(pubkey: &[u8]) -> [u8; TAG_LEN_BYTES] {
let tag_bytes = Sha256::digest(pubkey);
let mut tag = [0; TAG_LEN_BYTES];
tag.copy_from_slice(&tag_bytes[..TAG_LEN_BYTES]);
tag
}
/// OpenSSH-supported ciphers.
#[allow(clippy::enum_variant_names)]
#[derive(Clone, Copy, Debug)]
enum OpenSshCipher {
Aes256Cbc,
Aes128Ctr,
Aes192Ctr,
Aes256Ctr,
Aes256Gcm,
}
impl OpenSshCipher {
/// Returns the length of the authenticating part of the cipher (the tag of an AEAD).
fn auth_len(self) -> usize {
match self {
OpenSshCipher::Aes256Cbc
| OpenSshCipher::Aes128Ctr
| OpenSshCipher::Aes192Ctr
| OpenSshCipher::Aes256Ctr => 0,
OpenSshCipher::Aes256Gcm => <Aes256Gcm as AeadCore>::TagSize::USIZE,
}
}
fn decrypt(
self,
kdf: &OpenSshKdf,
p: SecretString,
ct: &[u8],
) -> Result<Vec<u8>, DecryptError> {
match self {
OpenSshCipher::Aes256Cbc => decrypt::aes_cbc::<Aes256CbcDec>(kdf, p, ct),
OpenSshCipher::Aes128Ctr => Ok(decrypt::aes_ctr::<Aes128Ctr>(kdf, p, ct)),
OpenSshCipher::Aes192Ctr => Ok(decrypt::aes_ctr::<Aes192Ctr>(kdf, p, ct)),
OpenSshCipher::Aes256Ctr => Ok(decrypt::aes_ctr::<Aes256Ctr>(kdf, p, ct)),
OpenSshCipher::Aes256Gcm => decrypt::aes_gcm::<Aes256Gcm>(kdf, p, ct),
}
}
}
/// OpenSSH-supported KDFs.
#[derive(Clone, Debug)]
enum OpenSshKdf {
Bcrypt { salt: Vec<u8>, rounds: u32 },
}
impl OpenSshKdf {
fn derive(&self, passphrase: SecretString, out_len: usize) -> Vec<u8> {
match self {
OpenSshKdf::Bcrypt { salt, rounds } => {
let mut output = vec![0; out_len];
bcrypt_pbkdf(passphrase.expose_secret(), salt, *rounds, &mut output)
.expect("parameters are valid");
output
}
}
}
}
/// An encrypted SSH private key.
#[derive(Clone)]
pub struct EncryptedKey {
ssh_key: Vec<u8>,
cipher: OpenSshCipher,
kdf: OpenSshKdf,
encrypted: Vec<u8>,
filename: Option<String>,
}
impl EncryptedKey {
/// Decrypts this private key.
pub fn decrypt(
&self,
passphrase: SecretString,
) -> Result<identity::UnencryptedKey, DecryptError> {
let decrypted = self
.cipher
.decrypt(&self.kdf, passphrase, &self.encrypted)?;
let mut parser = read_ssh::openssh_unencrypted_privkey(&self.ssh_key);
match parser(&decrypted)
.map(|(_, sk)| sk)
.map_err(|_| DecryptError::KeyDecryptionFailed)?
{
Identity::Unencrypted(key) => Ok(key),
Identity::Unsupported(_) => Err(DecryptError::KeyDecryptionFailed), | }
}
mod decrypt {
use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit, StreamCipher};
use aes_gcm::aead::{AeadMut, KeyInit};
use age_core::secrecy::SecretString;
use cipher::generic_array::{ArrayLength, GenericArray};
use super::OpenSshKdf;
use crate::error::DecryptError;
fn derive_key_material<KeySize: ArrayLength<u8>, IvSize: ArrayLength<u8>>(
kdf: &OpenSshKdf,
passphrase: SecretString,
) -> (GenericArray<u8, KeySize>, GenericArray<u8, IvSize>) {
let kdf_output = kdf.derive(passphrase, KeySize::USIZE + IvSize::USIZE);
let (key, iv) = kdf_output.split_at(KeySize::USIZE);
(
GenericArray::from_exact_iter(key.iter().copied()).expect("key is correct length"),
GenericArray::from_exact_iter(iv.iter().copied()).expect("iv is correct length"),
)
}
pub(super) fn aes_cbc<C: BlockDecryptMut + KeyIvInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase);
let cipher = C::new(&key, &iv);
cipher
.decrypt_padded_vec_mut::<NoPadding>(ciphertext)
.map_err(|_| DecryptError::KeyDecryptionFailed)
}
pub(super) fn aes_ctr<C: StreamCipher + KeyIvInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Vec<u8> {
let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase);
let mut cipher = C::new(&key, &iv);
let mut plaintext = ciphertext.to_vec();
cipher.apply_keystream(&mut plaintext);
plaintext
}
pub(super) fn aes_gcm<C: AeadMut + KeyInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let (key, nonce) = derive_key_material::<C::KeySize, C::NonceSize>(kdf, passphrase);
let mut cipher = C::new(&key);
cipher
.decrypt(&nonce, ciphertext)
.map_err(|_| DecryptError::KeyDecryptionFailed)
}
}
mod read_ssh {
use age_core::secrecy::Secret;
use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint};
use nom::{
branch::alt,
bytes::complete::{tag, take},
combinator::{flat_map, map, map_opt, map_parser, map_res, recognize, rest, verify},
multi::{length_data, length_value},
number::complete::be_u32,
sequence::{delimited, pair, preceded, terminated, tuple},
IResult,
};
use num_traits::Zero;
use rsa::BigUint;
use super::{
identity::{UnencryptedKey, UnsupportedKey},
EncryptedKey, Identity, OpenSshCipher, OpenSshKdf, SSH_ED25519_KEY_PREFIX,
SSH_RSA_KEY_PREFIX,
};
/// The SSH `string` [data type](https://tools.ietf.org/html/rfc4251#section-5).
pub(crate) fn string(input: &[u8]) -> IResult<&[u8], &[u8]> {
length_data(be_u32)(input)
}
/// Recognizes an SSH `string` matching a tag.
#[allow(clippy::needless_lifetimes)] // false positive
pub fn string_tag<'a>(value: &'a str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], &'a [u8]> {
move |input: &[u8]| length_value(be_u32, tag(value))(input)
}
/// The SSH `mpint` data type, restricted to non-negative integers.
///
/// From [RFC 4251](https://tools.ietf.org/html/rfc4251#section-5):
/// ```text
/// Represents multiple precision integers in two's complement format,
/// stored as a string, 8 bits per byte, MSB first. Negative numbers
/// have the value 1 as the most significant bit of the first byte of
/// the data partition. If the most significant bit would be set for
/// a positive number, the number MUST be preceded by a zero byte.
/// Unnecessary leading bytes with the value 0 or 255 MUST NOT be
/// included. The value zero MUST be stored as a string with zero
/// bytes of data.
/// ```
fn mpint(input: &[u8]) -> IResult<&[u8], BigUint> {
map_opt(string, |bytes| {
if bytes.is_empty() {
Some(BigUint::zero())
} else {
// Enforce canonicity
let mut non_zero_bytes = bytes;
while non_zero_bytes[0] == 0 {
non_zero_bytes = &non_zero_bytes[1..];
}
if non_zero_bytes.is_empty() {
// Non-canonical zero
return None;
}
if non_zero_bytes.len() + (non_zero_bytes[0] >> 7) as usize != bytes.len() {
// Negative number or non-canonical positive number
return None;
}
Some(BigUint::from_bytes_be(bytes))
}
})(input)
}
enum CipherResult {
Supported(OpenSshCipher),
Unsupported(String),
}
/// Parse a cipher and KDF.
fn encryption_header(input: &[u8]) -> IResult<&[u8], Option<(CipherResult, OpenSshKdf)>> {
alt((
// If either cipher or KDF is None, both must be.
map(
tuple((string_tag("none"), string_tag("none"), string_tag(""))),
|_| None,
),
map(
tuple((
alt((
map(string_tag("aes256-cbc"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Cbc)
}),
map(string_tag("aes128-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes128Ctr)
}),
map(string_tag("aes192-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes192Ctr)
}),
map(string_tag("aes256-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Ctr)
}),
map(string_tag("aes256-gcm@openssh.com"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Gcm)
}),
map(string, |s| {
CipherResult::Unsupported(String::from_utf8_lossy(s).into_owned())
}),
)),
map_opt(
preceded(
string_tag("bcrypt"),
map_parser(string, tuple((string, be_u32))),
),
|(salt, rounds)| {
if salt.is_empty() || rounds == 0 {
// Invalid parameters
None
} else {
Some(OpenSshKdf::Bcrypt {
salt: salt.into(),
rounds,
})
}
},
),
)),
Some,
),
))(input)
}
/// Parses the comment from an OpenSSH privkey and verifies its deterministic padding.
fn comment_and_padding(input: &[u8]) -> IResult<&[u8], &[u8]> {
terminated(
// Comment
string,
// Deterministic padding
verify(rest, |padding: &[u8]| {
padding.iter().enumerate().all(|(i, b)| *b == (i + 1) as u8)
}),
)(input)
}
/// Internal OpenSSH encoding of an RSA private key.
///
/// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3187-L3198)
fn openssh_rsa_privkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPrivateKey> {
delimited(
string_tag(SSH_RSA_KEY_PREFIX),
map_res(
tuple((mpint, mpint, mpint, mpint, mpint, mpint)),
|(n, e, d, _iqmp, p, q)| rsa::RsaPrivateKey::from_components(n, e, d, vec![p, q]),
),
comment_and_padding,
)(input)
}
/// Internal OpenSSH encoding of an Ed25519 private key.
///
/// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3277-L3283)
fn openssh_ed25519_privkey(input: &[u8]) -> IResult<&[u8], Secret<[u8; 64]>> {
delimited(
string_tag(SSH_ED25519_KEY_PREFIX),
map_opt(tuple((string, string)), |(pubkey_bytes, privkey_bytes)| {
if privkey_bytes.len() == 64 && pubkey_bytes == &privkey_bytes[32..64] {
let mut privkey = [0; 64];
privkey.copy_from_slice(privkey_bytes);
Some(Secret::new(privkey))
} else {
None
}
}),
comment_and_padding,
)(input)
}
/// Unencrypted, padded list of private keys.
///
/// From the [specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key):
/// ```text
/// uint32 checkint
/// uint32 checkint
/// string privatekey1
/// string comment1
/// string privatekey2
/// string comment2
/// ...
/// string privatekeyN
/// string commentN
/// char 1
/// char 2
/// char 3
/// ...
/// char padlen % 255
/// ```
///
/// Note however that the `string` type for the private keys is wrong; it should be
/// an opaque type, or the composite type `(string, byte[])`.
///
/// We only support a single key, like OpenSSH.
#[allow(clippy::needless_lifetimes)]
pub(super) fn openssh_unencrypted_privkey<'a>(
ssh_key: &[u8],
) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> {
// We need to own, move, and clone these in order to keep them alive.
let ssh_key_rsa = ssh_key.to_vec();
let ssh_key_ed25519 = ssh_key.to_vec();
preceded(
// Repeated checkint, intended for verifying correct decryption.
// Don't copy this idea into a new protocol; use an AEAD instead.
map_opt(pair(take(4usize), take(4usize)), |(c1, c2)| {
if c1 == c2 {
Some(c1)
} else {
None
}
}),
alt((
map(openssh_rsa_privkey, move |sk| {
UnencryptedKey::SshRsa(ssh_key_rsa.clone(), Box::new(sk)).into()
}),
map(openssh_ed25519_privkey, move |privkey| {
UnencryptedKey::SshEd25519(ssh_key_ed25519.clone(), privkey).into()
}),
map(string, |key_type| {
UnsupportedKey::Type(String::from_utf8_lossy(key_type).to_string()).into()
}),
)),
)
}
/// An OpenSSH-formatted private key.
///
/// - [Specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key)
pub(super) fn openssh_privkey(input: &[u8]) -> IResult<&[u8], Identity> {
flat_map(
pair(
preceded(tag(b"openssh-key-v1\x00"), encryption_header),
preceded(
// We only support a single key, like OpenSSH:
// https://github.com/openssh/openssh-portable/blob/4103a3ec/sshkey.c#L4171
tag(b"\x00\x00\x00\x01"),
string, // The public key in SSH format
),
),
openssh_privkey_inner,
)(input)
}
/// Encrypted, padded list of private keys.
fn openssh_privkey_inner<'a>(
(encryption, ssh_key): (Option<(CipherResult, OpenSshKdf)>, &'a [u8]),
) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> {
// `PROTOCOL.key` specifies that the encrypted list of private keys is encoded as
// a `string`, but this is incorrect when AEAD ciphertexts are used. For what I
// can only assume are backwards-compatibility reasons, the `string` part encodes
// the ciphertext without tag, and the tag is just appended to the encoding. So
// you can only parse the full data structure by interpreting the encryption
// header.
let expected_remainder = encryption.as_ref().map_or(0, |(cipher_res, _)| {
if let CipherResult::Supported(cipher) = cipher_res {
cipher.auth_len()
} else {
0
}
});
move |input: &[u8]| match &encryption {
None => map_parser(string, openssh_unencrypted_privkey(ssh_key))(input),
Some((cipher_res, kdf)) => map(
map_parser(
recognize(pair(string, take(expected_remainder))),
preceded(be_u32, rest),
),
|private| match cipher_res {
CipherResult::Supported(cipher) => EncryptedKey {
ssh_key: ssh_key.to_vec(),
cipher: *cipher,
kdf: kdf.clone(),
encrypted: private.to_vec(),
filename: None,
}
.into(),
CipherResult::Unsupported(cipher) => {
UnsupportedKey::EncryptedSsh(cipher.clone()).into()
}
},
)(input),
}
}
/// An SSH-encoded RSA public key.
///
/// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6):
/// ```text
/// string "ssh-rsa"
/// mpint e
/// mpint n
/// ```
pub(super) fn rsa_pubkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPublicKey> {
preceded(
string_tag(SSH_RSA_KEY_PREFIX),
map_res(tuple((mpint, mpint)), |(exponent, modulus)| {
rsa::RsaPublicKey::new(modulus, exponent)
}),
)(input)
}
/// An SSH-encoded Ed25519 public key.
///
/// From [draft-ietf-curdle-ssh-ed25519-02](https://tools.ietf.org/html/draft-ietf-curdle-ssh-ed25519-02#section-4):
/// ```text
/// string "ssh-ed25519"
/// string key
/// ```
pub(super) fn ed25519_pubkey(input: &[u8]) -> IResult<&[u8], EdwardsPoint> {
preceded(
string_tag(SSH_ED25519_KEY_PREFIX),
map_opt(string, |buf| {
if buf.len() == 32 {
CompressedEdwardsY::from_slice(buf).decompress()
} else {
None
}
}),
)(input)
}
}
mod write_ssh {
use cookie_factory::{bytes::be_u32, combinator::slice, sequence::tuple, SerializeFn};
use num_traits::identities::Zero;
use rsa::{traits::PublicKeyParts, BigUint};
use std::io::Write;
use super::SSH_RSA_KEY_PREFIX;
/// Writes the SSH `string` data type.
fn string<S: AsRef<[u8]>, W: Write>(value: S) -> impl SerializeFn<W> {
tuple((be_u32(value.as_ref().len() as u32), slice(value)))
}
/// Writes the SSH `mpint` data type.
fn mpint<W: Write>(value: &BigUint) -> impl SerializeFn<W> {
let mut bytes = value.to_bytes_be();
// From RFC 4251 section 5:
// If the most significant bit would be set for a positive number,
// the number MUST be preceded by a zero byte. Unnecessary leading
// bytes with the value 0 or 255 MUST NOT be included. The value
// zero MUST be stored as a string with zero bytes of data.
if value.is_zero() {
// BigUint represents zero as vec![0]
bytes = vec![];
} else if bytes[0] >> 7 != 0 {
bytes.insert(0, 0);
}
string(bytes)
}
/// Writes an SSH-encoded RSA public key.
///
/// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6):
/// ```text
/// string "ssh-rsa"
/// mpint e
/// mpint n
/// ```
pub(super) fn rsa_pubkey<W: Write>(pubkey: &rsa::RsaPublicKey) -> impl SerializeFn<W> {
tuple((
string(SSH_RSA_KEY_PREFIX),
mpint(pubkey.e()),
mpint(pubkey.n()),
))
}
} | Identity::Encrypted(_) => unreachable!(),
} | random_line_split |
ssh.rs | //! The "ssh-rsa" and "ssh-ed25519" recipient types, which allow reusing existing SSH keys
//! for encryption with age-encryption.org/v1.
//!
//! These recipient types should only be used for compatibility with existing keys, and
//! native X25519 keys should be preferred otherwise.
//!
//! Note that these recipient types are not anonymous: the encrypted message will include
//! a short 32-bit ID of the public key.
use aes::{Aes128, Aes192, Aes256};
use aes_gcm::{AeadCore, Aes256Gcm};
use age_core::secrecy::{ExposeSecret, SecretString};
use bcrypt_pbkdf::bcrypt_pbkdf;
use cipher::Unsigned;
use sha2::{Digest, Sha256};
use crate::error::DecryptError;
pub(crate) mod identity;
pub(crate) mod recipient;
pub use identity::{Identity, UnsupportedKey};
pub use recipient::{ParseRecipientKeyError, Recipient};
pub(crate) const SSH_RSA_KEY_PREFIX: &str = "ssh-rsa";
pub(crate) const SSH_ED25519_KEY_PREFIX: &str = "ssh-ed25519";
pub(super) const SSH_RSA_RECIPIENT_TAG: &str = "ssh-rsa";
const SSH_RSA_OAEP_LABEL: &str = "age-encryption.org/v1/ssh-rsa";
pub(super) const SSH_ED25519_RECIPIENT_TAG: &str = "ssh-ed25519";
const SSH_ED25519_RECIPIENT_KEY_LABEL: &[u8] = b"age-encryption.org/v1/ssh-ed25519";
const TAG_LEN_BYTES: usize = 4;
type Aes256CbcDec = cbc::Decryptor<Aes256>;
type Aes128Ctr = ctr::Ctr64BE<Aes128>;
type Aes192Ctr = ctr::Ctr64BE<Aes192>;
type Aes256Ctr = ctr::Ctr64BE<Aes256>;
fn ssh_tag(pubkey: &[u8]) -> [u8; TAG_LEN_BYTES] {
let tag_bytes = Sha256::digest(pubkey);
let mut tag = [0; TAG_LEN_BYTES];
tag.copy_from_slice(&tag_bytes[..TAG_LEN_BYTES]);
tag
}
/// OpenSSH-supported ciphers.
#[allow(clippy::enum_variant_names)]
#[derive(Clone, Copy, Debug)]
enum OpenSshCipher {
Aes256Cbc,
Aes128Ctr,
Aes192Ctr,
Aes256Ctr,
Aes256Gcm,
}
impl OpenSshCipher {
/// Returns the length of the authenticating part of the cipher (the tag of an AEAD).
fn auth_len(self) -> usize {
match self {
OpenSshCipher::Aes256Cbc
| OpenSshCipher::Aes128Ctr
| OpenSshCipher::Aes192Ctr
| OpenSshCipher::Aes256Ctr => 0,
OpenSshCipher::Aes256Gcm => <Aes256Gcm as AeadCore>::TagSize::USIZE,
}
}
fn decrypt(
self,
kdf: &OpenSshKdf,
p: SecretString,
ct: &[u8],
) -> Result<Vec<u8>, DecryptError> {
match self {
OpenSshCipher::Aes256Cbc => decrypt::aes_cbc::<Aes256CbcDec>(kdf, p, ct),
OpenSshCipher::Aes128Ctr => Ok(decrypt::aes_ctr::<Aes128Ctr>(kdf, p, ct)),
OpenSshCipher::Aes192Ctr => Ok(decrypt::aes_ctr::<Aes192Ctr>(kdf, p, ct)),
OpenSshCipher::Aes256Ctr => Ok(decrypt::aes_ctr::<Aes256Ctr>(kdf, p, ct)),
OpenSshCipher::Aes256Gcm => decrypt::aes_gcm::<Aes256Gcm>(kdf, p, ct),
}
}
}
/// OpenSSH-supported KDFs.
#[derive(Clone, Debug)]
enum OpenSshKdf {
Bcrypt { salt: Vec<u8>, rounds: u32 },
}
impl OpenSshKdf {
fn derive(&self, passphrase: SecretString, out_len: usize) -> Vec<u8> {
match self {
OpenSshKdf::Bcrypt { salt, rounds } => {
let mut output = vec![0; out_len];
bcrypt_pbkdf(passphrase.expose_secret(), salt, *rounds, &mut output)
.expect("parameters are valid");
output
}
}
}
}
/// An encrypted SSH private key.
#[derive(Clone)]
pub struct EncryptedKey {
ssh_key: Vec<u8>,
cipher: OpenSshCipher,
kdf: OpenSshKdf,
encrypted: Vec<u8>,
filename: Option<String>,
}
impl EncryptedKey {
/// Decrypts this private key.
pub fn decrypt(
&self,
passphrase: SecretString,
) -> Result<identity::UnencryptedKey, DecryptError> {
let decrypted = self
.cipher
.decrypt(&self.kdf, passphrase, &self.encrypted)?;
let mut parser = read_ssh::openssh_unencrypted_privkey(&self.ssh_key);
match parser(&decrypted)
.map(|(_, sk)| sk)
.map_err(|_| DecryptError::KeyDecryptionFailed)?
{
Identity::Unencrypted(key) => Ok(key),
Identity::Unsupported(_) => Err(DecryptError::KeyDecryptionFailed),
Identity::Encrypted(_) => unreachable!(),
}
}
}
mod decrypt {
use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit, StreamCipher};
use aes_gcm::aead::{AeadMut, KeyInit};
use age_core::secrecy::SecretString;
use cipher::generic_array::{ArrayLength, GenericArray};
use super::OpenSshKdf;
use crate::error::DecryptError;
fn derive_key_material<KeySize: ArrayLength<u8>, IvSize: ArrayLength<u8>>(
kdf: &OpenSshKdf,
passphrase: SecretString,
) -> (GenericArray<u8, KeySize>, GenericArray<u8, IvSize>) {
let kdf_output = kdf.derive(passphrase, KeySize::USIZE + IvSize::USIZE);
let (key, iv) = kdf_output.split_at(KeySize::USIZE);
(
GenericArray::from_exact_iter(key.iter().copied()).expect("key is correct length"),
GenericArray::from_exact_iter(iv.iter().copied()).expect("iv is correct length"),
)
}
pub(super) fn aes_cbc<C: BlockDecryptMut + KeyIvInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase);
let cipher = C::new(&key, &iv);
cipher
.decrypt_padded_vec_mut::<NoPadding>(ciphertext)
.map_err(|_| DecryptError::KeyDecryptionFailed)
}
pub(super) fn aes_ctr<C: StreamCipher + KeyIvInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Vec<u8> {
let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase);
let mut cipher = C::new(&key, &iv);
let mut plaintext = ciphertext.to_vec();
cipher.apply_keystream(&mut plaintext);
plaintext
}
pub(super) fn aes_gcm<C: AeadMut + KeyInit>(
kdf: &OpenSshKdf,
passphrase: SecretString,
ciphertext: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let (key, nonce) = derive_key_material::<C::KeySize, C::NonceSize>(kdf, passphrase);
let mut cipher = C::new(&key);
cipher
.decrypt(&nonce, ciphertext)
.map_err(|_| DecryptError::KeyDecryptionFailed)
}
}
mod read_ssh {
use age_core::secrecy::Secret;
use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint};
use nom::{
branch::alt,
bytes::complete::{tag, take},
combinator::{flat_map, map, map_opt, map_parser, map_res, recognize, rest, verify},
multi::{length_data, length_value},
number::complete::be_u32,
sequence::{delimited, pair, preceded, terminated, tuple},
IResult,
};
use num_traits::Zero;
use rsa::BigUint;
use super::{
identity::{UnencryptedKey, UnsupportedKey},
EncryptedKey, Identity, OpenSshCipher, OpenSshKdf, SSH_ED25519_KEY_PREFIX,
SSH_RSA_KEY_PREFIX,
};
/// The SSH `string` [data type](https://tools.ietf.org/html/rfc4251#section-5).
pub(crate) fn string(input: &[u8]) -> IResult<&[u8], &[u8]> {
length_data(be_u32)(input)
}
/// Recognizes an SSH `string` matching a tag.
#[allow(clippy::needless_lifetimes)] // false positive
pub fn string_tag<'a>(value: &'a str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], &'a [u8]> {
move |input: &[u8]| length_value(be_u32, tag(value))(input)
}
/// The SSH `mpint` data type, restricted to non-negative integers.
///
/// From [RFC 4251](https://tools.ietf.org/html/rfc4251#section-5):
/// ```text
/// Represents multiple precision integers in two's complement format,
/// stored as a string, 8 bits per byte, MSB first. Negative numbers
/// have the value 1 as the most significant bit of the first byte of
/// the data partition. If the most significant bit would be set for
/// a positive number, the number MUST be preceded by a zero byte.
/// Unnecessary leading bytes with the value 0 or 255 MUST NOT be
/// included. The value zero MUST be stored as a string with zero
/// bytes of data.
/// ```
fn mpint(input: &[u8]) -> IResult<&[u8], BigUint> {
map_opt(string, |bytes| {
if bytes.is_empty() {
Some(BigUint::zero())
} else {
// Enforce canonicity
let mut non_zero_bytes = bytes;
while non_zero_bytes[0] == 0 {
non_zero_bytes = &non_zero_bytes[1..];
}
if non_zero_bytes.is_empty() {
// Non-canonical zero
return None;
}
if non_zero_bytes.len() + (non_zero_bytes[0] >> 7) as usize != bytes.len() {
// Negative number or non-canonical positive number
return None;
}
Some(BigUint::from_bytes_be(bytes))
}
})(input)
}
enum CipherResult {
Supported(OpenSshCipher),
Unsupported(String),
}
/// Parse a cipher and KDF.
fn encryption_header(input: &[u8]) -> IResult<&[u8], Option<(CipherResult, OpenSshKdf)>> {
alt((
// If either cipher or KDF is None, both must be.
map(
tuple((string_tag("none"), string_tag("none"), string_tag(""))),
|_| None,
),
map(
tuple((
alt((
map(string_tag("aes256-cbc"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Cbc)
}),
map(string_tag("aes128-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes128Ctr)
}),
map(string_tag("aes192-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes192Ctr)
}),
map(string_tag("aes256-ctr"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Ctr)
}),
map(string_tag("aes256-gcm@openssh.com"), |_| {
CipherResult::Supported(OpenSshCipher::Aes256Gcm)
}),
map(string, |s| {
CipherResult::Unsupported(String::from_utf8_lossy(s).into_owned())
}),
)),
map_opt(
preceded(
string_tag("bcrypt"),
map_parser(string, tuple((string, be_u32))),
),
|(salt, rounds)| {
if salt.is_empty() || rounds == 0 {
// Invalid parameters
None
} else {
Some(OpenSshKdf::Bcrypt {
salt: salt.into(),
rounds,
})
}
},
),
)),
Some,
),
))(input)
}
/// Parses the comment from an OpenSSH privkey and verifies its deterministic padding.
fn comment_and_padding(input: &[u8]) -> IResult<&[u8], &[u8]> {
terminated(
// Comment
string,
// Deterministic padding
verify(rest, |padding: &[u8]| {
padding.iter().enumerate().all(|(i, b)| *b == (i + 1) as u8)
}),
)(input)
}
/// Internal OpenSSH encoding of an RSA private key.
///
/// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3187-L3198)
fn openssh_rsa_privkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPrivateKey> {
delimited(
string_tag(SSH_RSA_KEY_PREFIX),
map_res(
tuple((mpint, mpint, mpint, mpint, mpint, mpint)),
|(n, e, d, _iqmp, p, q)| rsa::RsaPrivateKey::from_components(n, e, d, vec![p, q]),
),
comment_and_padding,
)(input)
}
/// Internal OpenSSH encoding of an Ed25519 private key.
///
/// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3277-L3283)
fn openssh_ed25519_privkey(input: &[u8]) -> IResult<&[u8], Secret<[u8; 64]>> {
delimited(
string_tag(SSH_ED25519_KEY_PREFIX),
map_opt(tuple((string, string)), |(pubkey_bytes, privkey_bytes)| {
if privkey_bytes.len() == 64 && pubkey_bytes == &privkey_bytes[32..64] {
let mut privkey = [0; 64];
privkey.copy_from_slice(privkey_bytes);
Some(Secret::new(privkey))
} else {
None
}
}),
comment_and_padding,
)(input)
}
/// Unencrypted, padded list of private keys.
///
/// From the [specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key):
/// ```text
/// uint32 checkint
/// uint32 checkint
/// string privatekey1
/// string comment1
/// string privatekey2
/// string comment2
/// ...
/// string privatekeyN
/// string commentN
/// char 1
/// char 2
/// char 3
/// ...
/// char padlen % 255
/// ```
///
/// Note however that the `string` type for the private keys is wrong; it should be
/// an opaque type, or the composite type `(string, byte[])`.
///
/// We only support a single key, like OpenSSH.
#[allow(clippy::needless_lifetimes)]
pub(super) fn openssh_unencrypted_privkey<'a>(
ssh_key: &[u8],
) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> {
// We need to own, move, and clone these in order to keep them alive.
let ssh_key_rsa = ssh_key.to_vec();
let ssh_key_ed25519 = ssh_key.to_vec();
preceded(
// Repeated checkint, intended for verifying correct decryption.
// Don't copy this idea into a new protocol; use an AEAD instead.
map_opt(pair(take(4usize), take(4usize)), |(c1, c2)| {
if c1 == c2 | else {
None
}
}),
alt((
map(openssh_rsa_privkey, move |sk| {
UnencryptedKey::SshRsa(ssh_key_rsa.clone(), Box::new(sk)).into()
}),
map(openssh_ed25519_privkey, move |privkey| {
UnencryptedKey::SshEd25519(ssh_key_ed25519.clone(), privkey).into()
}),
map(string, |key_type| {
UnsupportedKey::Type(String::from_utf8_lossy(key_type).to_string()).into()
}),
)),
)
}
/// An OpenSSH-formatted private key.
///
/// - [Specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key)
pub(super) fn openssh_privkey(input: &[u8]) -> IResult<&[u8], Identity> {
flat_map(
pair(
preceded(tag(b"openssh-key-v1\x00"), encryption_header),
preceded(
// We only support a single key, like OpenSSH:
// https://github.com/openssh/openssh-portable/blob/4103a3ec/sshkey.c#L4171
tag(b"\x00\x00\x00\x01"),
string, // The public key in SSH format
),
),
openssh_privkey_inner,
)(input)
}
/// Encrypted, padded list of private keys.
fn openssh_privkey_inner<'a>(
(encryption, ssh_key): (Option<(CipherResult, OpenSshKdf)>, &'a [u8]),
) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> {
// `PROTOCOL.key` specifies that the encrypted list of private keys is encoded as
// a `string`, but this is incorrect when AEAD ciphertexts are used. For what I
// can only assume are backwards-compatibility reasons, the `string` part encodes
// the ciphertext without tag, and the tag is just appended to the encoding. So
// you can only parse the full data structure by interpreting the encryption
// header.
let expected_remainder = encryption.as_ref().map_or(0, |(cipher_res, _)| {
if let CipherResult::Supported(cipher) = cipher_res {
cipher.auth_len()
} else {
0
}
});
move |input: &[u8]| match &encryption {
None => map_parser(string, openssh_unencrypted_privkey(ssh_key))(input),
Some((cipher_res, kdf)) => map(
map_parser(
recognize(pair(string, take(expected_remainder))),
preceded(be_u32, rest),
),
|private| match cipher_res {
CipherResult::Supported(cipher) => EncryptedKey {
ssh_key: ssh_key.to_vec(),
cipher: *cipher,
kdf: kdf.clone(),
encrypted: private.to_vec(),
filename: None,
}
.into(),
CipherResult::Unsupported(cipher) => {
UnsupportedKey::EncryptedSsh(cipher.clone()).into()
}
},
)(input),
}
}
/// An SSH-encoded RSA public key.
///
/// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6):
/// ```text
/// string "ssh-rsa"
/// mpint e
/// mpint n
/// ```
pub(super) fn rsa_pubkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPublicKey> {
preceded(
string_tag(SSH_RSA_KEY_PREFIX),
map_res(tuple((mpint, mpint)), |(exponent, modulus)| {
rsa::RsaPublicKey::new(modulus, exponent)
}),
)(input)
}
/// An SSH-encoded Ed25519 public key.
///
/// From [draft-ietf-curdle-ssh-ed25519-02](https://tools.ietf.org/html/draft-ietf-curdle-ssh-ed25519-02#section-4):
/// ```text
/// string "ssh-ed25519"
/// string key
/// ```
pub(super) fn ed25519_pubkey(input: &[u8]) -> IResult<&[u8], EdwardsPoint> {
preceded(
string_tag(SSH_ED25519_KEY_PREFIX),
map_opt(string, |buf| {
if buf.len() == 32 {
CompressedEdwardsY::from_slice(buf).decompress()
} else {
None
}
}),
)(input)
}
}
mod write_ssh {
use cookie_factory::{bytes::be_u32, combinator::slice, sequence::tuple, SerializeFn};
use num_traits::identities::Zero;
use rsa::{traits::PublicKeyParts, BigUint};
use std::io::Write;
use super::SSH_RSA_KEY_PREFIX;
/// Writes the SSH `string` data type.
fn string<S: AsRef<[u8]>, W: Write>(value: S) -> impl SerializeFn<W> {
tuple((be_u32(value.as_ref().len() as u32), slice(value)))
}
/// Writes the SSH `mpint` data type.
fn mpint<W: Write>(value: &BigUint) -> impl SerializeFn<W> {
let mut bytes = value.to_bytes_be();
// From RFC 4251 section 5:
// If the most significant bit would be set for a positive number,
// the number MUST be preceded by a zero byte. Unnecessary leading
// bytes with the value 0 or 255 MUST NOT be included. The value
// zero MUST be stored as a string with zero bytes of data.
if value.is_zero() {
// BigUint represents zero as vec![0]
bytes = vec![];
} else if bytes[0] >> 7 != 0 {
bytes.insert(0, 0);
}
string(bytes)
}
/// Writes an SSH-encoded RSA public key.
///
/// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6):
/// ```text
/// string "ssh-rsa"
/// mpint e
/// mpint n
/// ```
pub(super) fn rsa_pubkey<W: Write>(pubkey: &rsa::RsaPublicKey) -> impl SerializeFn<W> {
tuple((
string(SSH_RSA_KEY_PREFIX),
mpint(pubkey.e()),
mpint(pubkey.n()),
))
}
}
| {
Some(c1)
} | conditional_block |
Rmag_aperture_annulus_per_target.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 05:22:35 2019
@author: altsai
"""
import os
import sys
import shutil
import numpy as np
import csv
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord # High-level coordinates
#from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
#from astropy.coordinates import Angle, Latitude, Longitude # Angles
#from astropy.coordinates import match_coordinates_sky
from astropy.table import Table
from photutils import CircularAperture
from photutils import SkyCircularAperture
from photutils import aperture_photometry
from photutils import CircularAnnulus
from photutils import SkyCircularAnnulus
# https://photutils.readthedocs.io/en/stable/aperture.html
#from phot import aperphot
# http://www.mit.edu/~iancross/python/phot.html
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
#from photutils import DAOStarFinder
#from astropy.stats import mad_std
# https://photutils.readthedocs.io/en/stable/getting_started.html
from numpy.polynomial.polynomial import polyfit
'''
# 3C345-20190714@135841-R_Astrodon_2018_calib.fits
#fits_root=input('Please input the file name of the fitsimage: ').split('.',-1)[0].split('_calib',-1)[0]
fits_root='3C345-20190822@130653-R_Astrodon_2018'
fits_calib=fits_root+'_calib.fits'
fits_ori=fits_root+'.fts'
#print(fits_root)
#print(fits_calib)
#print(fits_ori)
date=fits_root.split('@',-1)[0].split('-',-1)[-1]
print(date)
print(type(date))
yearmonth=date[0:6]
'''
#file_info='gasp_target_fitsheader_info_slt201907.txt'
file_info='gasp_target_fitsheader_info_slt2019.txt'
#file_info='gasp_target_fitsheader_info_slt'+date+'.txt'
#file_info='gasp_target_fitsheader_info_slt'+year+month+'.txt'
print('... will read '+file_info+' ...')
df_info=pd.read_csv(file_info,delimiter='|')
#print(df_info)
obj_name='3C345'
dir_obj='Rmag_InstMag/'+obj_name+'/annu/'
if os.path.exists(dir_obj):
shutil.rmtree(dir_obj)
os.makedirs(dir_obj,exist_ok=True)
dir_refstar='RefStar/'
file_refstar='gasp_refStar_radec.txt'
df_refstar=pd.read_csv(file_refstar,sep='|')
#print(df_refstar)
idx_refstar=df_refstar[df_refstar['ObjectName']==obj_name].index.tolist()
n_refstar=len(idx_refstar)
ra_deg=np.array([0.]*n_refstar)
dec_deg=np.array([0.]*n_refstar)
radec_deg=np.array([[0.,0.]]*n_refstar)
ra_hhmmss=['']*n_refstar
dec_ddmmss=['']*n_refstar
rmag=np.array([0.]*n_refstar)
rmag_err=np.array([0.]*n_refstar)
print(rmag)
mag_instrument=np.array([0.]*n_refstar)
j=0
for i in idx_refstar:
ra_deg[j]=df_refstar['RefStarRA_deg'][i]
dec_deg[j]=df_refstar['RefStarDEC_deg'][i]
# print(ra_deg[j],dec_deg[j])
radec_deg[j]=[ra_deg[j],dec_deg[j]]
# print(i,j,radec_deg[j])
ra_hhmmss[j]=df_refstar['RefStarRA_hhmmss'][i]
dec_ddmmss[j]=df_refstar['RefStarDEC_ddmmss'][i]
# print(i,j,radec_deg[j],ra_hhmmss[j],dec_ddmmss[j])
rmag[j]=df_refstar['Rmag'][i]
rmag_err[j]=df_refstar['Rmag_err'][i]
j=j+1
idx_fitsheader=df_info[df_info['Object']==obj_name].index
#print(idx_fitsheader)
#obj_name=df_info['Object'][idx_fitsheader]
fwhm=df_info['FWHM'][idx_fitsheader]
#print(fwhm)
#sys.exit(0)
#position= SkyCoord([ICRS(ra=ra_deg*u.deg,dec=dec_deg*u.deg)])
positions= SkyCoord(ra_deg,dec_deg,unit=(u.hourangle,u.deg),frame='icrs')
#print(positions)
#print(positions.ra)
#print(positions.dec)
#sys.exit(0)
#=======================
fits_ori=df_info['Filename'][idx_fitsheader]
#fits_root=input('Please input the file name of the fitsimage: ').split('.',-1)[0].split('_calib',-1)[0]
#fits_root=['']*n_idx
#fits_calib=['']*n_idx
#sys.exit(0)
# i=2674
# 3C345-20190822@130653-R_Astrodon_2018.fts
# 3C345-20190714@135841-R_Astrodon_2018_calib.fits
'''
# here is for test
idx_fitsheader=np.array([1107,1315,1316])
#idx_fitsheader=np.array([2673])
print(fits_ori[idx_fitsheader])
'''
n_idx=len(idx_fitsheader)
Rmag0=np.array([0.]*n_idx) | r_circle_as=r_circle*u.arcsec
r_inner=20.
r_outer=25.
r_inner_as=r_inner*u.arcsec
r_outer_as=r_outer*u.arcsec
aperture=SkyCircularAperture(positions, r_circle_as)
#print(aperture)
r_as=aperture.r
print('r_as =',r_as)
print('number of reference stars : ',n_refstar)
k=0
for i in idx_fitsheader:
print('-----------------------')
fits_root=fits_ori[i].split('.',-1)[0].split('_calib',-1)[0]
fits_calib=fits_root+'_calib.fits'
print(fits_calib)
#print(fits_root)
#print(fits_calib)
#print(fits_ori)
# sys.exit(0)
# print(radec_deg)
# print(rmag)
date=fits_root.split('@',-1)[0].split('-',-1)[1]
year=date[0:4]
month=date[4:6]
day=date[6:8]
yearmonth=date[0:6]
# sys.exit(0)
dir_file=yearmonth+'/slt'+date+'_calib_sci/'
# dir_reg=yearmonth+'/slt'+date+'_reg/'
hdu=fits.open(dir_file+fits_calib)[0]
imhead=hdu.header
imdata=hdu.data
wcs = WCS(imhead)
print('ID = ',i,'#', k)
print('WCS(imhead)',wcs)
# plt.subplot(projection=wcs)
# plt.imshow(hdu.data, origin='lower')
# plt.grid(color='white', ls='solid')
# plt.show()
# r_circle=fwhm[i]*5
# r_circle=15.
# r_circle_as=r_circle*u.arcsec
# aperture=SkyCircularAperture(position, r=4. * u.arcsec)
# print(r_circle_as)
# r_inner=fwhm[i]*8
# r_outer=fwhm[i]*10
# r_inner=20.
# r_outer=25.
# r_inner_as=r_inner*u.arcsec
# r_outer_as=r_outer*u.arcsec
# print(r_inner_as,r_outer_as)
# sys.exit(0)
# print(WCS.world_axis_physical_types)
aperture_pix=aperture.to_pixel(wcs)
# print(aperture_pix)
r_pix=aperture_pix.r
print('r_pix =',r_pix)
# phot_table = aperture_photometry(imdata, aperture,wcs=wcs)
phot_table = aperture_photometry(imdata, aperture_pix)
# print(phot_table)
# print(phot_table.colnames)
# print(phot_table['sky_center'])
# print(phot_table['xcenter'])
# print(phot_table['ycenter'])
aper_sum=phot_table['aperture_sum']
phot_table['aperture_sum'].info.format = '%.8g' # for consistent table output
aper_annu=SkyCircularAnnulus(positions,r_inner_as,r_outer_as)
# print(aper_annu)
# print(aper_annu.r_in)
# print(aper_annu.r_out)
aper_annu_pix=aper_annu.to_pixel(wcs)
# print(aper_annu_pix)
r_in_annu_pix=aper_annu_pix.r_in
r_out_annu_pix=aper_annu_pix.r_out
# print(r_in_annu_pix,r_out_annu_pix)
apper=[aperture_pix,aper_annu_pix]
phot_annu_table = aperture_photometry(imdata, apper)
# print(phot_annu_table)
# print(phot_annu_table.colnames)
aper_annu_sum0=phot_annu_table['aperture_sum_0']
# print(aper_annu_sum0)
aper_annu_sum1=phot_annu_table['aperture_sum_1']
# print(aper_annu_sum1)
bkg_mean = phot_annu_table['aperture_sum_1'] / aper_annu_pix.area
# print(bkg_mean)
bkg_sum = bkg_mean * aper_annu_pix.area
# print(bkg_sum)
final_sum = phot_annu_table['aperture_sum_0'] - bkg_sum
# print(final_sum)
phot_annu_table['residual_aperture_sum'] = final_sum
phot_annu_table['xcenter'].info.format = '%.8g' # for consistent table output
phot_annu_table['ycenter'].info.format = '%.8g' # for consistent table output
phot_annu_table['aperture_sum_0'].info.format = '%.8g' # for consistent table output
phot_annu_table['aperture_sum_1'].info.format = '%.8g' # for consistent table output
phot_annu_table['residual_aperture_sum'].info.format = '%.8g' # for consistent table output
# print(phot_annu_table['residual_aperture_sum'])
# print(phot_annu_table)
# sys.exit(0)
j=0
print('j final_sum mag_instrument Rmag[0]')
for j in range(n_refstar):
mag_instrument[j]=-2.5*np.log10(final_sum[j])
print(j, final_sum[j],mag_instrument[j],rmag[j])
mag_instrument_1=mag_instrument[1:n_refstar]
rmag_1=rmag[1:n_refstar]
b,m=polyfit(mag_instrument_1,rmag_1,1)
# print('b =','%.3f' %b)
# print('m =','%.3f' %m)
# print('Rmag =','%.3f' %b,'+','%.3f' %m,'*(Instrument Magnitude)')
rmag[0]=b+m*mag_instrument[0]
# print(rmag[0])
Rmag0[k]=rmag[0]
plt.figure()
# plt.scatter(mag_instrument_1,rmag_1)
plt.plot(mag_instrument_1,rmag_1,'o')
plt.plot(mag_instrument[0],rmag[0],'o')
plt.plot(mag_instrument,b+m*mag_instrument,'-')
plt.xlabel('Instrument Magnitude')
plt.ylabel('Rmag')
plt.title(fits_ori[i])
# plt.show()
plt.savefig(dir_obj+'Rmag_InsMag_'+obj_name+'_'+date+'_'+str(k)+'.png')
plt.close()
print('new Rmag[0] =',rmag[0])
# print('Instrument Magnitude',mag_instrument)
k=k+1
print('-----------------------')
print('Rmag',Rmag0)
#sys.exit(0)
#==============================
'''
file_reg_fk5=dir_refstar+'RefStar_'+obj_name+'_annu_fk5.reg'
print('will write to : '+file_reg_fk5)
if os.path.exists(file_reg_fk5):
os.remove(file_reg_fk5)
f_reg=open(file_reg_fk5,'w')
f_reg.write('# Region file format: DS9 version 4.1\n')
f_reg.write('global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n')
f_reg.write('fk5\n')
k=0
for i in idx_refstar:
if k==0:
txt_target='circle('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_circle)+'") # color=red'
txt_bkg='annulus('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_inner)+'",'+str(r_outer)+'")'
else:
txt_target='circle('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_circle)+'") # color=white'
txt_bkg='annulus('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_inner)+'",'+str(r_outer)+'")'
# print(k,i)
f_reg.write(txt_target+'\n')
f_reg.write(txt_bkg+'\n')
k=k+1
f_reg.close()
'''
'''
#img='62_z_CDFs_goods_stamp_img.fits' #path to the image
#RA = 52.9898239
#DEC = -27.7143114
#hdulist = astropy.io.fits.open(img)
#w = wcs.WCS(hdulist['PRIMARY'].header)
#world = np.array([[RA, DEC]])
pix = wcs.wcs_world2pix(radec_deg,1) # Pixel coordinates of (RA, DEC)
print( "Pixel Coordinates: ", pix[0,0], pix[0,1])
sys.exit(0)
#call aperture function
observation=aperphot(imdata, timekey=None, pos=[pix[0,0], pix[0,1]], dap=[4*fwhm,8*fwhm,12*fwhm], resamp=2, retfull=False)
# Print outputs
print( "Aperture flux:", observation.phot)
print( "Background: ", observation.bg)
'''
'''
annulus_masks = aper_annu_pix.to_mask(method='center')
plt.imshow(annulus_masks)
plt.colorbar()
annulus_data = annulus_masks[0].multiply(imdata)
plt.imshow(annulus_data)
plt.colorbar()
mask = annulus_masks[0].data
annulus_data_1d = annulus_data[mask > 0]
annulus_data_1d.shape
'''
'''
annulus_aperture = CircularAnnulus(position, r_in=fwhm*6.* u.arcsec, r_out=fwhm*8.* u.arcsec)
apers=[aperture,annulus_aperture]
phot_table2=aperture_photometry(imdata,apers)
for col in phot_table2.colnames:
phot_table2[col].info.format = '%.8g' # for consistent table output
(phot_table2)
#bkg_mean = phot_table2['aperture_sum_1'] / annulus_aperture.area
''' | #sys.exit(0)
r_circle=15. | random_line_split |
Rmag_aperture_annulus_per_target.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 05:22:35 2019
@author: altsai
"""
import os
import sys
import shutil
import numpy as np
import csv
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord # High-level coordinates
#from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
#from astropy.coordinates import Angle, Latitude, Longitude # Angles
#from astropy.coordinates import match_coordinates_sky
from astropy.table import Table
from photutils import CircularAperture
from photutils import SkyCircularAperture
from photutils import aperture_photometry
from photutils import CircularAnnulus
from photutils import SkyCircularAnnulus
# https://photutils.readthedocs.io/en/stable/aperture.html
#from phot import aperphot
# http://www.mit.edu/~iancross/python/phot.html
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
#from photutils import DAOStarFinder
#from astropy.stats import mad_std
# https://photutils.readthedocs.io/en/stable/getting_started.html
from numpy.polynomial.polynomial import polyfit
'''
# 3C345-20190714@135841-R_Astrodon_2018_calib.fits
#fits_root=input('Please input the file name of the fitsimage: ').split('.',-1)[0].split('_calib',-1)[0]
fits_root='3C345-20190822@130653-R_Astrodon_2018'
fits_calib=fits_root+'_calib.fits'
fits_ori=fits_root+'.fts'
#print(fits_root)
#print(fits_calib)
#print(fits_ori)
date=fits_root.split('@',-1)[0].split('-',-1)[-1]
print(date)
print(type(date))
yearmonth=date[0:6]
'''
#file_info='gasp_target_fitsheader_info_slt201907.txt'
file_info='gasp_target_fitsheader_info_slt2019.txt'
#file_info='gasp_target_fitsheader_info_slt'+date+'.txt'
#file_info='gasp_target_fitsheader_info_slt'+year+month+'.txt'
print('... will read '+file_info+' ...')
df_info=pd.read_csv(file_info,delimiter='|')
#print(df_info)
obj_name='3C345'
dir_obj='Rmag_InstMag/'+obj_name+'/annu/'
if os.path.exists(dir_obj):
shutil.rmtree(dir_obj)
os.makedirs(dir_obj,exist_ok=True)
dir_refstar='RefStar/'
file_refstar='gasp_refStar_radec.txt'
df_refstar=pd.read_csv(file_refstar,sep='|')
#print(df_refstar)
idx_refstar=df_refstar[df_refstar['ObjectName']==obj_name].index.tolist()
n_refstar=len(idx_refstar)
ra_deg=np.array([0.]*n_refstar)
dec_deg=np.array([0.]*n_refstar)
radec_deg=np.array([[0.,0.]]*n_refstar)
ra_hhmmss=['']*n_refstar
dec_ddmmss=['']*n_refstar
rmag=np.array([0.]*n_refstar)
rmag_err=np.array([0.]*n_refstar)
print(rmag)
mag_instrument=np.array([0.]*n_refstar)
j=0
for i in idx_refstar:
ra_deg[j]=df_refstar['RefStarRA_deg'][i]
dec_deg[j]=df_refstar['RefStarDEC_deg'][i]
# print(ra_deg[j],dec_deg[j])
radec_deg[j]=[ra_deg[j],dec_deg[j]]
# print(i,j,radec_deg[j])
ra_hhmmss[j]=df_refstar['RefStarRA_hhmmss'][i]
dec_ddmmss[j]=df_refstar['RefStarDEC_ddmmss'][i]
# print(i,j,radec_deg[j],ra_hhmmss[j],dec_ddmmss[j])
rmag[j]=df_refstar['Rmag'][i]
rmag_err[j]=df_refstar['Rmag_err'][i]
j=j+1
idx_fitsheader=df_info[df_info['Object']==obj_name].index
#print(idx_fitsheader)
#obj_name=df_info['Object'][idx_fitsheader]
fwhm=df_info['FWHM'][idx_fitsheader]
#print(fwhm)
#sys.exit(0)
#position= SkyCoord([ICRS(ra=ra_deg*u.deg,dec=dec_deg*u.deg)])
positions= SkyCoord(ra_deg,dec_deg,unit=(u.hourangle,u.deg),frame='icrs')
#print(positions)
#print(positions.ra)
#print(positions.dec)
#sys.exit(0)
#=======================
fits_ori=df_info['Filename'][idx_fitsheader]
#fits_root=input('Please input the file name of the fitsimage: ').split('.',-1)[0].split('_calib',-1)[0]
#fits_root=['']*n_idx
#fits_calib=['']*n_idx
#sys.exit(0)
# i=2674
# 3C345-20190822@130653-R_Astrodon_2018.fts
# 3C345-20190714@135841-R_Astrodon_2018_calib.fits
'''
# here is for test
idx_fitsheader=np.array([1107,1315,1316])
#idx_fitsheader=np.array([2673])
print(fits_ori[idx_fitsheader])
'''
n_idx=len(idx_fitsheader)
Rmag0=np.array([0.]*n_idx)
#sys.exit(0)
r_circle=15.
r_circle_as=r_circle*u.arcsec
r_inner=20.
r_outer=25.
r_inner_as=r_inner*u.arcsec
r_outer_as=r_outer*u.arcsec
aperture=SkyCircularAperture(positions, r_circle_as)
#print(aperture)
r_as=aperture.r
print('r_as =',r_as)
print('number of reference stars : ',n_refstar)
k=0
for i in idx_fitsheader:
print('-----------------------')
fits_root=fits_ori[i].split('.',-1)[0].split('_calib',-1)[0]
fits_calib=fits_root+'_calib.fits'
print(fits_calib)
#print(fits_root)
#print(fits_calib)
#print(fits_ori)
# sys.exit(0)
# print(radec_deg)
# print(rmag)
date=fits_root.split('@',-1)[0].split('-',-1)[1]
year=date[0:4]
month=date[4:6]
day=date[6:8]
yearmonth=date[0:6]
# sys.exit(0)
dir_file=yearmonth+'/slt'+date+'_calib_sci/'
# dir_reg=yearmonth+'/slt'+date+'_reg/'
hdu=fits.open(dir_file+fits_calib)[0]
imhead=hdu.header
imdata=hdu.data
wcs = WCS(imhead)
print('ID = ',i,'#', k)
print('WCS(imhead)',wcs)
# plt.subplot(projection=wcs)
# plt.imshow(hdu.data, origin='lower')
# plt.grid(color='white', ls='solid')
# plt.show()
# r_circle=fwhm[i]*5
# r_circle=15.
# r_circle_as=r_circle*u.arcsec
# aperture=SkyCircularAperture(position, r=4. * u.arcsec)
# print(r_circle_as)
# r_inner=fwhm[i]*8
# r_outer=fwhm[i]*10
# r_inner=20.
# r_outer=25.
# r_inner_as=r_inner*u.arcsec
# r_outer_as=r_outer*u.arcsec
# print(r_inner_as,r_outer_as)
# sys.exit(0)
# print(WCS.world_axis_physical_types)
aperture_pix=aperture.to_pixel(wcs)
# print(aperture_pix)
r_pix=aperture_pix.r
print('r_pix =',r_pix)
# phot_table = aperture_photometry(imdata, aperture,wcs=wcs)
phot_table = aperture_photometry(imdata, aperture_pix)
# print(phot_table)
# print(phot_table.colnames)
# print(phot_table['sky_center'])
# print(phot_table['xcenter'])
# print(phot_table['ycenter'])
aper_sum=phot_table['aperture_sum']
phot_table['aperture_sum'].info.format = '%.8g' # for consistent table output
aper_annu=SkyCircularAnnulus(positions,r_inner_as,r_outer_as)
# print(aper_annu)
# print(aper_annu.r_in)
# print(aper_annu.r_out)
aper_annu_pix=aper_annu.to_pixel(wcs)
# print(aper_annu_pix)
r_in_annu_pix=aper_annu_pix.r_in
r_out_annu_pix=aper_annu_pix.r_out
# print(r_in_annu_pix,r_out_annu_pix)
apper=[aperture_pix,aper_annu_pix]
phot_annu_table = aperture_photometry(imdata, apper)
# print(phot_annu_table)
# print(phot_annu_table.colnames)
aper_annu_sum0=phot_annu_table['aperture_sum_0']
# print(aper_annu_sum0)
aper_annu_sum1=phot_annu_table['aperture_sum_1']
# print(aper_annu_sum1)
bkg_mean = phot_annu_table['aperture_sum_1'] / aper_annu_pix.area
# print(bkg_mean)
bkg_sum = bkg_mean * aper_annu_pix.area
# print(bkg_sum)
final_sum = phot_annu_table['aperture_sum_0'] - bkg_sum
# print(final_sum)
phot_annu_table['residual_aperture_sum'] = final_sum
phot_annu_table['xcenter'].info.format = '%.8g' # for consistent table output
phot_annu_table['ycenter'].info.format = '%.8g' # for consistent table output
phot_annu_table['aperture_sum_0'].info.format = '%.8g' # for consistent table output
phot_annu_table['aperture_sum_1'].info.format = '%.8g' # for consistent table output
phot_annu_table['residual_aperture_sum'].info.format = '%.8g' # for consistent table output
# print(phot_annu_table['residual_aperture_sum'])
# print(phot_annu_table)
# sys.exit(0)
j=0
print('j final_sum mag_instrument Rmag[0]')
for j in range(n_refstar):
|
mag_instrument_1=mag_instrument[1:n_refstar]
rmag_1=rmag[1:n_refstar]
b,m=polyfit(mag_instrument_1,rmag_1,1)
# print('b =','%.3f' %b)
# print('m =','%.3f' %m)
# print('Rmag =','%.3f' %b,'+','%.3f' %m,'*(Instrument Magnitude)')
rmag[0]=b+m*mag_instrument[0]
# print(rmag[0])
Rmag0[k]=rmag[0]
plt.figure()
# plt.scatter(mag_instrument_1,rmag_1)
plt.plot(mag_instrument_1,rmag_1,'o')
plt.plot(mag_instrument[0],rmag[0],'o')
plt.plot(mag_instrument,b+m*mag_instrument,'-')
plt.xlabel('Instrument Magnitude')
plt.ylabel('Rmag')
plt.title(fits_ori[i])
# plt.show()
plt.savefig(dir_obj+'Rmag_InsMag_'+obj_name+'_'+date+'_'+str(k)+'.png')
plt.close()
print('new Rmag[0] =',rmag[0])
# print('Instrument Magnitude',mag_instrument)
k=k+1
print('-----------------------')
print('Rmag',Rmag0)
#sys.exit(0)
#==============================
'''
file_reg_fk5=dir_refstar+'RefStar_'+obj_name+'_annu_fk5.reg'
print('will write to : '+file_reg_fk5)
if os.path.exists(file_reg_fk5):
os.remove(file_reg_fk5)
f_reg=open(file_reg_fk5,'w')
f_reg.write('# Region file format: DS9 version 4.1\n')
f_reg.write('global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n')
f_reg.write('fk5\n')
k=0
for i in idx_refstar:
if k==0:
txt_target='circle('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_circle)+'") # color=red'
txt_bkg='annulus('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_inner)+'",'+str(r_outer)+'")'
else:
txt_target='circle('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_circle)+'") # color=white'
txt_bkg='annulus('+str(ra_hhmmss[k])+','+str(dec_ddmmss[k])+','+str(r_inner)+'",'+str(r_outer)+'")'
# print(k,i)
f_reg.write(txt_target+'\n')
f_reg.write(txt_bkg+'\n')
k=k+1
f_reg.close()
'''
'''
#img='62_z_CDFs_goods_stamp_img.fits' #path to the image
#RA = 52.9898239
#DEC = -27.7143114
#hdulist = astropy.io.fits.open(img)
#w = wcs.WCS(hdulist['PRIMARY'].header)
#world = np.array([[RA, DEC]])
pix = wcs.wcs_world2pix(radec_deg,1) # Pixel coordinates of (RA, DEC)
print( "Pixel Coordinates: ", pix[0,0], pix[0,1])
sys.exit(0)
#call aperture function
observation=aperphot(imdata, timekey=None, pos=[pix[0,0], pix[0,1]], dap=[4*fwhm,8*fwhm,12*fwhm], resamp=2, retfull=False)
# Print outputs
print( "Aperture flux:", observation.phot)
print( "Background: ", observation.bg)
'''
'''
annulus_masks = aper_annu_pix.to_mask(method='center')
plt.imshow(annulus_masks)
plt.colorbar()
annulus_data = annulus_masks[0].multiply(imdata)
plt.imshow(annulus_data)
plt.colorbar()
mask = annulus_masks[0].data
annulus_data_1d = annulus_data[mask > 0]
annulus_data_1d.shape
'''
'''
annulus_aperture = CircularAnnulus(position, r_in=fwhm*6.* u.arcsec, r_out=fwhm*8.* u.arcsec)
apers=[aperture,annulus_aperture]
phot_table2=aperture_photometry(imdata,apers)
for col in phot_table2.colnames:
phot_table2[col].info.format = '%.8g' # for consistent table output
(phot_table2)
#bkg_mean = phot_table2['aperture_sum_1'] / annulus_aperture.area
''' | mag_instrument[j]=-2.5*np.log10(final_sum[j])
print(j, final_sum[j],mag_instrument[j],rmag[j]) | conditional_block |
save_secret_in_image_better.py | #! /usr/bin/env -S /usr/bin/time /usr/bin/python3.8.6 -i
# -*- coding: utf-8 -*-
import dill
import gzip
import os
import sys
import string
import shutil
from typing import List, Dict, Set, Mapping, Any, Tuple
# import tempfile
from memory_tempfile import MemoryTempfile
tempfile : MemoryTempfile = MemoryTempfile()
from collections import defaultdict
from copy import deepcopy
from dotmap import DotMap
from operator import itemgetter
from pprint import pprint
from os.path import expanduser
import itertools
import multiprocessing as mp
PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")+"/"
HOME_DIR = os.path.expanduser("~")
TEMP_DIR = tempfile.gettempdir()+"/"
CURRENT_DIR = os.getcwdb().decode('utf-8')
from PIL import Image
import numpy as np
import pandas as pd
sys.path.append('..')
import utils
from utils_multiprocessing_manager import MultiprocessingManager
def convert_1d_to_2d_arr(arr, length):
arr_2d = np.zeros((arr.shape[0]-length+1, length), dtype=np.uint8)
for i in range(0, length-1):
arr_2d[:, i] = arr[i:-length+1+i]
arr_2d[:, -1] = arr[length-1:]
return arr_2d
lst_int_base_100 = string.printable
# lst_int_base_100 = list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_.,:;!?#$%&()[]{}/\\ \'\"")
base_100_len = len(lst_int_base_100)
assert base_100_len == 100
dict_base_100_int = {v: i for i, v in enumerate(lst_int_base_100, 0)}
def convert_base_100_to_int(num_base_100):
b = 1
s = 0
for i, v in enumerate(reversed(list(num_base_100)), 0):
n = dict_base_100_int[v]
s += n*b
b *= base_100_len
return s
def convert_int_to_base_100(num_int):
l = []
while num_int > 0:
l.append(num_int % base_100_len)
num_int //= base_100_len
n = list(map(lambda x: lst_int_base_100[x], reversed(l)))
return "".join(n)
def convert_int_to_lst_bin(num_int):
return list(map(int, bin(num_int)[2:]))
def convert_lst_bin_to_int(l_bin):
arr = np.array(l_bin, dtype=object)
length = arr.shape[0]
return np.sum(arr*2**np.arange(length-1, -1, -1).astype(object))
secret_test = "test123%$&/?!-_,:.;"
assert secret_test==convert_int_to_base_100(convert_base_100_to_int(secret_test))
assert 12345678901234567890==convert_base_100_to_int(convert_int_to_base_100(12345678901234567890))
assert 1234567==convert_lst_bin_to_int(convert_int_to_lst_bin(1234567))
assert [1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]==convert_int_to_lst_bin(convert_lst_bin_to_int([1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]))
prefix_int : int = 0xabcd2533
suffix_int : int = 0x34bf4634
arr_prefix : np.ndarray = np.array(convert_int_to_lst_bin(prefix_int), dtype=np.uint8)
arr_suffix : np.ndarray = np.array(convert_int_to_lst_bin(suffix_int), dtype=np.uint8)
len_arr_prefix = arr_prefix.shape[0]
len_arr_suffix = arr_suffix.shape[0]
if __name__ == '__main__':
print('Hello World!')
path_images = os.path.join(PATH_ROOT_DIR, 'images/')
assert os.path.exists(path_images)
img_src_path : str = "images/orig_image_2_no_secret.png"
img_src_new_path : str = "images/orig_image_2_no_secret_new.png"
img_dst_path : str = "images/orig_image_2_with_secret.png"
MAX_WIDTH = 200
MAX_HEIGHT = 150
# MAX_WIDTH = 400
# MAX_HEIGHT = 300
# MAX_WIDTH = 800
# MAX_HEIGHT = 600
img_src_orig : Image = Image.open(img_src_path)
pix : np.ndarray = np.array(img_src_orig)
if not os.path.exists(img_src_new_path):
if len(pix.shape)==3:
# remove alpha channel, if alpha is contained! also save the file again.
if pix.shape[2]==4:
pix = pix[..., :3]
img2 : Image = Image.fromarray(pix)
width, height = img2.size
if width > MAX_WIDTH or height > MAX_HEIGHT:
if width > MAX_WIDTH:
width_new = MAX_WIDTH
height_new = int(width_new * height / width)
elif height > MAX_HEIGHT:
height_new = MAX_HEIGHT
width_new = int(height_new * width / height)
img2 = img2.resize(size=(width_new, height_new), resample=Image.LANCZOS)
img2.save(img_src_new_path)
img_src : Image = Image.open(img_src_new_path)
pix_orig : np.ndarray = np.array(img_src)
assert len(pix_orig.shape) == 3
assert pix_orig.shape[2] == 3
shape_img_src : Tuple[int, int, int] = pix_orig.shape
print("shape_img_src: {}".format(shape_img_src))
pix : np.ndarray = pix_orig.copy()
arr_1_bit : np.ndarray = (pix & 0x1).reshape((-1, ))
arr_1_bit_orig : np.ndarray = arr_1_bit.copy()
len_arr_1_bit : int = arr_1_bit.shape[0]
l_secret_str = [
'hello',
'this is',
'a little test! 123?',
"""def print_some_stuff():\n print(\"Test! 123=!=!=!= xD\")""",
'lolololululululusfsfsdlfjsdlfjsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololululululusfsfsdlfjsdlf',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*3,
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*6,
]
# amount_secrets = 2
# secret_len = 400
# l_secret_str = [''.join(np.random.choice(lst_int_base_100, (secret_len, ))) for _ in range(0, amount_secrets)]
MIN_BITS_LENGTH = 16
JUMP_MIN = 10
JUMP_MAX = 16
def create_secret_bin_content(secret_str : str) -> List[int]:
|
l_arr_secret_bin_content = [np.array(create_secret_bin_content(secret_str), dtype=np.uint8) for secret_str in l_secret_str]
# TODO: make this into a multiprocessing function too!
def find_best_possible_parameters(n : int=100):
def inner_function():
arr_line_param_offset : np.ndarray = np.random.randint(0, len_arr_1_bit, (len(l_secret_str), ))
arr_line_param_jumps : np.ndarray = np.random.randint(JUMP_MIN, JUMP_MAX+1, (len(l_secret_str), ))
l_arr_pos : List[np.ndarray] = [(np.arange(0, len(l_bin_content)) * jumps + offset) % len_arr_1_bit for l_bin_content, offset, jumps in zip(l_arr_secret_bin_content, arr_line_param_offset, arr_line_param_jumps)]
# check, if any overlaps are there between the position of each secret!
i_1 : int
arr_secret_bin_content_1 : np.ndarray
arr_pos_1 : np.ndarray
for i_1, (arr_secret_bin_content_1, arr_pos_1) in enumerate(zip(l_arr_secret_bin_content[:-1], l_arr_pos[:-1]), 0):
i_2 : int
arr_secret_bin_content_2 : np.ndarray
arr_pos_2 : np.ndarray
for i_2, (arr_secret_bin_content_2, arr_pos_2) in enumerate(zip(l_arr_secret_bin_content[i_1+1:], l_arr_pos[i_1+1:]), i_1+1):
arr_idxs_bool_1 : np.ndarray = np.isin(arr_pos_1, arr_pos_2)
if np.any(arr_idxs_bool_1):
print("Some Equal postiions! i_1: {}, i_2: {}".format(i_1, i_2))
arr_idxs_bool_2 : np.ndarray = np.isin(arr_pos_2, arr_pos_1)
arr_bin_1_part : np.ndarray = arr_secret_bin_content_1[arr_idxs_bool_1]
arr_bin_2_part : np.ndarray = arr_secret_bin_content_2[arr_idxs_bool_2]
if np.any(arr_bin_1_part != arr_bin_2_part):
print("arr_bin_1_part: {}".format(arr_bin_1_part))
print("arr_bin_2_part: {}".format(arr_bin_2_part))
return None
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
arr_line_param_offset = None
arr_line_param_jumps = None
l_arr_pos = None
for nr_try in range(1, n + 1):
ret = inner_function()
if ret is None:
print(f'Failed to find good params at nr_try {nr_try}!')
continue
print(f'Found params at nr_try {nr_try}!')
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = ret
break
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
# TODO: make this multiprocessing possible!
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = find_best_possible_parameters(n=1000000)
if arr_line_param_offset is None:
sys.exit('Failed to find good params!')
print("arr_line_param_offset: {}".format(arr_line_param_offset))
print("arr_line_param_jumps: {}".format(arr_line_param_jumps))
l_params = [(jump, offset, arr_secret_bin_content.shape[0]) for jump, offset, arr_secret_bin_content in zip(arr_line_param_jumps, arr_line_param_offset, l_arr_secret_bin_content)]
# apply the bit changes to the pix array!
for arr_pos, arr_secret_bin_content in zip(l_arr_pos, l_arr_secret_bin_content):
arr_1_bit[arr_pos] = arr_secret_bin_content
pix_secret = (pix & 0xF8) | arr_1_bit.reshape(pix.shape)
pix_1_bit_orig = arr_1_bit_orig.reshape(shape_img_src) * 255
pix_1_bit = arr_1_bit.reshape(shape_img_src) * 255
Image.fromarray(pix_1_bit_orig).save('images/img_path_src_1bit_orig.png')
Image.fromarray(pix_1_bit).save('images/img_path_src_1bit_encoded_in.png')
img_secret : Image = Image.fromarray(pix_secret)
img_secret.save(img_dst_path)
img_src = Image.open(img_src_new_path)
img_dst = Image.open(img_dst_path)
pix_src = np.array(img_src)
pix_dst = np.array(img_dst)
pix_src_1bit = (pix_src & 0x1) * 255
pix_dst_1bit = (pix_dst & 0x1) * 255
pix_src_dst_1bit = pix_src_1bit ^ pix_dst_1bit
img_path_src_1bit = 'images/img_path_src_1bit.png'
img_path_dst_1bit = 'images/img_path_dst_1bit.png'
img_path_src_dst_1bit = 'images/img_path_src_dst_1bit.png'
Image.fromarray(pix_src_1bit).save(img_path_src_1bit)
Image.fromarray(pix_dst_1bit).save(img_path_dst_1bit)
Image.fromarray(pix_src_dst_1bit).save(img_path_src_dst_1bit)
# try to find some matches!
img_dst : Image = Image.open(img_dst_path)
pix_dst : np.ndarray = np.array(img_dst)
assert len(pix_dst.shape) == 3
assert pix_dst.shape[2] == 3
arr_dst_1_bit : np.ndarray = (pix_dst & 0x1).reshape((-1, ))
def func_find_possible_params(
arr_dst_1_bit : np.ndarray,
arr_prefix : np.ndarray,
arr_suffix : np.ndarray,
l_jump : List[int],
) -> List[Tuple[int, int, int]]:
len_arr_dst_1_bit : int = arr_dst_1_bit.shape[0]
l_possible_params : List[Tuple[int, int, int]] = []
len_arr_prefix : int = len(arr_prefix)
len_arr_suffix : int = len(arr_suffix)
xs_prefix_basic : np.ndarray = np.arange(0, len_arr_prefix)
xs_suffix_basic : np.ndarray = np.arange(0, len_arr_suffix)
for jump in l_jump:
# for jump in range(JUMP_MIN, JUMP_MAX + 1):
print("jump: {}".format(jump))
xs_jump = (xs_prefix_basic * jump) % len_arr_dst_1_bit
for offset_prefix in range(0, len_arr_dst_1_bit):
xs_prefix = (xs_jump + offset_prefix) % len_arr_dst_1_bit
# first: find the left part (prefix)
if np.all(np.equal(arr_dst_1_bit[xs_prefix], arr_prefix)):
print("offset_prefix: {}".format(offset_prefix))
for offset_jump in range(MIN_BITS_LENGTH + len_arr_prefix, len_arr_dst_1_bit - len_arr_suffix):
offset_suffix = offset_jump * jump
# print("offset_suffix: {}".format(offset_suffix))
xs_suffix = (xs_suffix_basic * jump + offset_prefix + offset_suffix) % len_arr_dst_1_bit
# send: find the right part (suffix)
if np.all(np.equal(arr_dst_1_bit[xs_suffix], arr_suffix)):
arr_pos : np.ndarray = (np.arange(len_arr_prefix, offset_jump) * jump + offset_prefix) % len_arr_dst_1_bit
arr_part : np.ndarray = arr_dst_1_bit[arr_pos]
arr_secret_bin : np.ndarray = arr_part[:-MIN_BITS_LENGTH]
arr_secret_bin_len : np.ndarray = arr_part[-MIN_BITS_LENGTH:]
# third: check, if the content length is the same as the given binary number length!
if arr_secret_bin.shape[0] == convert_lst_bin_to_int(arr_secret_bin_len):
t_params = (jump, offset_prefix, offset_jump + len_arr_suffix)
print("t_params: {}".format(t_params))
l_possible_params.append(t_params)
return l_possible_params
mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count())
mult_proc_mng.test_worker_threads_response()
l_jump_all = list(range(JUMP_MIN, JUMP_MAX+1))
amount_parts = 7
len_l_jump_all = len(l_jump_all)
factor = len_l_jump_all / amount_parts
l_jump_range_one = [0] + [int(factor * j) for j in range(1, amount_parts)] + [len_l_jump_all]
l_l_jump = [l_jump_all[i1:i2] for i1, i2 in zip(l_jump_range_one[:-1], l_jump_range_one[1:])]
print("l_l_jump: {}".format(l_l_jump))
# del mult_proc_mng
# sys.exit()
print('Define new Function!')
mult_proc_mng.define_new_func('find_possible_params', func_find_possible_params)
print('Do the jobs!!')
l_ret : List[List[Tuple[int, int, int]]] = mult_proc_mng.do_new_jobs(
['find_possible_params']*len(l_l_jump),
[
(arr_dst_1_bit, arr_prefix, arr_suffix, l_jump)
for l_jump
in l_l_jump
]
)
print("len(l_ret): {}".format(len(l_ret)))
# print("l_ret: {}".format(l_ret))
mult_proc_mng.test_worker_threads_response()
del mult_proc_mng
l_possible_params : List[Tuple[int, int, int]] = [t for l_possible_params_part in l_ret for t in l_possible_params_part]
l_params = sorted(l_params)
l_possible_params = sorted(l_possible_params)
# only for checking, if the output is correct or not!
assert l_params == l_possible_params
print("l_possible_params:\n{}".format(l_possible_params))
print("l_params:\n{}".format(l_params))
print("len(l_possible_params): {}".format(len(l_possible_params)))
print("len(l_params): {}".format(len(l_params)))
| secret_int : List[int] = convert_base_100_to_int(secret_str)
secret_bin : List[int] = convert_int_to_lst_bin(secret_int)
l_len_secret_bin = convert_int_to_lst_bin(len(secret_bin))
len_l_len_secret_bin = len(l_len_secret_bin)
assert len_l_len_secret_bin <= MIN_BITS_LENGTH
if len_l_len_secret_bin < MIN_BITS_LENGTH:
l_len_secret_bin = [0] * (MIN_BITS_LENGTH - len_l_len_secret_bin) + l_len_secret_bin
return arr_prefix.tolist() + secret_bin + l_len_secret_bin + arr_suffix.tolist() | identifier_body |
save_secret_in_image_better.py | #! /usr/bin/env -S /usr/bin/time /usr/bin/python3.8.6 -i
# -*- coding: utf-8 -*-
import dill
import gzip
import os
import sys
import string
import shutil
from typing import List, Dict, Set, Mapping, Any, Tuple
# import tempfile
from memory_tempfile import MemoryTempfile
tempfile : MemoryTempfile = MemoryTempfile()
from collections import defaultdict
from copy import deepcopy
from dotmap import DotMap
from operator import itemgetter
from pprint import pprint
from os.path import expanduser
import itertools
import multiprocessing as mp
PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")+"/"
HOME_DIR = os.path.expanduser("~")
TEMP_DIR = tempfile.gettempdir()+"/"
CURRENT_DIR = os.getcwdb().decode('utf-8')
from PIL import Image
import numpy as np
import pandas as pd
sys.path.append('..')
import utils
from utils_multiprocessing_manager import MultiprocessingManager
def convert_1d_to_2d_arr(arr, length):
arr_2d = np.zeros((arr.shape[0]-length+1, length), dtype=np.uint8)
for i in range(0, length-1):
arr_2d[:, i] = arr[i:-length+1+i]
arr_2d[:, -1] = arr[length-1:]
return arr_2d
lst_int_base_100 = string.printable
# lst_int_base_100 = list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_.,:;!?#$%&()[]{}/\\ \'\"")
base_100_len = len(lst_int_base_100)
assert base_100_len == 100
dict_base_100_int = {v: i for i, v in enumerate(lst_int_base_100, 0)}
def convert_base_100_to_int(num_base_100):
b = 1
s = 0
for i, v in enumerate(reversed(list(num_base_100)), 0):
n = dict_base_100_int[v]
s += n*b
b *= base_100_len
return s
def | (num_int):
l = []
while num_int > 0:
l.append(num_int % base_100_len)
num_int //= base_100_len
n = list(map(lambda x: lst_int_base_100[x], reversed(l)))
return "".join(n)
def convert_int_to_lst_bin(num_int):
return list(map(int, bin(num_int)[2:]))
def convert_lst_bin_to_int(l_bin):
arr = np.array(l_bin, dtype=object)
length = arr.shape[0]
return np.sum(arr*2**np.arange(length-1, -1, -1).astype(object))
secret_test = "test123%$&/?!-_,:.;"
assert secret_test==convert_int_to_base_100(convert_base_100_to_int(secret_test))
assert 12345678901234567890==convert_base_100_to_int(convert_int_to_base_100(12345678901234567890))
assert 1234567==convert_lst_bin_to_int(convert_int_to_lst_bin(1234567))
assert [1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]==convert_int_to_lst_bin(convert_lst_bin_to_int([1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]))
prefix_int : int = 0xabcd2533
suffix_int : int = 0x34bf4634
arr_prefix : np.ndarray = np.array(convert_int_to_lst_bin(prefix_int), dtype=np.uint8)
arr_suffix : np.ndarray = np.array(convert_int_to_lst_bin(suffix_int), dtype=np.uint8)
len_arr_prefix = arr_prefix.shape[0]
len_arr_suffix = arr_suffix.shape[0]
if __name__ == '__main__':
print('Hello World!')
path_images = os.path.join(PATH_ROOT_DIR, 'images/')
assert os.path.exists(path_images)
img_src_path : str = "images/orig_image_2_no_secret.png"
img_src_new_path : str = "images/orig_image_2_no_secret_new.png"
img_dst_path : str = "images/orig_image_2_with_secret.png"
MAX_WIDTH = 200
MAX_HEIGHT = 150
# MAX_WIDTH = 400
# MAX_HEIGHT = 300
# MAX_WIDTH = 800
# MAX_HEIGHT = 600
img_src_orig : Image = Image.open(img_src_path)
pix : np.ndarray = np.array(img_src_orig)
if not os.path.exists(img_src_new_path):
if len(pix.shape)==3:
# remove alpha channel, if alpha is contained! also save the file again.
if pix.shape[2]==4:
pix = pix[..., :3]
img2 : Image = Image.fromarray(pix)
width, height = img2.size
if width > MAX_WIDTH or height > MAX_HEIGHT:
if width > MAX_WIDTH:
width_new = MAX_WIDTH
height_new = int(width_new * height / width)
elif height > MAX_HEIGHT:
height_new = MAX_HEIGHT
width_new = int(height_new * width / height)
img2 = img2.resize(size=(width_new, height_new), resample=Image.LANCZOS)
img2.save(img_src_new_path)
img_src : Image = Image.open(img_src_new_path)
pix_orig : np.ndarray = np.array(img_src)
assert len(pix_orig.shape) == 3
assert pix_orig.shape[2] == 3
shape_img_src : Tuple[int, int, int] = pix_orig.shape
print("shape_img_src: {}".format(shape_img_src))
pix : np.ndarray = pix_orig.copy()
arr_1_bit : np.ndarray = (pix & 0x1).reshape((-1, ))
arr_1_bit_orig : np.ndarray = arr_1_bit.copy()
len_arr_1_bit : int = arr_1_bit.shape[0]
l_secret_str = [
'hello',
'this is',
'a little test! 123?',
"""def print_some_stuff():\n print(\"Test! 123=!=!=!= xD\")""",
'lolololululululusfsfsdlfjsdlfjsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololululululusfsfsdlfjsdlf',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*3,
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*6,
]
# amount_secrets = 2
# secret_len = 400
# l_secret_str = [''.join(np.random.choice(lst_int_base_100, (secret_len, ))) for _ in range(0, amount_secrets)]
MIN_BITS_LENGTH = 16
JUMP_MIN = 10
JUMP_MAX = 16
def create_secret_bin_content(secret_str : str) -> List[int]:
secret_int : List[int] = convert_base_100_to_int(secret_str)
secret_bin : List[int] = convert_int_to_lst_bin(secret_int)
l_len_secret_bin = convert_int_to_lst_bin(len(secret_bin))
len_l_len_secret_bin = len(l_len_secret_bin)
assert len_l_len_secret_bin <= MIN_BITS_LENGTH
if len_l_len_secret_bin < MIN_BITS_LENGTH:
l_len_secret_bin = [0] * (MIN_BITS_LENGTH - len_l_len_secret_bin) + l_len_secret_bin
return arr_prefix.tolist() + secret_bin + l_len_secret_bin + arr_suffix.tolist()
l_arr_secret_bin_content = [np.array(create_secret_bin_content(secret_str), dtype=np.uint8) for secret_str in l_secret_str]
# TODO: make this into a multiprocessing function too!
def find_best_possible_parameters(n : int=100):
def inner_function():
arr_line_param_offset : np.ndarray = np.random.randint(0, len_arr_1_bit, (len(l_secret_str), ))
arr_line_param_jumps : np.ndarray = np.random.randint(JUMP_MIN, JUMP_MAX+1, (len(l_secret_str), ))
l_arr_pos : List[np.ndarray] = [(np.arange(0, len(l_bin_content)) * jumps + offset) % len_arr_1_bit for l_bin_content, offset, jumps in zip(l_arr_secret_bin_content, arr_line_param_offset, arr_line_param_jumps)]
# check, if any overlaps are there between the position of each secret!
i_1 : int
arr_secret_bin_content_1 : np.ndarray
arr_pos_1 : np.ndarray
for i_1, (arr_secret_bin_content_1, arr_pos_1) in enumerate(zip(l_arr_secret_bin_content[:-1], l_arr_pos[:-1]), 0):
i_2 : int
arr_secret_bin_content_2 : np.ndarray
arr_pos_2 : np.ndarray
for i_2, (arr_secret_bin_content_2, arr_pos_2) in enumerate(zip(l_arr_secret_bin_content[i_1+1:], l_arr_pos[i_1+1:]), i_1+1):
arr_idxs_bool_1 : np.ndarray = np.isin(arr_pos_1, arr_pos_2)
if np.any(arr_idxs_bool_1):
print("Some Equal postiions! i_1: {}, i_2: {}".format(i_1, i_2))
arr_idxs_bool_2 : np.ndarray = np.isin(arr_pos_2, arr_pos_1)
arr_bin_1_part : np.ndarray = arr_secret_bin_content_1[arr_idxs_bool_1]
arr_bin_2_part : np.ndarray = arr_secret_bin_content_2[arr_idxs_bool_2]
if np.any(arr_bin_1_part != arr_bin_2_part):
print("arr_bin_1_part: {}".format(arr_bin_1_part))
print("arr_bin_2_part: {}".format(arr_bin_2_part))
return None
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
arr_line_param_offset = None
arr_line_param_jumps = None
l_arr_pos = None
for nr_try in range(1, n + 1):
ret = inner_function()
if ret is None:
print(f'Failed to find good params at nr_try {nr_try}!')
continue
print(f'Found params at nr_try {nr_try}!')
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = ret
break
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
# TODO: make this multiprocessing possible!
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = find_best_possible_parameters(n=1000000)
if arr_line_param_offset is None:
sys.exit('Failed to find good params!')
print("arr_line_param_offset: {}".format(arr_line_param_offset))
print("arr_line_param_jumps: {}".format(arr_line_param_jumps))
l_params = [(jump, offset, arr_secret_bin_content.shape[0]) for jump, offset, arr_secret_bin_content in zip(arr_line_param_jumps, arr_line_param_offset, l_arr_secret_bin_content)]
# apply the bit changes to the pix array!
for arr_pos, arr_secret_bin_content in zip(l_arr_pos, l_arr_secret_bin_content):
arr_1_bit[arr_pos] = arr_secret_bin_content
pix_secret = (pix & 0xF8) | arr_1_bit.reshape(pix.shape)
pix_1_bit_orig = arr_1_bit_orig.reshape(shape_img_src) * 255
pix_1_bit = arr_1_bit.reshape(shape_img_src) * 255
Image.fromarray(pix_1_bit_orig).save('images/img_path_src_1bit_orig.png')
Image.fromarray(pix_1_bit).save('images/img_path_src_1bit_encoded_in.png')
img_secret : Image = Image.fromarray(pix_secret)
img_secret.save(img_dst_path)
img_src = Image.open(img_src_new_path)
img_dst = Image.open(img_dst_path)
pix_src = np.array(img_src)
pix_dst = np.array(img_dst)
pix_src_1bit = (pix_src & 0x1) * 255
pix_dst_1bit = (pix_dst & 0x1) * 255
pix_src_dst_1bit = pix_src_1bit ^ pix_dst_1bit
img_path_src_1bit = 'images/img_path_src_1bit.png'
img_path_dst_1bit = 'images/img_path_dst_1bit.png'
img_path_src_dst_1bit = 'images/img_path_src_dst_1bit.png'
Image.fromarray(pix_src_1bit).save(img_path_src_1bit)
Image.fromarray(pix_dst_1bit).save(img_path_dst_1bit)
Image.fromarray(pix_src_dst_1bit).save(img_path_src_dst_1bit)
# try to find some matches!
img_dst : Image = Image.open(img_dst_path)
pix_dst : np.ndarray = np.array(img_dst)
assert len(pix_dst.shape) == 3
assert pix_dst.shape[2] == 3
arr_dst_1_bit : np.ndarray = (pix_dst & 0x1).reshape((-1, ))
def func_find_possible_params(
arr_dst_1_bit : np.ndarray,
arr_prefix : np.ndarray,
arr_suffix : np.ndarray,
l_jump : List[int],
) -> List[Tuple[int, int, int]]:
len_arr_dst_1_bit : int = arr_dst_1_bit.shape[0]
l_possible_params : List[Tuple[int, int, int]] = []
len_arr_prefix : int = len(arr_prefix)
len_arr_suffix : int = len(arr_suffix)
xs_prefix_basic : np.ndarray = np.arange(0, len_arr_prefix)
xs_suffix_basic : np.ndarray = np.arange(0, len_arr_suffix)
for jump in l_jump:
# for jump in range(JUMP_MIN, JUMP_MAX + 1):
print("jump: {}".format(jump))
xs_jump = (xs_prefix_basic * jump) % len_arr_dst_1_bit
for offset_prefix in range(0, len_arr_dst_1_bit):
xs_prefix = (xs_jump + offset_prefix) % len_arr_dst_1_bit
# first: find the left part (prefix)
if np.all(np.equal(arr_dst_1_bit[xs_prefix], arr_prefix)):
print("offset_prefix: {}".format(offset_prefix))
for offset_jump in range(MIN_BITS_LENGTH + len_arr_prefix, len_arr_dst_1_bit - len_arr_suffix):
offset_suffix = offset_jump * jump
# print("offset_suffix: {}".format(offset_suffix))
xs_suffix = (xs_suffix_basic * jump + offset_prefix + offset_suffix) % len_arr_dst_1_bit
# send: find the right part (suffix)
if np.all(np.equal(arr_dst_1_bit[xs_suffix], arr_suffix)):
arr_pos : np.ndarray = (np.arange(len_arr_prefix, offset_jump) * jump + offset_prefix) % len_arr_dst_1_bit
arr_part : np.ndarray = arr_dst_1_bit[arr_pos]
arr_secret_bin : np.ndarray = arr_part[:-MIN_BITS_LENGTH]
arr_secret_bin_len : np.ndarray = arr_part[-MIN_BITS_LENGTH:]
# third: check, if the content length is the same as the given binary number length!
if arr_secret_bin.shape[0] == convert_lst_bin_to_int(arr_secret_bin_len):
t_params = (jump, offset_prefix, offset_jump + len_arr_suffix)
print("t_params: {}".format(t_params))
l_possible_params.append(t_params)
return l_possible_params
mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count())
mult_proc_mng.test_worker_threads_response()
l_jump_all = list(range(JUMP_MIN, JUMP_MAX+1))
amount_parts = 7
len_l_jump_all = len(l_jump_all)
factor = len_l_jump_all / amount_parts
l_jump_range_one = [0] + [int(factor * j) for j in range(1, amount_parts)] + [len_l_jump_all]
l_l_jump = [l_jump_all[i1:i2] for i1, i2 in zip(l_jump_range_one[:-1], l_jump_range_one[1:])]
print("l_l_jump: {}".format(l_l_jump))
# del mult_proc_mng
# sys.exit()
print('Define new Function!')
mult_proc_mng.define_new_func('find_possible_params', func_find_possible_params)
print('Do the jobs!!')
l_ret : List[List[Tuple[int, int, int]]] = mult_proc_mng.do_new_jobs(
['find_possible_params']*len(l_l_jump),
[
(arr_dst_1_bit, arr_prefix, arr_suffix, l_jump)
for l_jump
in l_l_jump
]
)
print("len(l_ret): {}".format(len(l_ret)))
# print("l_ret: {}".format(l_ret))
mult_proc_mng.test_worker_threads_response()
del mult_proc_mng
l_possible_params : List[Tuple[int, int, int]] = [t for l_possible_params_part in l_ret for t in l_possible_params_part]
l_params = sorted(l_params)
l_possible_params = sorted(l_possible_params)
# only for checking, if the output is correct or not!
assert l_params == l_possible_params
print("l_possible_params:\n{}".format(l_possible_params))
print("l_params:\n{}".format(l_params))
print("len(l_possible_params): {}".format(len(l_possible_params)))
print("len(l_params): {}".format(len(l_params)))
| convert_int_to_base_100 | identifier_name |
save_secret_in_image_better.py | #! /usr/bin/env -S /usr/bin/time /usr/bin/python3.8.6 -i
# -*- coding: utf-8 -*-
import dill
import gzip
import os
import sys
import string
import shutil
from typing import List, Dict, Set, Mapping, Any, Tuple
# import tempfile
from memory_tempfile import MemoryTempfile
tempfile : MemoryTempfile = MemoryTempfile()
from collections import defaultdict
from copy import deepcopy
from dotmap import DotMap
from operator import itemgetter
from pprint import pprint
from os.path import expanduser
import itertools
import multiprocessing as mp
PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")+"/"
HOME_DIR = os.path.expanduser("~")
TEMP_DIR = tempfile.gettempdir()+"/"
CURRENT_DIR = os.getcwdb().decode('utf-8')
from PIL import Image
import numpy as np
import pandas as pd
sys.path.append('..')
import utils
from utils_multiprocessing_manager import MultiprocessingManager
def convert_1d_to_2d_arr(arr, length):
arr_2d = np.zeros((arr.shape[0]-length+1, length), dtype=np.uint8)
for i in range(0, length-1):
arr_2d[:, i] = arr[i:-length+1+i]
arr_2d[:, -1] = arr[length-1:]
return arr_2d
lst_int_base_100 = string.printable
# lst_int_base_100 = list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_.,:;!?#$%&()[]{}/\\ \'\"")
base_100_len = len(lst_int_base_100)
assert base_100_len == 100
dict_base_100_int = {v: i for i, v in enumerate(lst_int_base_100, 0)}
def convert_base_100_to_int(num_base_100):
b = 1
s = 0
for i, v in enumerate(reversed(list(num_base_100)), 0):
n = dict_base_100_int[v]
s += n*b
b *= base_100_len
return s
def convert_int_to_base_100(num_int):
l = []
while num_int > 0:
l.append(num_int % base_100_len)
num_int //= base_100_len
n = list(map(lambda x: lst_int_base_100[x], reversed(l)))
return "".join(n)
def convert_int_to_lst_bin(num_int):
return list(map(int, bin(num_int)[2:]))
def convert_lst_bin_to_int(l_bin):
arr = np.array(l_bin, dtype=object)
length = arr.shape[0]
return np.sum(arr*2**np.arange(length-1, -1, -1).astype(object))
secret_test = "test123%$&/?!-_,:.;"
assert secret_test==convert_int_to_base_100(convert_base_100_to_int(secret_test))
assert 12345678901234567890==convert_base_100_to_int(convert_int_to_base_100(12345678901234567890))
assert 1234567==convert_lst_bin_to_int(convert_int_to_lst_bin(1234567))
assert [1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]==convert_int_to_lst_bin(convert_lst_bin_to_int([1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]))
prefix_int : int = 0xabcd2533
suffix_int : int = 0x34bf4634
arr_prefix : np.ndarray = np.array(convert_int_to_lst_bin(prefix_int), dtype=np.uint8)
arr_suffix : np.ndarray = np.array(convert_int_to_lst_bin(suffix_int), dtype=np.uint8)
len_arr_prefix = arr_prefix.shape[0]
len_arr_suffix = arr_suffix.shape[0]
if __name__ == '__main__':
print('Hello World!')
path_images = os.path.join(PATH_ROOT_DIR, 'images/')
assert os.path.exists(path_images)
img_src_path : str = "images/orig_image_2_no_secret.png"
img_src_new_path : str = "images/orig_image_2_no_secret_new.png"
img_dst_path : str = "images/orig_image_2_with_secret.png"
MAX_WIDTH = 200
MAX_HEIGHT = 150
# MAX_WIDTH = 400
# MAX_HEIGHT = 300
# MAX_WIDTH = 800
# MAX_HEIGHT = 600
img_src_orig : Image = Image.open(img_src_path)
pix : np.ndarray = np.array(img_src_orig)
if not os.path.exists(img_src_new_path):
if len(pix.shape)==3:
# remove alpha channel, if alpha is contained! also save the file again.
if pix.shape[2]==4:
pix = pix[..., :3]
img2 : Image = Image.fromarray(pix)
width, height = img2.size
if width > MAX_WIDTH or height > MAX_HEIGHT:
if width > MAX_WIDTH:
width_new = MAX_WIDTH
height_new = int(width_new * height / width)
elif height > MAX_HEIGHT:
height_new = MAX_HEIGHT
width_new = int(height_new * width / height)
img2 = img2.resize(size=(width_new, height_new), resample=Image.LANCZOS)
img2.save(img_src_new_path)
img_src : Image = Image.open(img_src_new_path)
pix_orig : np.ndarray = np.array(img_src)
assert len(pix_orig.shape) == 3
assert pix_orig.shape[2] == 3
shape_img_src : Tuple[int, int, int] = pix_orig.shape
print("shape_img_src: {}".format(shape_img_src))
pix : np.ndarray = pix_orig.copy()
arr_1_bit : np.ndarray = (pix & 0x1).reshape((-1, ))
arr_1_bit_orig : np.ndarray = arr_1_bit.copy()
len_arr_1_bit : int = arr_1_bit.shape[0]
l_secret_str = [
'hello',
'this is',
'a little test! 123?',
"""def print_some_stuff():\n print(\"Test! 123=!=!=!= xD\")""",
'lolololululululusfsfsdlfjsdlfjsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololululululusfsfsdlfjsdlf',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*3,
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*6,
]
# amount_secrets = 2
# secret_len = 400
# l_secret_str = [''.join(np.random.choice(lst_int_base_100, (secret_len, ))) for _ in range(0, amount_secrets)]
MIN_BITS_LENGTH = 16
JUMP_MIN = 10
JUMP_MAX = 16
def create_secret_bin_content(secret_str : str) -> List[int]:
secret_int : List[int] = convert_base_100_to_int(secret_str)
secret_bin : List[int] = convert_int_to_lst_bin(secret_int)
l_len_secret_bin = convert_int_to_lst_bin(len(secret_bin))
len_l_len_secret_bin = len(l_len_secret_bin)
assert len_l_len_secret_bin <= MIN_BITS_LENGTH
if len_l_len_secret_bin < MIN_BITS_LENGTH:
l_len_secret_bin = [0] * (MIN_BITS_LENGTH - len_l_len_secret_bin) + l_len_secret_bin
return arr_prefix.tolist() + secret_bin + l_len_secret_bin + arr_suffix.tolist()
l_arr_secret_bin_content = [np.array(create_secret_bin_content(secret_str), dtype=np.uint8) for secret_str in l_secret_str]
# TODO: make this into a multiprocessing function too!
def find_best_possible_parameters(n : int=100):
def inner_function():
arr_line_param_offset : np.ndarray = np.random.randint(0, len_arr_1_bit, (len(l_secret_str), ))
arr_line_param_jumps : np.ndarray = np.random.randint(JUMP_MIN, JUMP_MAX+1, (len(l_secret_str), ))
l_arr_pos : List[np.ndarray] = [(np.arange(0, len(l_bin_content)) * jumps + offset) % len_arr_1_bit for l_bin_content, offset, jumps in zip(l_arr_secret_bin_content, arr_line_param_offset, arr_line_param_jumps)]
# check, if any overlaps are there between the position of each secret!
i_1 : int
arr_secret_bin_content_1 : np.ndarray
arr_pos_1 : np.ndarray
for i_1, (arr_secret_bin_content_1, arr_pos_1) in enumerate(zip(l_arr_secret_bin_content[:-1], l_arr_pos[:-1]), 0):
i_2 : int
arr_secret_bin_content_2 : np.ndarray
arr_pos_2 : np.ndarray
for i_2, (arr_secret_bin_content_2, arr_pos_2) in enumerate(zip(l_arr_secret_bin_content[i_1+1:], l_arr_pos[i_1+1:]), i_1+1):
arr_idxs_bool_1 : np.ndarray = np.isin(arr_pos_1, arr_pos_2)
if np.any(arr_idxs_bool_1):
print("Some Equal postiions! i_1: {}, i_2: {}".format(i_1, i_2))
arr_idxs_bool_2 : np.ndarray = np.isin(arr_pos_2, arr_pos_1)
arr_bin_1_part : np.ndarray = arr_secret_bin_content_1[arr_idxs_bool_1]
arr_bin_2_part : np.ndarray = arr_secret_bin_content_2[arr_idxs_bool_2]
if np.any(arr_bin_1_part != arr_bin_2_part):
print("arr_bin_1_part: {}".format(arr_bin_1_part))
print("arr_bin_2_part: {}".format(arr_bin_2_part))
return None
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
arr_line_param_offset = None
arr_line_param_jumps = None
l_arr_pos = None
for nr_try in range(1, n + 1):
ret = inner_function()
if ret is None:
print(f'Failed to find good params at nr_try {nr_try}!')
continue
print(f'Found params at nr_try {nr_try}!')
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = ret
break
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
# TODO: make this multiprocessing possible!
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = find_best_possible_parameters(n=1000000)
if arr_line_param_offset is None:
sys.exit('Failed to find good params!')
print("arr_line_param_offset: {}".format(arr_line_param_offset))
print("arr_line_param_jumps: {}".format(arr_line_param_jumps))
l_params = [(jump, offset, arr_secret_bin_content.shape[0]) for jump, offset, arr_secret_bin_content in zip(arr_line_param_jumps, arr_line_param_offset, l_arr_secret_bin_content)]
# apply the bit changes to the pix array!
for arr_pos, arr_secret_bin_content in zip(l_arr_pos, l_arr_secret_bin_content):
arr_1_bit[arr_pos] = arr_secret_bin_content
pix_secret = (pix & 0xF8) | arr_1_bit.reshape(pix.shape)
pix_1_bit_orig = arr_1_bit_orig.reshape(shape_img_src) * 255
pix_1_bit = arr_1_bit.reshape(shape_img_src) * 255
Image.fromarray(pix_1_bit_orig).save('images/img_path_src_1bit_orig.png')
Image.fromarray(pix_1_bit).save('images/img_path_src_1bit_encoded_in.png')
img_secret : Image = Image.fromarray(pix_secret)
img_secret.save(img_dst_path)
img_src = Image.open(img_src_new_path)
img_dst = Image.open(img_dst_path)
pix_src = np.array(img_src)
pix_dst = np.array(img_dst)
pix_src_1bit = (pix_src & 0x1) * 255
pix_dst_1bit = (pix_dst & 0x1) * 255
pix_src_dst_1bit = pix_src_1bit ^ pix_dst_1bit
img_path_src_1bit = 'images/img_path_src_1bit.png'
img_path_dst_1bit = 'images/img_path_dst_1bit.png'
img_path_src_dst_1bit = 'images/img_path_src_dst_1bit.png'
Image.fromarray(pix_src_1bit).save(img_path_src_1bit)
Image.fromarray(pix_dst_1bit).save(img_path_dst_1bit)
Image.fromarray(pix_src_dst_1bit).save(img_path_src_dst_1bit)
# try to find some matches!
img_dst : Image = Image.open(img_dst_path)
pix_dst : np.ndarray = np.array(img_dst)
assert len(pix_dst.shape) == 3
assert pix_dst.shape[2] == 3
arr_dst_1_bit : np.ndarray = (pix_dst & 0x1).reshape((-1, ))
def func_find_possible_params(
arr_dst_1_bit : np.ndarray,
arr_prefix : np.ndarray,
arr_suffix : np.ndarray,
l_jump : List[int],
) -> List[Tuple[int, int, int]]:
len_arr_dst_1_bit : int = arr_dst_1_bit.shape[0]
l_possible_params : List[Tuple[int, int, int]] = []
len_arr_prefix : int = len(arr_prefix)
len_arr_suffix : int = len(arr_suffix)
xs_prefix_basic : np.ndarray = np.arange(0, len_arr_prefix)
xs_suffix_basic : np.ndarray = np.arange(0, len_arr_suffix)
for jump in l_jump:
# for jump in range(JUMP_MIN, JUMP_MAX + 1):
print("jump: {}".format(jump))
xs_jump = (xs_prefix_basic * jump) % len_arr_dst_1_bit
for offset_prefix in range(0, len_arr_dst_1_bit):
xs_prefix = (xs_jump + offset_prefix) % len_arr_dst_1_bit
# first: find the left part (prefix)
if np.all(np.equal(arr_dst_1_bit[xs_prefix], arr_prefix)):
print("offset_prefix: {}".format(offset_prefix))
for offset_jump in range(MIN_BITS_LENGTH + len_arr_prefix, len_arr_dst_1_bit - len_arr_suffix):
offset_suffix = offset_jump * jump
# print("offset_suffix: {}".format(offset_suffix))
xs_suffix = (xs_suffix_basic * jump + offset_prefix + offset_suffix) % len_arr_dst_1_bit
# send: find the right part (suffix)
if np.all(np.equal(arr_dst_1_bit[xs_suffix], arr_suffix)):
arr_pos : np.ndarray = (np.arange(len_arr_prefix, offset_jump) * jump + offset_prefix) % len_arr_dst_1_bit
arr_part : np.ndarray = arr_dst_1_bit[arr_pos]
arr_secret_bin : np.ndarray = arr_part[:-MIN_BITS_LENGTH]
arr_secret_bin_len : np.ndarray = arr_part[-MIN_BITS_LENGTH:]
# third: check, if the content length is the same as the given binary number length!
if arr_secret_bin.shape[0] == convert_lst_bin_to_int(arr_secret_bin_len):
|
return l_possible_params
mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count())
mult_proc_mng.test_worker_threads_response()
l_jump_all = list(range(JUMP_MIN, JUMP_MAX+1))
amount_parts = 7
len_l_jump_all = len(l_jump_all)
factor = len_l_jump_all / amount_parts
l_jump_range_one = [0] + [int(factor * j) for j in range(1, amount_parts)] + [len_l_jump_all]
l_l_jump = [l_jump_all[i1:i2] for i1, i2 in zip(l_jump_range_one[:-1], l_jump_range_one[1:])]
print("l_l_jump: {}".format(l_l_jump))
# del mult_proc_mng
# sys.exit()
print('Define new Function!')
mult_proc_mng.define_new_func('find_possible_params', func_find_possible_params)
print('Do the jobs!!')
l_ret : List[List[Tuple[int, int, int]]] = mult_proc_mng.do_new_jobs(
['find_possible_params']*len(l_l_jump),
[
(arr_dst_1_bit, arr_prefix, arr_suffix, l_jump)
for l_jump
in l_l_jump
]
)
print("len(l_ret): {}".format(len(l_ret)))
# print("l_ret: {}".format(l_ret))
mult_proc_mng.test_worker_threads_response()
del mult_proc_mng
l_possible_params : List[Tuple[int, int, int]] = [t for l_possible_params_part in l_ret for t in l_possible_params_part]
l_params = sorted(l_params)
l_possible_params = sorted(l_possible_params)
# only for checking, if the output is correct or not!
assert l_params == l_possible_params
print("l_possible_params:\n{}".format(l_possible_params))
print("l_params:\n{}".format(l_params))
print("len(l_possible_params): {}".format(len(l_possible_params)))
print("len(l_params): {}".format(len(l_params)))
| t_params = (jump, offset_prefix, offset_jump + len_arr_suffix)
print("t_params: {}".format(t_params))
l_possible_params.append(t_params) | conditional_block |
save_secret_in_image_better.py | #! /usr/bin/env -S /usr/bin/time /usr/bin/python3.8.6 -i
# -*- coding: utf-8 -*-
import dill
import gzip
import os
import sys
import string
import shutil
from typing import List, Dict, Set, Mapping, Any, Tuple
# import tempfile
from memory_tempfile import MemoryTempfile
tempfile : MemoryTempfile = MemoryTempfile()
from collections import defaultdict
from copy import deepcopy
from dotmap import DotMap
from operator import itemgetter
from pprint import pprint
from os.path import expanduser
import itertools
import multiprocessing as mp
PATH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")+"/"
HOME_DIR = os.path.expanduser("~")
TEMP_DIR = tempfile.gettempdir()+"/"
CURRENT_DIR = os.getcwdb().decode('utf-8')
from PIL import Image
import numpy as np
import pandas as pd
sys.path.append('..')
import utils
from utils_multiprocessing_manager import MultiprocessingManager
def convert_1d_to_2d_arr(arr, length):
arr_2d = np.zeros((arr.shape[0]-length+1, length), dtype=np.uint8)
for i in range(0, length-1):
arr_2d[:, i] = arr[i:-length+1+i]
arr_2d[:, -1] = arr[length-1:]
return arr_2d
lst_int_base_100 = string.printable
# lst_int_base_100 = list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_.,:;!?#$%&()[]{}/\\ \'\"")
base_100_len = len(lst_int_base_100)
assert base_100_len == 100
dict_base_100_int = {v: i for i, v in enumerate(lst_int_base_100, 0)}
def convert_base_100_to_int(num_base_100):
b = 1
s = 0
for i, v in enumerate(reversed(list(num_base_100)), 0):
n = dict_base_100_int[v]
s += n*b
b *= base_100_len
return s
def convert_int_to_base_100(num_int):
l = []
while num_int > 0:
l.append(num_int % base_100_len)
num_int //= base_100_len
n = list(map(lambda x: lst_int_base_100[x], reversed(l)))
return "".join(n)
def convert_int_to_lst_bin(num_int):
return list(map(int, bin(num_int)[2:]))
def convert_lst_bin_to_int(l_bin):
arr = np.array(l_bin, dtype=object)
length = arr.shape[0]
return np.sum(arr*2**np.arange(length-1, -1, -1).astype(object))
secret_test = "test123%$&/?!-_,:.;"
assert secret_test==convert_int_to_base_100(convert_base_100_to_int(secret_test))
assert 12345678901234567890==convert_base_100_to_int(convert_int_to_base_100(12345678901234567890))
assert 1234567==convert_lst_bin_to_int(convert_int_to_lst_bin(1234567))
assert [1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]==convert_int_to_lst_bin(convert_lst_bin_to_int([1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1]))
prefix_int : int = 0xabcd2533
suffix_int : int = 0x34bf4634
arr_prefix : np.ndarray = np.array(convert_int_to_lst_bin(prefix_int), dtype=np.uint8)
arr_suffix : np.ndarray = np.array(convert_int_to_lst_bin(suffix_int), dtype=np.uint8)
len_arr_prefix = arr_prefix.shape[0]
len_arr_suffix = arr_suffix.shape[0]
if __name__ == '__main__':
print('Hello World!')
path_images = os.path.join(PATH_ROOT_DIR, 'images/')
assert os.path.exists(path_images)
img_src_path : str = "images/orig_image_2_no_secret.png"
img_src_new_path : str = "images/orig_image_2_no_secret_new.png"
img_dst_path : str = "images/orig_image_2_with_secret.png"
MAX_WIDTH = 200
MAX_HEIGHT = 150
# MAX_WIDTH = 400
# MAX_HEIGHT = 300
# MAX_WIDTH = 800
# MAX_HEIGHT = 600
img_src_orig : Image = Image.open(img_src_path)
pix : np.ndarray = np.array(img_src_orig)
if not os.path.exists(img_src_new_path):
if len(pix.shape)==3:
# remove alpha channel, if alpha is contained! also save the file again.
if pix.shape[2]==4:
pix = pix[..., :3]
img2 : Image = Image.fromarray(pix)
width, height = img2.size
if width > MAX_WIDTH or height > MAX_HEIGHT:
if width > MAX_WIDTH:
width_new = MAX_WIDTH
height_new = int(width_new * height / width)
elif height > MAX_HEIGHT:
height_new = MAX_HEIGHT
width_new = int(height_new * width / height)
img2 = img2.resize(size=(width_new, height_new), resample=Image.LANCZOS)
img2.save(img_src_new_path)
img_src : Image = Image.open(img_src_new_path)
pix_orig : np.ndarray = np.array(img_src)
assert len(pix_orig.shape) == 3
assert pix_orig.shape[2] == 3
shape_img_src : Tuple[int, int, int] = pix_orig.shape
print("shape_img_src: {}".format(shape_img_src))
pix : np.ndarray = pix_orig.copy()
arr_1_bit : np.ndarray = (pix & 0x1).reshape((-1, ))
arr_1_bit_orig : np.ndarray = arr_1_bit.copy()
len_arr_1_bit : int = arr_1_bit.shape[0]
l_secret_str = [
'hello',
'this is',
'a little test! 123?',
"""def print_some_stuff():\n print(\"Test! 123=!=!=!= xD\")""",
'lolololululululusfsfsdlfjsdlfjsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololululululusfsfsdlfjsdlf',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi',
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*3,
'lolololulululujsdlfjsfjsfjsklfjksjfsjfsfjsdlfjafwefawoi'*6,
]
# amount_secrets = 2
# secret_len = 400
# l_secret_str = [''.join(np.random.choice(lst_int_base_100, (secret_len, ))) for _ in range(0, amount_secrets)]
MIN_BITS_LENGTH = 16
JUMP_MIN = 10
JUMP_MAX = 16
def create_secret_bin_content(secret_str : str) -> List[int]:
secret_int : List[int] = convert_base_100_to_int(secret_str)
secret_bin : List[int] = convert_int_to_lst_bin(secret_int)
l_len_secret_bin = convert_int_to_lst_bin(len(secret_bin))
len_l_len_secret_bin = len(l_len_secret_bin)
assert len_l_len_secret_bin <= MIN_BITS_LENGTH
if len_l_len_secret_bin < MIN_BITS_LENGTH:
l_len_secret_bin = [0] * (MIN_BITS_LENGTH - len_l_len_secret_bin) + l_len_secret_bin
return arr_prefix.tolist() + secret_bin + l_len_secret_bin + arr_suffix.tolist()
l_arr_secret_bin_content = [np.array(create_secret_bin_content(secret_str), dtype=np.uint8) for secret_str in l_secret_str]
# TODO: make this into a multiprocessing function too!
def find_best_possible_parameters(n : int=100):
def inner_function():
arr_line_param_offset : np.ndarray = np.random.randint(0, len_arr_1_bit, (len(l_secret_str), ))
arr_line_param_jumps : np.ndarray = np.random.randint(JUMP_MIN, JUMP_MAX+1, (len(l_secret_str), ))
l_arr_pos : List[np.ndarray] = [(np.arange(0, len(l_bin_content)) * jumps + offset) % len_arr_1_bit for l_bin_content, offset, jumps in zip(l_arr_secret_bin_content, arr_line_param_offset, arr_line_param_jumps)]
# check, if any overlaps are there between the position of each secret!
i_1 : int
arr_secret_bin_content_1 : np.ndarray
arr_pos_1 : np.ndarray
for i_1, (arr_secret_bin_content_1, arr_pos_1) in enumerate(zip(l_arr_secret_bin_content[:-1], l_arr_pos[:-1]), 0):
i_2 : int
arr_secret_bin_content_2 : np.ndarray
arr_pos_2 : np.ndarray
for i_2, (arr_secret_bin_content_2, arr_pos_2) in enumerate(zip(l_arr_secret_bin_content[i_1+1:], l_arr_pos[i_1+1:]), i_1+1):
arr_idxs_bool_1 : np.ndarray = np.isin(arr_pos_1, arr_pos_2)
if np.any(arr_idxs_bool_1):
print("Some Equal postiions! i_1: {}, i_2: {}".format(i_1, i_2))
arr_idxs_bool_2 : np.ndarray = np.isin(arr_pos_2, arr_pos_1)
arr_bin_1_part : np.ndarray = arr_secret_bin_content_1[arr_idxs_bool_1]
arr_bin_2_part : np.ndarray = arr_secret_bin_content_2[arr_idxs_bool_2]
if np.any(arr_bin_1_part != arr_bin_2_part):
print("arr_bin_1_part: {}".format(arr_bin_1_part))
print("arr_bin_2_part: {}".format(arr_bin_2_part))
return None
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
arr_line_param_offset = None
arr_line_param_jumps = None
l_arr_pos = None
for nr_try in range(1, n + 1):
ret = inner_function()
if ret is None:
print(f'Failed to find good params at nr_try {nr_try}!')
continue
print(f'Found params at nr_try {nr_try}!')
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = ret
break
return arr_line_param_offset, arr_line_param_jumps, l_arr_pos
# TODO: make this multiprocessing possible!
arr_line_param_offset, arr_line_param_jumps, l_arr_pos = find_best_possible_parameters(n=1000000)
if arr_line_param_offset is None:
sys.exit('Failed to find good params!')
print("arr_line_param_offset: {}".format(arr_line_param_offset))
print("arr_line_param_jumps: {}".format(arr_line_param_jumps))
l_params = [(jump, offset, arr_secret_bin_content.shape[0]) for jump, offset, arr_secret_bin_content in zip(arr_line_param_jumps, arr_line_param_offset, l_arr_secret_bin_content)]
# apply the bit changes to the pix array!
for arr_pos, arr_secret_bin_content in zip(l_arr_pos, l_arr_secret_bin_content):
arr_1_bit[arr_pos] = arr_secret_bin_content
pix_secret = (pix & 0xF8) | arr_1_bit.reshape(pix.shape)
pix_1_bit_orig = arr_1_bit_orig.reshape(shape_img_src) * 255
pix_1_bit = arr_1_bit.reshape(shape_img_src) * 255
Image.fromarray(pix_1_bit_orig).save('images/img_path_src_1bit_orig.png') |
img_src = Image.open(img_src_new_path)
img_dst = Image.open(img_dst_path)
pix_src = np.array(img_src)
pix_dst = np.array(img_dst)
pix_src_1bit = (pix_src & 0x1) * 255
pix_dst_1bit = (pix_dst & 0x1) * 255
pix_src_dst_1bit = pix_src_1bit ^ pix_dst_1bit
img_path_src_1bit = 'images/img_path_src_1bit.png'
img_path_dst_1bit = 'images/img_path_dst_1bit.png'
img_path_src_dst_1bit = 'images/img_path_src_dst_1bit.png'
Image.fromarray(pix_src_1bit).save(img_path_src_1bit)
Image.fromarray(pix_dst_1bit).save(img_path_dst_1bit)
Image.fromarray(pix_src_dst_1bit).save(img_path_src_dst_1bit)
# try to find some matches!
img_dst : Image = Image.open(img_dst_path)
pix_dst : np.ndarray = np.array(img_dst)
assert len(pix_dst.shape) == 3
assert pix_dst.shape[2] == 3
arr_dst_1_bit : np.ndarray = (pix_dst & 0x1).reshape((-1, ))
def func_find_possible_params(
arr_dst_1_bit : np.ndarray,
arr_prefix : np.ndarray,
arr_suffix : np.ndarray,
l_jump : List[int],
) -> List[Tuple[int, int, int]]:
len_arr_dst_1_bit : int = arr_dst_1_bit.shape[0]
l_possible_params : List[Tuple[int, int, int]] = []
len_arr_prefix : int = len(arr_prefix)
len_arr_suffix : int = len(arr_suffix)
xs_prefix_basic : np.ndarray = np.arange(0, len_arr_prefix)
xs_suffix_basic : np.ndarray = np.arange(0, len_arr_suffix)
for jump in l_jump:
# for jump in range(JUMP_MIN, JUMP_MAX + 1):
print("jump: {}".format(jump))
xs_jump = (xs_prefix_basic * jump) % len_arr_dst_1_bit
for offset_prefix in range(0, len_arr_dst_1_bit):
xs_prefix = (xs_jump + offset_prefix) % len_arr_dst_1_bit
# first: find the left part (prefix)
if np.all(np.equal(arr_dst_1_bit[xs_prefix], arr_prefix)):
print("offset_prefix: {}".format(offset_prefix))
for offset_jump in range(MIN_BITS_LENGTH + len_arr_prefix, len_arr_dst_1_bit - len_arr_suffix):
offset_suffix = offset_jump * jump
# print("offset_suffix: {}".format(offset_suffix))
xs_suffix = (xs_suffix_basic * jump + offset_prefix + offset_suffix) % len_arr_dst_1_bit
# send: find the right part (suffix)
if np.all(np.equal(arr_dst_1_bit[xs_suffix], arr_suffix)):
arr_pos : np.ndarray = (np.arange(len_arr_prefix, offset_jump) * jump + offset_prefix) % len_arr_dst_1_bit
arr_part : np.ndarray = arr_dst_1_bit[arr_pos]
arr_secret_bin : np.ndarray = arr_part[:-MIN_BITS_LENGTH]
arr_secret_bin_len : np.ndarray = arr_part[-MIN_BITS_LENGTH:]
# third: check, if the content length is the same as the given binary number length!
if arr_secret_bin.shape[0] == convert_lst_bin_to_int(arr_secret_bin_len):
t_params = (jump, offset_prefix, offset_jump + len_arr_suffix)
print("t_params: {}".format(t_params))
l_possible_params.append(t_params)
return l_possible_params
mult_proc_mng = MultiprocessingManager(cpu_count=mp.cpu_count())
mult_proc_mng.test_worker_threads_response()
l_jump_all = list(range(JUMP_MIN, JUMP_MAX+1))
amount_parts = 7
len_l_jump_all = len(l_jump_all)
factor = len_l_jump_all / amount_parts
l_jump_range_one = [0] + [int(factor * j) for j in range(1, amount_parts)] + [len_l_jump_all]
l_l_jump = [l_jump_all[i1:i2] for i1, i2 in zip(l_jump_range_one[:-1], l_jump_range_one[1:])]
print("l_l_jump: {}".format(l_l_jump))
# del mult_proc_mng
# sys.exit()
print('Define new Function!')
mult_proc_mng.define_new_func('find_possible_params', func_find_possible_params)
print('Do the jobs!!')
l_ret : List[List[Tuple[int, int, int]]] = mult_proc_mng.do_new_jobs(
['find_possible_params']*len(l_l_jump),
[
(arr_dst_1_bit, arr_prefix, arr_suffix, l_jump)
for l_jump
in l_l_jump
]
)
print("len(l_ret): {}".format(len(l_ret)))
# print("l_ret: {}".format(l_ret))
mult_proc_mng.test_worker_threads_response()
del mult_proc_mng
l_possible_params : List[Tuple[int, int, int]] = [t for l_possible_params_part in l_ret for t in l_possible_params_part]
l_params = sorted(l_params)
l_possible_params = sorted(l_possible_params)
# only for checking, if the output is correct or not!
assert l_params == l_possible_params
print("l_possible_params:\n{}".format(l_possible_params))
print("l_params:\n{}".format(l_params))
print("len(l_possible_params): {}".format(len(l_possible_params)))
print("len(l_params): {}".format(len(l_params))) | Image.fromarray(pix_1_bit).save('images/img_path_src_1bit_encoded_in.png')
img_secret : Image = Image.fromarray(pix_secret)
img_secret.save(img_dst_path) | random_line_split |
start.py | import os
import sys
import re
from pathlib import Path
from functools import wraps
import signal
import shutil
import socket
import urllib.request, urllib.error
import ssl
import json
# append proper (snap) site-packages path
sys.path.append("/snap/nextbox/current/lib/python3.6/site-packages")
from queue import Queue
from flask import Flask, render_template, request, flash, redirect, Response, \
url_for, send_file, Blueprint, render_template, jsonify, make_response
from nextbox_daemon.utils import get_partitions, error, success, \
tail, parse_backup_line, local_ip, cleanup_certs
from nextbox_daemon.command_runner import CommandRunner
from nextbox_daemon.consts import *
from nextbox_daemon.config import Config, log
from nextbox_daemon.worker import Worker
from nextbox_daemon.jobs import JobManager, TrustedDomainsJob, ProxySSHJob, UpdateJob
# config load
cfg = Config(CONFIG_PATH)
app = Flask(__name__)
app.secret_key = "123456-nextbox-123456" #cfg["secret_key"]
# backup thread handler
backup_proc = None
#@app.before_request
#def limit_remote_addr():
# if request.remote_addr != '10.20.30.40':
# abort(403) # Forbidden
#
### CORS section
@app.after_request
def after_request_func(response):
origin = request.headers.get('Origin')
response.headers.add('Access-Control-Allow-Credentials', 'true')
if request.method == 'OPTIONS':
response = make_response()
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Headers', 'x-csrf-token')
response.headers.add('Access-Control-Allow-Headers', 'requesttoken')
response.headers.add('Access-Control-Allow-Methods',
'GET, POST, OPTIONS, PUT, PATCH, DELETE')
if origin:
response.headers.add('Access-Control-Allow-Origin', origin)
else:
response.headers.add('Access-Control-Allow-Origin', request.remote_addr)
#response.headers.add('Access-Control-Allow-Origin', cfg["config"]["domain"])
#if not origin:
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.129")
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.47")
return response
### end CORS section
# decorator for authenticated access
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if request.remote_addr != "127.0.0.1":
# abort(403)
return error("not allowed")
return f(*args, **kwargs)
return decorated
@app.route("/overview")
def show_overview():
return success(data={
"storage": get_partitions(),
"backup": check_for_backup_process()
})
@app.route("/log")
@app.route("/log/<num_lines>")
@requires_auth
def show_log(num_lines=50):
ret = tail(LOG_FILENAME, num_lines)
return error(f"could not read log: {LOG_FILENAME}") if ret is None \
else success(data=ret[:-1])
@app.route("/system", methods=["POST", "GET"])
@requires_auth
def system_settings():
if request.method == "GET":
return success(data={
"log_lvl": cfg["config"]["log_lvl"],
"expert_mode": cfg["config"]["expert_mode"]
})
elif request.method == "POST":
pass
#
# @app.route("/token/<token>/<allow_ip>")
# def set_token(token, allow_ip):
#
# if request.remote_addr != "127.0.0.1":
# #abort(403)
# return error("not allowed")
#
# cfg["token"]["value"] = token
# cfg["token"]["created"] = time.time()
# cfg["token"]["ip"] = allow_ip
# save_config(cfg, CONFIG_PATH)
#
# return success()
@app.route("/storage")
@requires_auth
def storage():
parts = get_partitions()
return success(data=parts)
@app.route("/storage/mount/<device>")
@app.route("/storage/mount/<device>/<name>")
@requires_auth
def mount_storage(device, name=None):
parts = get_partitions()
if name is None:
print (parts)
for idx in range(1, 11):
_name = f"extra-{idx}"
mount_target = f"/media/{_name}"
if mount_target not in parts["mounted"].values():
name = _name
print(name)
break
if name is None:
return error("cannot determine mount target, too many mounts?")
if ".." in device or "/" in device or name == "nextcloud":
return error("invalid device")
if ".." in name or "/" in name:
return error("invalid name")
mount_target = f"/media/{name}"
mount_device = None
for avail in parts["available"]:
if Path(avail).name == device:
mount_device = avail
if not mount_device:
return error("device to mount not found")
if mount_device == parts["main"]:
return error("will not mount main data partition")
if mount_device in parts["mounted"]:
return error("already mounted")
if mount_target in parts["mounted"].values():
return error(f"target {mount_target} has been already mounted")
if not os.path.exists(mount_target):
os.makedirs(mount_target)
cr = CommandRunner([MOUNT_BIN, mount_device, mount_target], block=True)
if cr.returncode == 0:
return success("Mounting successful", data=cr.output)
else:
cr.log_output()
return error("Failed mounting, check logs...")
@app.route("/storage/umount/<name>")
@requires_auth
def umount_storage(name):
if ".." in name or "/" in name or name == "nextcloud":
return error("invalid name")
mount_target = f"/media/{name}"
parts = get_partitions()
if name == "nextcloud":
return error("will not umount main data partition")
if mount_target not in parts["mounted"].values():
return error("not mounted")
cr = CommandRunner([UMOUNT_BIN, mount_target], block=True)
return success("Unmounting successful", data=cr.output)
def check_for_backup_process():
|
@app.route("/backup")
@requires_auth
def backup():
data = dict(cfg["config"])
data["operation"] = check_for_backup_process()
data["found"] = []
if get_partitions()["backup"] is not None:
for name in os.listdir("/media/backup"):
p = Path("/media/backup") / name
try:
size = (p / "size").open().read().strip().split()[0]
except FileNotFoundError:
continue
data["found"].append({
"name": name,
"created": p.stat().st_ctime,
"size": size
})
data["found"].sort(key=lambda x: x["created"], reverse=True)
return success(data=data)
#@app.route("/backup/cancel")
#def backup_cancel(name):
# global backup_proc
#
# subprocess.check_call(["killall", "nextcloud-nextbox.export"])
# #subprocess.check_call(["killall", "nextcloud-nextbox.import"])
#
# pass
@app.route("/backup/start")
@requires_auth
def backup_start():
global backup_proc
backup_info = check_for_backup_process()
parts = get_partitions()
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
if not parts["backup"]:
return error("no 'backup' storage mounted")
backup_proc = CommandRunner([BACKUP_EXPORT_BIN],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "backup"
return success("backup started", data=backup_info)
@app.route("/backup/restore/<name>")
@requires_auth
def backup_restore(name):
global backup_proc
backup_info = check_for_backup_process()
if ".." in name or "/" in name:
return error("invalid name", data=backup_info)
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
directory = f"/media/backup/{name}"
backup_proc = CommandRunner([BACKUP_IMPORT_BIN, directory],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "restore"
return success("restore started", data=backup_info)
@app.route("/service/<name>/<operation>")
@requires_auth
def service_operation(name, operation):
if name not in ["ddclient", "nextbox-daemon"]:
return error("not allowed")
if operation not in ["start", "restart", "status", "is-active"]:
return error("not allowed")
if name == "ddclient":
cr = CommandRunner([SYSTEMCTL_BIN, operation, DDCLIENT_SERVICE], block=True)
elif name == "nextbox-daemon":
cr = CommandRunner([SYSTEMCTL_BIN, operation, NEXTBOX_SERVICE], block=True)
else:
return error("not allowed")
output = [x for x in cr.output if x]
return success(data={
"service": name,
"operation": operation,
"return-code": cr.returncode,
"output": output
})
@app.route("/config", methods=["POST", "GET"])
@requires_auth
def handle_config():
if request.method == "GET":
data = dict(cfg["config"])
data["conf"] = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8").split("\n")
return success(data=data)
# save dyndns related values to configuration
elif request.method == "POST":
for key in request.form:
val = request.form.get(key)
if key == "conf":
old_conf = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8")
if old_conf != val:
log.info("writing ddclient config and restarting service")
Path(DDCLIENT_CONFIG_PATH).write_text(val, "utf-8")
service_operation("ddclient", "restart")
elif key in AVAIL_CONFIGS and val is not None:
if key == "dns_mode" and val not in DYNDNS_MODES:
log.warning(f"key: 'dns_mode' has invalid value: {val} - skipping")
continue
elif key == "domain":
job_queue.put("TrustedDomains")
elif val is None:
log.debug(f"skipping key: '{key}' -> no value provided")
continue
if val.lower() in ["true", "false"]:
val = val.lower() == "true"
cfg["config"][key] = val
log.debug(f"saving key: '{key}' with value: '{val}'")
cfg.save()
return success("DynDNS configuration saved")
@app.route("/dyndns/captcha", methods=["POST"])
@requires_auth
def dyndns_captcha():
req = urllib.request.Request(DYNDNS_DESEC_CAPTCHA, method="POST")
data = urllib.request.urlopen(req).read().decode("utf-8")
return success(data=json.loads(data))
@app.route("/dyndns/register", methods=["POST"])
@requires_auth
def dyndns_register():
data = {}
for key in request.form:
if key == "captcha_id":
data.setdefault("captcha", {})["id"] = request.form.get(key)
elif key == "captcha":
data.setdefault("captcha", {})["solution"] = request.form.get(key)
elif key in ["domain", "email"]:
data[key] = request.form.get(key)
data["password"] = None
headers = {"Content-Type": "application/json"}
req = urllib.request.Request(DYNDNS_DESEC_REGISTER,
method="POST", data=json.dumps(data).encode("utf-8"), headers=headers)
try:
res = urllib.request.urlopen(req).read().decode("utf-8")
except urllib.error.HTTPError as e:
desc = e.read()
return error(f"Could not complete registration", data=json.loads(desc))
return success(data=json.loads(res))
@app.route("/dyndns/test/ddclient")
@requires_auth
def test_ddclient():
cr = CommandRunner([DDCLIENT_BIN, "-verbose", "-foreground", "-force"], block=True)
cr.log_output()
for line in cr.output:
if "SUCCESS:" in line:
return success("DDClient test: OK")
if "Request was throttled" in line:
pat = "available in ([0-9]*) seconds"
try:
waitfor = int(re.search(pat, line).groups()[0]) + 5
except:
waitfor = 10
return error("DDClient test: Not OK",
data={"reason": "throttled", "waitfor": waitfor})
return error("DDClient test: Not OK", data={"reason": "unknown"})
@app.route("/dyndns/test/resolve/ipv6")
@app.route("/dyndns/test/resolve/ipv4")
@requires_auth
def test_resolve4():
ip_type = request.path.split("/")[-1]
domain = cfg["config"]["domain"]
resolve_ip = None
ext_ip = None
# to resolve un-cachedx
# we first flush all dns-related caches
CommandRunner([SYSTEMD_RESOLVE_BIN, "--flush-cache"], block=True)
CommandRunner([SYSTEMD_RESOLVE_BIN, "--reset-server-features"], block=True)
# resolving according to ip_type
try:
if ip_type == "ipv4":
resolve_ip = socket.gethostbyname(domain)
else:
resolve_ip = socket.getaddrinfo(domain, None, socket.AF_INET6)[0][-1][0]
except (socket.gaierror, IndexError) as e:
log.error(f"Could not resolve {ip_type}: {domain}")
log.error(f"Exception: {repr(e)}")
try:
url = GET_EXT_IP4_URL if ip_type == "ipv4" else GET_EXT_IP6_URL
ext_ip = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
log.error(f"Could not determine own {ip_type}")
log.error(f"Exception: {repr(e)}")
log.info(f"resolving '{domain}' to IP: {resolve_ip}, external IP: {ext_ip}")
data = {"ip": ext_ip, "resolve_ip": resolve_ip}
# if not both "resolve" and "getip" are successful, we have failed
if resolve_ip is None or ext_ip is None:
log.error(f"failed resolving and/or getting external {ip_type}")
return error("Resolve test: Not OK", data=data)
# resolving to wrong ip
if resolve_ip != ext_ip:
log.warning(f"Resolved {ip_type} does not match external {ip_type}")
log.warning("This might indicate a bad DynDNS configuration")
return error("Resolve test: Not OK", data=data)
# all good!
return success("Resolve test: OK", data=data)
@app.route("/dyndns/test/http")
@app.route("/dyndns/test/https")
@app.route("/dyndns/test/proxy")
@requires_auth
def test_http():
what = request.path.split("/")[-1]
if what == "proxy":
domain = cfg["config"]["proxy_domain"]
what = "https"
else:
domain = cfg["config"]["domain"]
url = f"{what}://{domain}"
try:
content = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
return error(f"Domain ({what}) test: Not OK",
data={"exc": repr(e)})
except ssl.CertificateError as e:
# this very likely is due to a bad certificate, disabling https
# @TODO: handle this case in frontend
return error(f"Domain ({what}) test: Not OK - Certificate Error",
data={"reason": "cert", "exc": repr(e)})
if "Nextcloud" in content:
return success(f"Domain ({what}) test: OK")
else:
return error(f"Domain ({what}) test: Not OK",
data={"exc": "none", "reason": "no Nextcloud in 'content'"})
@app.route("/dyndns/upnp")
@requires_auth
def setup_upnp():
import netifaces
import upnpclient
# get gateway ip
gw_ip = list(netifaces.gateways()['default'].values())[0][0]
# get devices (long operation)
devs = upnpclient.discover(timeout=0.1)
device = None
# filter out gateway
for dev in devs:
if dev._url_base.startswith(f"http://{gw_ip}"):
device = dev
break
if device is None:
return error("cannot find upnp-capable router")
# check for needed service
service = None
for srv in device.services:
if srv.name == "WANIPConn1":
service = srv
break
if service is None:
return error("found upnp-capable router - but w/o the needed service")
eth_ip = local_ip()
http_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=80,
NewProtocol='TCP', NewInternalPort=80, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTP', NewLeaseDuration=0)
https_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=443,
NewProtocol='TCP', NewInternalPort=443, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTPS',
NewLeaseDuration=0)
service.AddPortMapping(**http_args)
service.AddPortMapping(**https_args)
try:
service.GetSpecificPortMappingEntry(**http_args)
service.GetSpecificPortMappingEntry(**https_args)
except upnpclient.soap.SOAPError as e:
return error("failed setting up port-forwarding")
return success("port-forwarding successfully set up")
@app.route("/https/enable", methods=["POST"])
@requires_auth
def https_enable():
cleanup_certs()
domain = cfg.get("config", {}).get("domain")
email = cfg.get("config", {}).get("email")
if not domain or not email:
return error(f"failed, domain: '{domain}' email: '{email}'")
cmd = [ENABLE_HTTPS_BIN, "lets-encrypt", email, domain]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = 443
cfg.save()
return success("HTTPS enabled")
@app.route("/https/disable", methods=["POST"])
@requires_auth
def https_disable():
cmd = [DISABLE_HTTPS_BIN]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = None
cfg.save()
cleanup_certs()
return success("HTTPS disabled")
def signal_handler(signal, frame):
print("Exit handler, delivering worker exit job now")
job_queue.put("exit")
w.join()
print("Joined worker - exiting now...")
sys.exit(1)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
job_mgr = JobManager(cfg)
job_mgr.register_job(TrustedDomainsJob)
job_mgr.register_job(ProxySSHJob)
job_mgr.register_job(UpdateJob)
job_queue = Queue()
w = Worker(job_queue, job_mgr)
w.start()
app.run(host="0.0.0.0", port=18585, debug=True, threaded=True, processes=1, use_reloader=False)
# cat /sys/class/thermal/thermal_zone0/temp | global backup_proc
out = dict(cfg["config"])
if backup_proc is None:
out["running"] = False
return out
assert isinstance(backup_proc, CommandRunner)
backup_proc.get_new_output()
if backup_proc.finished:
if backup_proc.returncode == 0:
backup_proc.parsed["state"] = "finished"
cfg["config"]["last_" + backup_proc.user_info] = backup_proc.started
cfg.save()
out["last_" + backup_proc.user_info] = backup_proc.started
log.info("backup/restore process finished successfully")
else:
backup_proc.parsed["state"] = "failed: " + backup_proc.parsed.get("unable", "")
if "target" in backup_proc.parsed:
if os.path.exists(backup_proc.parsed["target"]):
shutil.rmtree(backup_proc.parsed["target"])
log.error("backup/restore process failed, logging output: ")
for line in backup_proc.output[-30:]:
log.error(line.replace("\n", ""))
out.update(dict(backup_proc.parsed))
out["returncode"] = backup_proc.returncode
out["running"] = backup_proc.running
out["what"] = backup_proc.user_info
if backup_proc.finished:
backup_proc = None
return out | identifier_body |
start.py | import os
import sys
import re
from pathlib import Path
from functools import wraps
import signal
import shutil
import socket
import urllib.request, urllib.error
import ssl
import json
# append proper (snap) site-packages path
sys.path.append("/snap/nextbox/current/lib/python3.6/site-packages")
from queue import Queue
from flask import Flask, render_template, request, flash, redirect, Response, \
url_for, send_file, Blueprint, render_template, jsonify, make_response
from nextbox_daemon.utils import get_partitions, error, success, \
tail, parse_backup_line, local_ip, cleanup_certs
from nextbox_daemon.command_runner import CommandRunner
from nextbox_daemon.consts import *
from nextbox_daemon.config import Config, log
from nextbox_daemon.worker import Worker
from nextbox_daemon.jobs import JobManager, TrustedDomainsJob, ProxySSHJob, UpdateJob
# config load
cfg = Config(CONFIG_PATH)
app = Flask(__name__)
app.secret_key = "123456-nextbox-123456" #cfg["secret_key"]
# backup thread handler
backup_proc = None
#@app.before_request
#def limit_remote_addr():
# if request.remote_addr != '10.20.30.40':
# abort(403) # Forbidden
#
### CORS section
@app.after_request
def after_request_func(response):
origin = request.headers.get('Origin')
response.headers.add('Access-Control-Allow-Credentials', 'true')
if request.method == 'OPTIONS':
response = make_response()
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Headers', 'x-csrf-token')
response.headers.add('Access-Control-Allow-Headers', 'requesttoken')
response.headers.add('Access-Control-Allow-Methods',
'GET, POST, OPTIONS, PUT, PATCH, DELETE')
if origin:
response.headers.add('Access-Control-Allow-Origin', origin)
else:
response.headers.add('Access-Control-Allow-Origin', request.remote_addr)
#response.headers.add('Access-Control-Allow-Origin', cfg["config"]["domain"])
#if not origin:
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.129")
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.47")
return response
### end CORS section
# decorator for authenticated access
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if request.remote_addr != "127.0.0.1":
# abort(403)
return error("not allowed")
return f(*args, **kwargs)
return decorated
@app.route("/overview")
def show_overview():
return success(data={
"storage": get_partitions(),
"backup": check_for_backup_process()
})
@app.route("/log")
@app.route("/log/<num_lines>")
@requires_auth
def show_log(num_lines=50):
ret = tail(LOG_FILENAME, num_lines)
return error(f"could not read log: {LOG_FILENAME}") if ret is None \
else success(data=ret[:-1])
@app.route("/system", methods=["POST", "GET"])
@requires_auth
def system_settings():
if request.method == "GET":
return success(data={
"log_lvl": cfg["config"]["log_lvl"],
"expert_mode": cfg["config"]["expert_mode"]
})
elif request.method == "POST":
pass
#
# @app.route("/token/<token>/<allow_ip>")
# def set_token(token, allow_ip):
#
# if request.remote_addr != "127.0.0.1":
# #abort(403)
# return error("not allowed")
#
# cfg["token"]["value"] = token
# cfg["token"]["created"] = time.time()
# cfg["token"]["ip"] = allow_ip
# save_config(cfg, CONFIG_PATH)
#
# return success()
@app.route("/storage")
@requires_auth
def storage():
parts = get_partitions()
return success(data=parts)
@app.route("/storage/mount/<device>")
@app.route("/storage/mount/<device>/<name>")
@requires_auth
def mount_storage(device, name=None):
parts = get_partitions()
if name is None:
print (parts)
for idx in range(1, 11):
_name = f"extra-{idx}"
mount_target = f"/media/{_name}"
if mount_target not in parts["mounted"].values():
name = _name
print(name)
break
if name is None:
return error("cannot determine mount target, too many mounts?")
if ".." in device or "/" in device or name == "nextcloud":
return error("invalid device")
if ".." in name or "/" in name:
return error("invalid name")
mount_target = f"/media/{name}"
mount_device = None
for avail in parts["available"]:
if Path(avail).name == device:
mount_device = avail
if not mount_device:
return error("device to mount not found")
if mount_device == parts["main"]:
return error("will not mount main data partition")
if mount_device in parts["mounted"]:
return error("already mounted")
if mount_target in parts["mounted"].values():
return error(f"target {mount_target} has been already mounted")
if not os.path.exists(mount_target):
os.makedirs(mount_target)
cr = CommandRunner([MOUNT_BIN, mount_device, mount_target], block=True)
if cr.returncode == 0:
return success("Mounting successful", data=cr.output)
else:
cr.log_output()
return error("Failed mounting, check logs...")
@app.route("/storage/umount/<name>")
@requires_auth
def umount_storage(name):
if ".." in name or "/" in name or name == "nextcloud":
return error("invalid name")
mount_target = f"/media/{name}"
parts = get_partitions()
if name == "nextcloud":
return error("will not umount main data partition")
if mount_target not in parts["mounted"].values():
return error("not mounted")
cr = CommandRunner([UMOUNT_BIN, mount_target], block=True)
return success("Unmounting successful", data=cr.output)
def | ():
global backup_proc
out = dict(cfg["config"])
if backup_proc is None:
out["running"] = False
return out
assert isinstance(backup_proc, CommandRunner)
backup_proc.get_new_output()
if backup_proc.finished:
if backup_proc.returncode == 0:
backup_proc.parsed["state"] = "finished"
cfg["config"]["last_" + backup_proc.user_info] = backup_proc.started
cfg.save()
out["last_" + backup_proc.user_info] = backup_proc.started
log.info("backup/restore process finished successfully")
else:
backup_proc.parsed["state"] = "failed: " + backup_proc.parsed.get("unable", "")
if "target" in backup_proc.parsed:
if os.path.exists(backup_proc.parsed["target"]):
shutil.rmtree(backup_proc.parsed["target"])
log.error("backup/restore process failed, logging output: ")
for line in backup_proc.output[-30:]:
log.error(line.replace("\n", ""))
out.update(dict(backup_proc.parsed))
out["returncode"] = backup_proc.returncode
out["running"] = backup_proc.running
out["what"] = backup_proc.user_info
if backup_proc.finished:
backup_proc = None
return out
@app.route("/backup")
@requires_auth
def backup():
data = dict(cfg["config"])
data["operation"] = check_for_backup_process()
data["found"] = []
if get_partitions()["backup"] is not None:
for name in os.listdir("/media/backup"):
p = Path("/media/backup") / name
try:
size = (p / "size").open().read().strip().split()[0]
except FileNotFoundError:
continue
data["found"].append({
"name": name,
"created": p.stat().st_ctime,
"size": size
})
data["found"].sort(key=lambda x: x["created"], reverse=True)
return success(data=data)
#@app.route("/backup/cancel")
#def backup_cancel(name):
# global backup_proc
#
# subprocess.check_call(["killall", "nextcloud-nextbox.export"])
# #subprocess.check_call(["killall", "nextcloud-nextbox.import"])
#
# pass
@app.route("/backup/start")
@requires_auth
def backup_start():
global backup_proc
backup_info = check_for_backup_process()
parts = get_partitions()
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
if not parts["backup"]:
return error("no 'backup' storage mounted")
backup_proc = CommandRunner([BACKUP_EXPORT_BIN],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "backup"
return success("backup started", data=backup_info)
@app.route("/backup/restore/<name>")
@requires_auth
def backup_restore(name):
global backup_proc
backup_info = check_for_backup_process()
if ".." in name or "/" in name:
return error("invalid name", data=backup_info)
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
directory = f"/media/backup/{name}"
backup_proc = CommandRunner([BACKUP_IMPORT_BIN, directory],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "restore"
return success("restore started", data=backup_info)
@app.route("/service/<name>/<operation>")
@requires_auth
def service_operation(name, operation):
if name not in ["ddclient", "nextbox-daemon"]:
return error("not allowed")
if operation not in ["start", "restart", "status", "is-active"]:
return error("not allowed")
if name == "ddclient":
cr = CommandRunner([SYSTEMCTL_BIN, operation, DDCLIENT_SERVICE], block=True)
elif name == "nextbox-daemon":
cr = CommandRunner([SYSTEMCTL_BIN, operation, NEXTBOX_SERVICE], block=True)
else:
return error("not allowed")
output = [x for x in cr.output if x]
return success(data={
"service": name,
"operation": operation,
"return-code": cr.returncode,
"output": output
})
@app.route("/config", methods=["POST", "GET"])
@requires_auth
def handle_config():
if request.method == "GET":
data = dict(cfg["config"])
data["conf"] = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8").split("\n")
return success(data=data)
# save dyndns related values to configuration
elif request.method == "POST":
for key in request.form:
val = request.form.get(key)
if key == "conf":
old_conf = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8")
if old_conf != val:
log.info("writing ddclient config and restarting service")
Path(DDCLIENT_CONFIG_PATH).write_text(val, "utf-8")
service_operation("ddclient", "restart")
elif key in AVAIL_CONFIGS and val is not None:
if key == "dns_mode" and val not in DYNDNS_MODES:
log.warning(f"key: 'dns_mode' has invalid value: {val} - skipping")
continue
elif key == "domain":
job_queue.put("TrustedDomains")
elif val is None:
log.debug(f"skipping key: '{key}' -> no value provided")
continue
if val.lower() in ["true", "false"]:
val = val.lower() == "true"
cfg["config"][key] = val
log.debug(f"saving key: '{key}' with value: '{val}'")
cfg.save()
return success("DynDNS configuration saved")
@app.route("/dyndns/captcha", methods=["POST"])
@requires_auth
def dyndns_captcha():
req = urllib.request.Request(DYNDNS_DESEC_CAPTCHA, method="POST")
data = urllib.request.urlopen(req).read().decode("utf-8")
return success(data=json.loads(data))
@app.route("/dyndns/register", methods=["POST"])
@requires_auth
def dyndns_register():
data = {}
for key in request.form:
if key == "captcha_id":
data.setdefault("captcha", {})["id"] = request.form.get(key)
elif key == "captcha":
data.setdefault("captcha", {})["solution"] = request.form.get(key)
elif key in ["domain", "email"]:
data[key] = request.form.get(key)
data["password"] = None
headers = {"Content-Type": "application/json"}
req = urllib.request.Request(DYNDNS_DESEC_REGISTER,
method="POST", data=json.dumps(data).encode("utf-8"), headers=headers)
try:
res = urllib.request.urlopen(req).read().decode("utf-8")
except urllib.error.HTTPError as e:
desc = e.read()
return error(f"Could not complete registration", data=json.loads(desc))
return success(data=json.loads(res))
@app.route("/dyndns/test/ddclient")
@requires_auth
def test_ddclient():
cr = CommandRunner([DDCLIENT_BIN, "-verbose", "-foreground", "-force"], block=True)
cr.log_output()
for line in cr.output:
if "SUCCESS:" in line:
return success("DDClient test: OK")
if "Request was throttled" in line:
pat = "available in ([0-9]*) seconds"
try:
waitfor = int(re.search(pat, line).groups()[0]) + 5
except:
waitfor = 10
return error("DDClient test: Not OK",
data={"reason": "throttled", "waitfor": waitfor})
return error("DDClient test: Not OK", data={"reason": "unknown"})
@app.route("/dyndns/test/resolve/ipv6")
@app.route("/dyndns/test/resolve/ipv4")
@requires_auth
def test_resolve4():
ip_type = request.path.split("/")[-1]
domain = cfg["config"]["domain"]
resolve_ip = None
ext_ip = None
# to resolve un-cachedx
# we first flush all dns-related caches
CommandRunner([SYSTEMD_RESOLVE_BIN, "--flush-cache"], block=True)
CommandRunner([SYSTEMD_RESOLVE_BIN, "--reset-server-features"], block=True)
# resolving according to ip_type
try:
if ip_type == "ipv4":
resolve_ip = socket.gethostbyname(domain)
else:
resolve_ip = socket.getaddrinfo(domain, None, socket.AF_INET6)[0][-1][0]
except (socket.gaierror, IndexError) as e:
log.error(f"Could not resolve {ip_type}: {domain}")
log.error(f"Exception: {repr(e)}")
try:
url = GET_EXT_IP4_URL if ip_type == "ipv4" else GET_EXT_IP6_URL
ext_ip = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
log.error(f"Could not determine own {ip_type}")
log.error(f"Exception: {repr(e)}")
log.info(f"resolving '{domain}' to IP: {resolve_ip}, external IP: {ext_ip}")
data = {"ip": ext_ip, "resolve_ip": resolve_ip}
# if not both "resolve" and "getip" are successful, we have failed
if resolve_ip is None or ext_ip is None:
log.error(f"failed resolving and/or getting external {ip_type}")
return error("Resolve test: Not OK", data=data)
# resolving to wrong ip
if resolve_ip != ext_ip:
log.warning(f"Resolved {ip_type} does not match external {ip_type}")
log.warning("This might indicate a bad DynDNS configuration")
return error("Resolve test: Not OK", data=data)
# all good!
return success("Resolve test: OK", data=data)
@app.route("/dyndns/test/http")
@app.route("/dyndns/test/https")
@app.route("/dyndns/test/proxy")
@requires_auth
def test_http():
what = request.path.split("/")[-1]
if what == "proxy":
domain = cfg["config"]["proxy_domain"]
what = "https"
else:
domain = cfg["config"]["domain"]
url = f"{what}://{domain}"
try:
content = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
return error(f"Domain ({what}) test: Not OK",
data={"exc": repr(e)})
except ssl.CertificateError as e:
# this very likely is due to a bad certificate, disabling https
# @TODO: handle this case in frontend
return error(f"Domain ({what}) test: Not OK - Certificate Error",
data={"reason": "cert", "exc": repr(e)})
if "Nextcloud" in content:
return success(f"Domain ({what}) test: OK")
else:
return error(f"Domain ({what}) test: Not OK",
data={"exc": "none", "reason": "no Nextcloud in 'content'"})
@app.route("/dyndns/upnp")
@requires_auth
def setup_upnp():
import netifaces
import upnpclient
# get gateway ip
gw_ip = list(netifaces.gateways()['default'].values())[0][0]
# get devices (long operation)
devs = upnpclient.discover(timeout=0.1)
device = None
# filter out gateway
for dev in devs:
if dev._url_base.startswith(f"http://{gw_ip}"):
device = dev
break
if device is None:
return error("cannot find upnp-capable router")
# check for needed service
service = None
for srv in device.services:
if srv.name == "WANIPConn1":
service = srv
break
if service is None:
return error("found upnp-capable router - but w/o the needed service")
eth_ip = local_ip()
http_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=80,
NewProtocol='TCP', NewInternalPort=80, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTP', NewLeaseDuration=0)
https_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=443,
NewProtocol='TCP', NewInternalPort=443, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTPS',
NewLeaseDuration=0)
service.AddPortMapping(**http_args)
service.AddPortMapping(**https_args)
try:
service.GetSpecificPortMappingEntry(**http_args)
service.GetSpecificPortMappingEntry(**https_args)
except upnpclient.soap.SOAPError as e:
return error("failed setting up port-forwarding")
return success("port-forwarding successfully set up")
@app.route("/https/enable", methods=["POST"])
@requires_auth
def https_enable():
cleanup_certs()
domain = cfg.get("config", {}).get("domain")
email = cfg.get("config", {}).get("email")
if not domain or not email:
return error(f"failed, domain: '{domain}' email: '{email}'")
cmd = [ENABLE_HTTPS_BIN, "lets-encrypt", email, domain]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = 443
cfg.save()
return success("HTTPS enabled")
@app.route("/https/disable", methods=["POST"])
@requires_auth
def https_disable():
cmd = [DISABLE_HTTPS_BIN]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = None
cfg.save()
cleanup_certs()
return success("HTTPS disabled")
def signal_handler(signal, frame):
print("Exit handler, delivering worker exit job now")
job_queue.put("exit")
w.join()
print("Joined worker - exiting now...")
sys.exit(1)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
job_mgr = JobManager(cfg)
job_mgr.register_job(TrustedDomainsJob)
job_mgr.register_job(ProxySSHJob)
job_mgr.register_job(UpdateJob)
job_queue = Queue()
w = Worker(job_queue, job_mgr)
w.start()
app.run(host="0.0.0.0", port=18585, debug=True, threaded=True, processes=1, use_reloader=False)
# cat /sys/class/thermal/thermal_zone0/temp | check_for_backup_process | identifier_name |
start.py | import os
import sys
import re
from pathlib import Path
from functools import wraps
import signal
import shutil
import socket
import urllib.request, urllib.error
import ssl
import json
# append proper (snap) site-packages path
sys.path.append("/snap/nextbox/current/lib/python3.6/site-packages")
from queue import Queue
from flask import Flask, render_template, request, flash, redirect, Response, \
url_for, send_file, Blueprint, render_template, jsonify, make_response
from nextbox_daemon.utils import get_partitions, error, success, \
tail, parse_backup_line, local_ip, cleanup_certs
from nextbox_daemon.command_runner import CommandRunner
from nextbox_daemon.consts import *
from nextbox_daemon.config import Config, log
from nextbox_daemon.worker import Worker
from nextbox_daemon.jobs import JobManager, TrustedDomainsJob, ProxySSHJob, UpdateJob
# config load
cfg = Config(CONFIG_PATH)
app = Flask(__name__)
app.secret_key = "123456-nextbox-123456" #cfg["secret_key"]
# backup thread handler
backup_proc = None
#@app.before_request
#def limit_remote_addr():
# if request.remote_addr != '10.20.30.40':
# abort(403) # Forbidden
#
### CORS section
@app.after_request
def after_request_func(response):
origin = request.headers.get('Origin')
response.headers.add('Access-Control-Allow-Credentials', 'true')
if request.method == 'OPTIONS':
response = make_response()
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Headers', 'x-csrf-token')
response.headers.add('Access-Control-Allow-Headers', 'requesttoken')
response.headers.add('Access-Control-Allow-Methods',
'GET, POST, OPTIONS, PUT, PATCH, DELETE')
if origin:
response.headers.add('Access-Control-Allow-Origin', origin)
else:
response.headers.add('Access-Control-Allow-Origin', request.remote_addr)
#response.headers.add('Access-Control-Allow-Origin', cfg["config"]["domain"])
#if not origin:
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.129")
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.47")
return response
### end CORS section
# decorator for authenticated access
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if request.remote_addr != "127.0.0.1":
# abort(403)
return error("not allowed")
return f(*args, **kwargs)
return decorated
@app.route("/overview")
def show_overview():
return success(data={
"storage": get_partitions(),
"backup": check_for_backup_process()
})
@app.route("/log")
@app.route("/log/<num_lines>")
@requires_auth
def show_log(num_lines=50):
ret = tail(LOG_FILENAME, num_lines)
return error(f"could not read log: {LOG_FILENAME}") if ret is None \
else success(data=ret[:-1])
@app.route("/system", methods=["POST", "GET"])
@requires_auth
def system_settings():
if request.method == "GET":
return success(data={
"log_lvl": cfg["config"]["log_lvl"],
"expert_mode": cfg["config"]["expert_mode"]
})
elif request.method == "POST":
pass
#
# @app.route("/token/<token>/<allow_ip>")
# def set_token(token, allow_ip):
#
# if request.remote_addr != "127.0.0.1":
# #abort(403)
# return error("not allowed")
#
# cfg["token"]["value"] = token
# cfg["token"]["created"] = time.time()
# cfg["token"]["ip"] = allow_ip
# save_config(cfg, CONFIG_PATH)
#
# return success()
@app.route("/storage")
@requires_auth
def storage():
parts = get_partitions()
return success(data=parts)
@app.route("/storage/mount/<device>")
@app.route("/storage/mount/<device>/<name>")
@requires_auth
def mount_storage(device, name=None):
parts = get_partitions()
if name is None:
print (parts)
for idx in range(1, 11):
_name = f"extra-{idx}"
mount_target = f"/media/{_name}"
if mount_target not in parts["mounted"].values():
name = _name
print(name)
break
if name is None:
return error("cannot determine mount target, too many mounts?")
if ".." in device or "/" in device or name == "nextcloud":
return error("invalid device")
if ".." in name or "/" in name:
return error("invalid name")
mount_target = f"/media/{name}"
mount_device = None
for avail in parts["available"]:
if Path(avail).name == device:
mount_device = avail
if not mount_device:
return error("device to mount not found")
if mount_device == parts["main"]:
return error("will not mount main data partition")
if mount_device in parts["mounted"]:
return error("already mounted")
if mount_target in parts["mounted"].values():
return error(f"target {mount_target} has been already mounted")
if not os.path.exists(mount_target):
os.makedirs(mount_target)
cr = CommandRunner([MOUNT_BIN, mount_device, mount_target], block=True)
if cr.returncode == 0:
return success("Mounting successful", data=cr.output)
else:
cr.log_output()
return error("Failed mounting, check logs...")
@app.route("/storage/umount/<name>")
@requires_auth
def umount_storage(name):
if ".." in name or "/" in name or name == "nextcloud":
return error("invalid name")
mount_target = f"/media/{name}"
parts = get_partitions()
if name == "nextcloud":
return error("will not umount main data partition")
if mount_target not in parts["mounted"].values():
return error("not mounted")
cr = CommandRunner([UMOUNT_BIN, mount_target], block=True)
return success("Unmounting successful", data=cr.output)
def check_for_backup_process():
global backup_proc
out = dict(cfg["config"])
if backup_proc is None:
out["running"] = False
return out
assert isinstance(backup_proc, CommandRunner)
backup_proc.get_new_output()
if backup_proc.finished:
if backup_proc.returncode == 0:
backup_proc.parsed["state"] = "finished"
cfg["config"]["last_" + backup_proc.user_info] = backup_proc.started
cfg.save()
out["last_" + backup_proc.user_info] = backup_proc.started
log.info("backup/restore process finished successfully")
else:
backup_proc.parsed["state"] = "failed: " + backup_proc.parsed.get("unable", "")
if "target" in backup_proc.parsed:
if os.path.exists(backup_proc.parsed["target"]):
shutil.rmtree(backup_proc.parsed["target"])
log.error("backup/restore process failed, logging output: ")
for line in backup_proc.output[-30:]:
log.error(line.replace("\n", ""))
out.update(dict(backup_proc.parsed))
out["returncode"] = backup_proc.returncode
out["running"] = backup_proc.running
out["what"] = backup_proc.user_info
if backup_proc.finished:
backup_proc = None
return out
@app.route("/backup")
@requires_auth
def backup():
data = dict(cfg["config"])
data["operation"] = check_for_backup_process()
data["found"] = []
if get_partitions()["backup"] is not None:
for name in os.listdir("/media/backup"):
p = Path("/media/backup") / name
try:
size = (p / "size").open().read().strip().split()[0]
except FileNotFoundError:
continue
data["found"].append({
"name": name,
"created": p.stat().st_ctime,
"size": size
})
data["found"].sort(key=lambda x: x["created"], reverse=True)
return success(data=data)
#@app.route("/backup/cancel")
#def backup_cancel(name):
# global backup_proc
#
# subprocess.check_call(["killall", "nextcloud-nextbox.export"])
# #subprocess.check_call(["killall", "nextcloud-nextbox.import"])
#
# pass
@app.route("/backup/start")
@requires_auth
def backup_start():
global backup_proc
backup_info = check_for_backup_process()
parts = get_partitions()
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
if not parts["backup"]:
return error("no 'backup' storage mounted")
backup_proc = CommandRunner([BACKUP_EXPORT_BIN],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "backup"
return success("backup started", data=backup_info)
@app.route("/backup/restore/<name>")
@requires_auth
def backup_restore(name):
global backup_proc
backup_info = check_for_backup_process()
if ".." in name or "/" in name:
return error("invalid name", data=backup_info)
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
directory = f"/media/backup/{name}"
backup_proc = CommandRunner([BACKUP_IMPORT_BIN, directory],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "restore"
return success("restore started", data=backup_info)
@app.route("/service/<name>/<operation>")
@requires_auth
def service_operation(name, operation):
if name not in ["ddclient", "nextbox-daemon"]:
return error("not allowed")
if operation not in ["start", "restart", "status", "is-active"]:
return error("not allowed")
if name == "ddclient":
cr = CommandRunner([SYSTEMCTL_BIN, operation, DDCLIENT_SERVICE], block=True)
elif name == "nextbox-daemon":
cr = CommandRunner([SYSTEMCTL_BIN, operation, NEXTBOX_SERVICE], block=True)
else:
return error("not allowed")
output = [x for x in cr.output if x]
return success(data={
"service": name,
"operation": operation,
"return-code": cr.returncode,
"output": output
})
@app.route("/config", methods=["POST", "GET"])
@requires_auth
def handle_config():
if request.method == "GET":
|
# save dyndns related values to configuration
elif request.method == "POST":
for key in request.form:
val = request.form.get(key)
if key == "conf":
old_conf = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8")
if old_conf != val:
log.info("writing ddclient config and restarting service")
Path(DDCLIENT_CONFIG_PATH).write_text(val, "utf-8")
service_operation("ddclient", "restart")
elif key in AVAIL_CONFIGS and val is not None:
if key == "dns_mode" and val not in DYNDNS_MODES:
log.warning(f"key: 'dns_mode' has invalid value: {val} - skipping")
continue
elif key == "domain":
job_queue.put("TrustedDomains")
elif val is None:
log.debug(f"skipping key: '{key}' -> no value provided")
continue
if val.lower() in ["true", "false"]:
val = val.lower() == "true"
cfg["config"][key] = val
log.debug(f"saving key: '{key}' with value: '{val}'")
cfg.save()
return success("DynDNS configuration saved")
@app.route("/dyndns/captcha", methods=["POST"])
@requires_auth
def dyndns_captcha():
req = urllib.request.Request(DYNDNS_DESEC_CAPTCHA, method="POST")
data = urllib.request.urlopen(req).read().decode("utf-8")
return success(data=json.loads(data))
@app.route("/dyndns/register", methods=["POST"])
@requires_auth
def dyndns_register():
data = {}
for key in request.form:
if key == "captcha_id":
data.setdefault("captcha", {})["id"] = request.form.get(key)
elif key == "captcha":
data.setdefault("captcha", {})["solution"] = request.form.get(key)
elif key in ["domain", "email"]:
data[key] = request.form.get(key)
data["password"] = None
headers = {"Content-Type": "application/json"}
req = urllib.request.Request(DYNDNS_DESEC_REGISTER,
method="POST", data=json.dumps(data).encode("utf-8"), headers=headers)
try:
res = urllib.request.urlopen(req).read().decode("utf-8")
except urllib.error.HTTPError as e:
desc = e.read()
return error(f"Could not complete registration", data=json.loads(desc))
return success(data=json.loads(res))
@app.route("/dyndns/test/ddclient")
@requires_auth
def test_ddclient():
cr = CommandRunner([DDCLIENT_BIN, "-verbose", "-foreground", "-force"], block=True)
cr.log_output()
for line in cr.output:
if "SUCCESS:" in line:
return success("DDClient test: OK")
if "Request was throttled" in line:
pat = "available in ([0-9]*) seconds"
try:
waitfor = int(re.search(pat, line).groups()[0]) + 5
except:
waitfor = 10
return error("DDClient test: Not OK",
data={"reason": "throttled", "waitfor": waitfor})
return error("DDClient test: Not OK", data={"reason": "unknown"})
@app.route("/dyndns/test/resolve/ipv6")
@app.route("/dyndns/test/resolve/ipv4")
@requires_auth
def test_resolve4():
ip_type = request.path.split("/")[-1]
domain = cfg["config"]["domain"]
resolve_ip = None
ext_ip = None
# to resolve un-cachedx
# we first flush all dns-related caches
CommandRunner([SYSTEMD_RESOLVE_BIN, "--flush-cache"], block=True)
CommandRunner([SYSTEMD_RESOLVE_BIN, "--reset-server-features"], block=True)
# resolving according to ip_type
try:
if ip_type == "ipv4":
resolve_ip = socket.gethostbyname(domain)
else:
resolve_ip = socket.getaddrinfo(domain, None, socket.AF_INET6)[0][-1][0]
except (socket.gaierror, IndexError) as e:
log.error(f"Could not resolve {ip_type}: {domain}")
log.error(f"Exception: {repr(e)}")
try:
url = GET_EXT_IP4_URL if ip_type == "ipv4" else GET_EXT_IP6_URL
ext_ip = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
log.error(f"Could not determine own {ip_type}")
log.error(f"Exception: {repr(e)}")
log.info(f"resolving '{domain}' to IP: {resolve_ip}, external IP: {ext_ip}")
data = {"ip": ext_ip, "resolve_ip": resolve_ip}
# if not both "resolve" and "getip" are successful, we have failed
if resolve_ip is None or ext_ip is None:
log.error(f"failed resolving and/or getting external {ip_type}")
return error("Resolve test: Not OK", data=data)
# resolving to wrong ip
if resolve_ip != ext_ip:
log.warning(f"Resolved {ip_type} does not match external {ip_type}")
log.warning("This might indicate a bad DynDNS configuration")
return error("Resolve test: Not OK", data=data)
# all good!
return success("Resolve test: OK", data=data)
@app.route("/dyndns/test/http")
@app.route("/dyndns/test/https")
@app.route("/dyndns/test/proxy")
@requires_auth
def test_http():
what = request.path.split("/")[-1]
if what == "proxy":
domain = cfg["config"]["proxy_domain"]
what = "https"
else:
domain = cfg["config"]["domain"]
url = f"{what}://{domain}"
try:
content = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
return error(f"Domain ({what}) test: Not OK",
data={"exc": repr(e)})
except ssl.CertificateError as e:
# this very likely is due to a bad certificate, disabling https
# @TODO: handle this case in frontend
return error(f"Domain ({what}) test: Not OK - Certificate Error",
data={"reason": "cert", "exc": repr(e)})
if "Nextcloud" in content:
return success(f"Domain ({what}) test: OK")
else:
return error(f"Domain ({what}) test: Not OK",
data={"exc": "none", "reason": "no Nextcloud in 'content'"})
@app.route("/dyndns/upnp")
@requires_auth
def setup_upnp():
import netifaces
import upnpclient
# get gateway ip
gw_ip = list(netifaces.gateways()['default'].values())[0][0]
# get devices (long operation)
devs = upnpclient.discover(timeout=0.1)
device = None
# filter out gateway
for dev in devs:
if dev._url_base.startswith(f"http://{gw_ip}"):
device = dev
break
if device is None:
return error("cannot find upnp-capable router")
# check for needed service
service = None
for srv in device.services:
if srv.name == "WANIPConn1":
service = srv
break
if service is None:
return error("found upnp-capable router - but w/o the needed service")
eth_ip = local_ip()
http_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=80,
NewProtocol='TCP', NewInternalPort=80, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTP', NewLeaseDuration=0)
https_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=443,
NewProtocol='TCP', NewInternalPort=443, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTPS',
NewLeaseDuration=0)
service.AddPortMapping(**http_args)
service.AddPortMapping(**https_args)
try:
service.GetSpecificPortMappingEntry(**http_args)
service.GetSpecificPortMappingEntry(**https_args)
except upnpclient.soap.SOAPError as e:
return error("failed setting up port-forwarding")
return success("port-forwarding successfully set up")
@app.route("/https/enable", methods=["POST"])
@requires_auth
def https_enable():
cleanup_certs()
domain = cfg.get("config", {}).get("domain")
email = cfg.get("config", {}).get("email")
if not domain or not email:
return error(f"failed, domain: '{domain}' email: '{email}'")
cmd = [ENABLE_HTTPS_BIN, "lets-encrypt", email, domain]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = 443
cfg.save()
return success("HTTPS enabled")
@app.route("/https/disable", methods=["POST"])
@requires_auth
def https_disable():
cmd = [DISABLE_HTTPS_BIN]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = None
cfg.save()
cleanup_certs()
return success("HTTPS disabled")
def signal_handler(signal, frame):
print("Exit handler, delivering worker exit job now")
job_queue.put("exit")
w.join()
print("Joined worker - exiting now...")
sys.exit(1)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
job_mgr = JobManager(cfg)
job_mgr.register_job(TrustedDomainsJob)
job_mgr.register_job(ProxySSHJob)
job_mgr.register_job(UpdateJob)
job_queue = Queue()
w = Worker(job_queue, job_mgr)
w.start()
app.run(host="0.0.0.0", port=18585, debug=True, threaded=True, processes=1, use_reloader=False)
# cat /sys/class/thermal/thermal_zone0/temp | data = dict(cfg["config"])
data["conf"] = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8").split("\n")
return success(data=data) | conditional_block |
start.py | import os
import sys
import re
from pathlib import Path
from functools import wraps
import signal
import shutil
import socket
import urllib.request, urllib.error
import ssl
import json
# append proper (snap) site-packages path
sys.path.append("/snap/nextbox/current/lib/python3.6/site-packages")
from queue import Queue
from flask import Flask, render_template, request, flash, redirect, Response, \
url_for, send_file, Blueprint, render_template, jsonify, make_response
from nextbox_daemon.utils import get_partitions, error, success, \
tail, parse_backup_line, local_ip, cleanup_certs
from nextbox_daemon.command_runner import CommandRunner
from nextbox_daemon.consts import *
from nextbox_daemon.config import Config, log
from nextbox_daemon.worker import Worker
from nextbox_daemon.jobs import JobManager, TrustedDomainsJob, ProxySSHJob, UpdateJob
# config load
cfg = Config(CONFIG_PATH)
app = Flask(__name__)
app.secret_key = "123456-nextbox-123456" #cfg["secret_key"]
# backup thread handler
backup_proc = None
#@app.before_request
#def limit_remote_addr():
# if request.remote_addr != '10.20.30.40':
# abort(403) # Forbidden
#
### CORS section
@app.after_request
def after_request_func(response):
origin = request.headers.get('Origin')
response.headers.add('Access-Control-Allow-Credentials', 'true')
if request.method == 'OPTIONS':
response = make_response()
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Headers', 'x-csrf-token')
response.headers.add('Access-Control-Allow-Headers', 'requesttoken')
response.headers.add('Access-Control-Allow-Methods',
'GET, POST, OPTIONS, PUT, PATCH, DELETE')
if origin:
response.headers.add('Access-Control-Allow-Origin', origin)
else:
response.headers.add('Access-Control-Allow-Origin', request.remote_addr)
#response.headers.add('Access-Control-Allow-Origin', cfg["config"]["domain"])
#if not origin:
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.129")
# response.headers.add('Access-Control-Allow-Origin', "192.168.10.47")
return response
### end CORS section
# decorator for authenticated access
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if request.remote_addr != "127.0.0.1":
# abort(403)
return error("not allowed")
return f(*args, **kwargs)
return decorated
@app.route("/overview")
def show_overview():
return success(data={
"storage": get_partitions(),
"backup": check_for_backup_process()
})
@app.route("/log")
@app.route("/log/<num_lines>")
@requires_auth
def show_log(num_lines=50):
ret = tail(LOG_FILENAME, num_lines)
return error(f"could not read log: {LOG_FILENAME}") if ret is None \
else success(data=ret[:-1])
@app.route("/system", methods=["POST", "GET"])
@requires_auth
def system_settings():
if request.method == "GET":
return success(data={
"log_lvl": cfg["config"]["log_lvl"],
"expert_mode": cfg["config"]["expert_mode"]
})
elif request.method == "POST":
pass
#
# @app.route("/token/<token>/<allow_ip>")
# def set_token(token, allow_ip):
#
# if request.remote_addr != "127.0.0.1":
# #abort(403)
# return error("not allowed")
#
# cfg["token"]["value"] = token
# cfg["token"]["created"] = time.time()
# cfg["token"]["ip"] = allow_ip
# save_config(cfg, CONFIG_PATH)
#
# return success()
@app.route("/storage")
@requires_auth
def storage():
parts = get_partitions()
return success(data=parts)
@app.route("/storage/mount/<device>")
@app.route("/storage/mount/<device>/<name>")
@requires_auth
def mount_storage(device, name=None):
parts = get_partitions()
if name is None:
print (parts)
for idx in range(1, 11):
_name = f"extra-{idx}"
mount_target = f"/media/{_name}"
if mount_target not in parts["mounted"].values():
name = _name
print(name)
break
if name is None:
return error("cannot determine mount target, too many mounts?")
if ".." in device or "/" in device or name == "nextcloud":
return error("invalid device")
if ".." in name or "/" in name:
return error("invalid name")
mount_target = f"/media/{name}"
mount_device = None
for avail in parts["available"]:
if Path(avail).name == device:
mount_device = avail
if not mount_device:
return error("device to mount not found")
if mount_device == parts["main"]:
return error("will not mount main data partition")
if mount_device in parts["mounted"]:
return error("already mounted")
if mount_target in parts["mounted"].values():
return error(f"target {mount_target} has been already mounted")
if not os.path.exists(mount_target):
os.makedirs(mount_target)
cr = CommandRunner([MOUNT_BIN, mount_device, mount_target], block=True)
if cr.returncode == 0:
return success("Mounting successful", data=cr.output)
else:
cr.log_output()
return error("Failed mounting, check logs...")
@app.route("/storage/umount/<name>")
@requires_auth
def umount_storage(name):
if ".." in name or "/" in name or name == "nextcloud":
return error("invalid name")
mount_target = f"/media/{name}"
parts = get_partitions()
if name == "nextcloud":
return error("will not umount main data partition")
if mount_target not in parts["mounted"].values():
return error("not mounted")
cr = CommandRunner([UMOUNT_BIN, mount_target], block=True)
return success("Unmounting successful", data=cr.output)
def check_for_backup_process():
global backup_proc
out = dict(cfg["config"])
if backup_proc is None:
out["running"] = False
return out
assert isinstance(backup_proc, CommandRunner)
backup_proc.get_new_output()
if backup_proc.finished:
if backup_proc.returncode == 0:
backup_proc.parsed["state"] = "finished"
cfg["config"]["last_" + backup_proc.user_info] = backup_proc.started
cfg.save()
out["last_" + backup_proc.user_info] = backup_proc.started
log.info("backup/restore process finished successfully")
else:
backup_proc.parsed["state"] = "failed: " + backup_proc.parsed.get("unable", "")
if "target" in backup_proc.parsed:
if os.path.exists(backup_proc.parsed["target"]):
shutil.rmtree(backup_proc.parsed["target"])
log.error("backup/restore process failed, logging output: ")
for line in backup_proc.output[-30:]:
log.error(line.replace("\n", ""))
out.update(dict(backup_proc.parsed))
out["returncode"] = backup_proc.returncode
out["running"] = backup_proc.running
out["what"] = backup_proc.user_info
if backup_proc.finished:
backup_proc = None
return out
@app.route("/backup")
@requires_auth
def backup():
data = dict(cfg["config"])
data["operation"] = check_for_backup_process()
data["found"] = []
if get_partitions()["backup"] is not None:
for name in os.listdir("/media/backup"):
p = Path("/media/backup") / name
try:
size = (p / "size").open().read().strip().split()[0]
except FileNotFoundError:
continue
data["found"].append({
"name": name,
"created": p.stat().st_ctime,
"size": size
})
data["found"].sort(key=lambda x: x["created"], reverse=True)
return success(data=data)
#@app.route("/backup/cancel")
#def backup_cancel(name):
# global backup_proc
#
# subprocess.check_call(["killall", "nextcloud-nextbox.export"])
# #subprocess.check_call(["killall", "nextcloud-nextbox.import"])
#
# pass
@app.route("/backup/start")
@requires_auth
def backup_start():
global backup_proc
backup_info = check_for_backup_process()
parts = get_partitions()
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
if not parts["backup"]:
return error("no 'backup' storage mounted")
backup_proc = CommandRunner([BACKUP_EXPORT_BIN],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "backup"
return success("backup started", data=backup_info)
@app.route("/backup/restore/<name>")
@requires_auth
def backup_restore(name):
global backup_proc
backup_info = check_for_backup_process()
if ".." in name or "/" in name:
return error("invalid name", data=backup_info)
if backup_info["running"]:
return error("backup/restore operation already running", data=backup_info)
directory = f"/media/backup/{name}"
backup_proc = CommandRunner([BACKUP_IMPORT_BIN, directory],
cb_parse=parse_backup_line, block=False)
backup_proc.user_info = "restore"
return success("restore started", data=backup_info)
@app.route("/service/<name>/<operation>")
@requires_auth
def service_operation(name, operation):
if name not in ["ddclient", "nextbox-daemon"]:
return error("not allowed")
if operation not in ["start", "restart", "status", "is-active"]:
return error("not allowed")
if name == "ddclient":
cr = CommandRunner([SYSTEMCTL_BIN, operation, DDCLIENT_SERVICE], block=True)
elif name == "nextbox-daemon":
cr = CommandRunner([SYSTEMCTL_BIN, operation, NEXTBOX_SERVICE], block=True)
else:
return error("not allowed")
output = [x for x in cr.output if x]
return success(data={
"service": name,
"operation": operation,
"return-code": cr.returncode,
"output": output
})
@app.route("/config", methods=["POST", "GET"])
@requires_auth
def handle_config():
if request.method == "GET":
data = dict(cfg["config"])
data["conf"] = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8").split("\n")
return success(data=data)
# save dyndns related values to configuration
elif request.method == "POST":
for key in request.form:
val = request.form.get(key)
if key == "conf":
old_conf = Path(DDCLIENT_CONFIG_PATH).read_text("utf-8")
if old_conf != val:
log.info("writing ddclient config and restarting service")
Path(DDCLIENT_CONFIG_PATH).write_text(val, "utf-8")
service_operation("ddclient", "restart")
elif key in AVAIL_CONFIGS and val is not None:
if key == "dns_mode" and val not in DYNDNS_MODES:
log.warning(f"key: 'dns_mode' has invalid value: {val} - skipping")
continue
elif key == "domain":
job_queue.put("TrustedDomains")
elif val is None:
log.debug(f"skipping key: '{key}' -> no value provided")
continue
if val.lower() in ["true", "false"]:
val = val.lower() == "true"
cfg["config"][key] = val
log.debug(f"saving key: '{key}' with value: '{val}'")
cfg.save()
return success("DynDNS configuration saved")
@app.route("/dyndns/captcha", methods=["POST"])
@requires_auth
def dyndns_captcha():
req = urllib.request.Request(DYNDNS_DESEC_CAPTCHA, method="POST")
data = urllib.request.urlopen(req).read().decode("utf-8")
return success(data=json.loads(data))
@app.route("/dyndns/register", methods=["POST"])
@requires_auth
def dyndns_register():
data = {}
for key in request.form:
if key == "captcha_id":
data.setdefault("captcha", {})["id"] = request.form.get(key)
elif key == "captcha":
data.setdefault("captcha", {})["solution"] = request.form.get(key)
elif key in ["domain", "email"]:
data[key] = request.form.get(key)
data["password"] = None
headers = {"Content-Type": "application/json"}
req = urllib.request.Request(DYNDNS_DESEC_REGISTER,
method="POST", data=json.dumps(data).encode("utf-8"), headers=headers)
try:
res = urllib.request.urlopen(req).read().decode("utf-8")
except urllib.error.HTTPError as e:
desc = e.read()
return error(f"Could not complete registration", data=json.loads(desc))
return success(data=json.loads(res))
@app.route("/dyndns/test/ddclient")
@requires_auth
def test_ddclient():
cr = CommandRunner([DDCLIENT_BIN, "-verbose", "-foreground", "-force"], block=True)
cr.log_output()
for line in cr.output:
if "SUCCESS:" in line:
return success("DDClient test: OK")
if "Request was throttled" in line:
pat = "available in ([0-9]*) seconds"
try:
waitfor = int(re.search(pat, line).groups()[0]) + 5
except:
waitfor = 10
return error("DDClient test: Not OK",
data={"reason": "throttled", "waitfor": waitfor})
return error("DDClient test: Not OK", data={"reason": "unknown"})
@app.route("/dyndns/test/resolve/ipv6")
@app.route("/dyndns/test/resolve/ipv4")
@requires_auth
def test_resolve4():
ip_type = request.path.split("/")[-1]
domain = cfg["config"]["domain"]
resolve_ip = None
ext_ip = None
# to resolve un-cachedx
# we first flush all dns-related caches
CommandRunner([SYSTEMD_RESOLVE_BIN, "--flush-cache"], block=True)
CommandRunner([SYSTEMD_RESOLVE_BIN, "--reset-server-features"], block=True)
# resolving according to ip_type
try:
if ip_type == "ipv4":
resolve_ip = socket.gethostbyname(domain)
else:
resolve_ip = socket.getaddrinfo(domain, None, socket.AF_INET6)[0][-1][0]
except (socket.gaierror, IndexError) as e:
log.error(f"Could not resolve {ip_type}: {domain}")
log.error(f"Exception: {repr(e)}")
try:
url = GET_EXT_IP4_URL if ip_type == "ipv4" else GET_EXT_IP6_URL
ext_ip = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
log.error(f"Could not determine own {ip_type}")
log.error(f"Exception: {repr(e)}")
log.info(f"resolving '{domain}' to IP: {resolve_ip}, external IP: {ext_ip}")
data = {"ip": ext_ip, "resolve_ip": resolve_ip}
# if not both "resolve" and "getip" are successful, we have failed
if resolve_ip is None or ext_ip is None:
log.error(f"failed resolving and/or getting external {ip_type}")
return error("Resolve test: Not OK", data=data)
# resolving to wrong ip
if resolve_ip != ext_ip:
log.warning(f"Resolved {ip_type} does not match external {ip_type}")
log.warning("This might indicate a bad DynDNS configuration")
return error("Resolve test: Not OK", data=data)
# all good!
return success("Resolve test: OK", data=data)
| @app.route("/dyndns/test/proxy")
@requires_auth
def test_http():
what = request.path.split("/")[-1]
if what == "proxy":
domain = cfg["config"]["proxy_domain"]
what = "https"
else:
domain = cfg["config"]["domain"]
url = f"{what}://{domain}"
try:
content = urllib.request.urlopen(url).read().decode("utf-8")
except urllib.error.URLError as e:
return error(f"Domain ({what}) test: Not OK",
data={"exc": repr(e)})
except ssl.CertificateError as e:
# this very likely is due to a bad certificate, disabling https
# @TODO: handle this case in frontend
return error(f"Domain ({what}) test: Not OK - Certificate Error",
data={"reason": "cert", "exc": repr(e)})
if "Nextcloud" in content:
return success(f"Domain ({what}) test: OK")
else:
return error(f"Domain ({what}) test: Not OK",
data={"exc": "none", "reason": "no Nextcloud in 'content'"})
@app.route("/dyndns/upnp")
@requires_auth
def setup_upnp():
import netifaces
import upnpclient
# get gateway ip
gw_ip = list(netifaces.gateways()['default'].values())[0][0]
# get devices (long operation)
devs = upnpclient.discover(timeout=0.1)
device = None
# filter out gateway
for dev in devs:
if dev._url_base.startswith(f"http://{gw_ip}"):
device = dev
break
if device is None:
return error("cannot find upnp-capable router")
# check for needed service
service = None
for srv in device.services:
if srv.name == "WANIPConn1":
service = srv
break
if service is None:
return error("found upnp-capable router - but w/o the needed service")
eth_ip = local_ip()
http_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=80,
NewProtocol='TCP', NewInternalPort=80, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTP', NewLeaseDuration=0)
https_args = dict(NewRemoteHost='0.0.0.0', NewExternalPort=443,
NewProtocol='TCP', NewInternalPort=443, NewInternalClient=eth_ip,
NewEnabled='1', NewPortMappingDescription='NextBox - HTTPS',
NewLeaseDuration=0)
service.AddPortMapping(**http_args)
service.AddPortMapping(**https_args)
try:
service.GetSpecificPortMappingEntry(**http_args)
service.GetSpecificPortMappingEntry(**https_args)
except upnpclient.soap.SOAPError as e:
return error("failed setting up port-forwarding")
return success("port-forwarding successfully set up")
@app.route("/https/enable", methods=["POST"])
@requires_auth
def https_enable():
cleanup_certs()
domain = cfg.get("config", {}).get("domain")
email = cfg.get("config", {}).get("email")
if not domain or not email:
return error(f"failed, domain: '{domain}' email: '{email}'")
cmd = [ENABLE_HTTPS_BIN, "lets-encrypt", email, domain]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = 443
cfg.save()
return success("HTTPS enabled")
@app.route("/https/disable", methods=["POST"])
@requires_auth
def https_disable():
cmd = [DISABLE_HTTPS_BIN]
cr = CommandRunner(cmd, block=True)
cr.log_output()
cfg["config"]["https_port"] = None
cfg.save()
cleanup_certs()
return success("HTTPS disabled")
def signal_handler(signal, frame):
print("Exit handler, delivering worker exit job now")
job_queue.put("exit")
w.join()
print("Joined worker - exiting now...")
sys.exit(1)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
job_mgr = JobManager(cfg)
job_mgr.register_job(TrustedDomainsJob)
job_mgr.register_job(ProxySSHJob)
job_mgr.register_job(UpdateJob)
job_queue = Queue()
w = Worker(job_queue, job_mgr)
w.start()
app.run(host="0.0.0.0", port=18585, debug=True, threaded=True, processes=1, use_reloader=False)
# cat /sys/class/thermal/thermal_zone0/temp | @app.route("/dyndns/test/http")
@app.route("/dyndns/test/https") | random_line_split |
Исследование_1705.py | # ## Определения
from operator import itemgetter
import bz2, json
from collections import defaultdict
import math
from bisect import bisect_left
WORD_LEN_COEFF = 1
THRESHOLD_COEFF = 0.5
DROP = 1.5
DROP_1 = 2
AFFIX_LEN = 1
THRESHOLD_OSTAT = 0.5
N = 5 # шлейфовый порог
trie, voc, words, prob, word_count, average_word_len = None, None, None, None, None, None
prefix_trie, informants, len_search = None, None, None
def main():
global trie, prefix_trie, words, voc, prob, average_word_len, informants, len_search
print("Загружаю словарь...", end='')
voc = load_voc()
words = sorted(list(voc.keys()))
word_count = sum([voc[k] for k in voc])
average_word_len = sum([len(w) * voc[w] for w in words]) / word_count
len_search = int(average_word_len * WORD_LEN_COEFF) # это максимальная разрешенная длина аффикса
print("{} словоформ, {} словоупотреблений, средняя длина слова {} ".format(len(words), word_count, average_word_len))
# загрузка безусловных вероятностей букв и деревьев
print("Загружаю деревья...", end='')
prob = json.load(open("prob.json", encoding="utf-8"))
strie = bz2.BZ2File('trie.json.bz2', 'r').read().decode(encoding='utf-8')
trie = json.loads(strie)
del strie
prefix_trie = json.load(open("prefix_trie.json"))
print(', ok')
print("Безусловные вероятности первых 10 букв:\n========================")
print(", ".join(map(lambda pair: "{}: {:.4f}".format(*pair),
sorted([(letter, nv) for letter, nv in prob.items()],
key=itemgetter(1),
reverse=True)[:10])))
print("Подсчитываю условные вероятности букв...", end='')
cond_prob = build_cond_prob(voc, prob, len_search)
print(', ok')
# информанты - это буквы с макс значением КФ в каждой позиции
informants = find_informants(prob, cond_prob, len_search)
print("ИНФОРМАНТЫ:\n===================")
print(informants)
def load_voc():
# Загрузить словарь количеств из файла.
# Словарь содержит частоты слов в виде {слово: число вхождений в корпус, ... },
# например {"көппөҕү" : 4, "хазар" : 3, ...}
#
# корпус в формате txt занимет 174 МБ, словарь частот в json 10,4МБ,
# после сжатия в формат .bz2 1,7 МБ
svoc = bz2.BZ2File('voc.json.bz2', 'r').read().decode(encoding='utf-8')
voc = json.loads(svoc)
del svoc
return voc
def build_trie_and_prob(voc):
# подсчитываем частоты букв и строим дерево оконочаний
prob = defaultdict(lambda: 0)
trie = {'n': 0}
for w, n in voc.items(): # для каждого слова в списке
word = w[::-1] # переворачиваем слово, читаем слово с конца
current_dict = trie
trie['n'] += n
for letter in word: # для буквы в слове
prob[letter] += n
current_dict = current_dict.setdefault(letter, {'n': 0}) # получить значение из словаря по ключу.
# Автоматически добавляет элемент словаря, если он отсутствует.
current_dict['n'] += n
current_dict['#'] = n
total = sum([n for n in prob.values()]) # 84263863
for k, v in prob.items():
prob[k] = v / total
return trie, prob
def build_cond_prob(voc, prob, len_search):
letters = list(prob.keys())
cond_prob = defaultdict(lambda: 0) # словарь для условных вероятностей
total = defaultdict(lambda: 0)
for word, n in voc.items(): # для слова в словаре
positions = range(-min(len_search, len(word) - 2), 0) # from -7 to 0
for i in positions:
cond_prob[(i, word[i])] += n
total[i] += n # dictionary with prob of char words?
for posChar in cond_prob: # получаем из частот вероятности
i = posChar[0]
cond_prob[posChar] /= total[i]
return cond_prob
def find_informants(prob, cond_prob, len_search):
max_cond = defaultdict(lambda: 0.0)
maxlet = [''] * 8
# для каждой позиции ищем букву с наибольшим значением условной вероятности,
for posChar in cond_prob: # цикл по позициям букв в условной вероятности
aff_len = posChar[0]
if cond_prob[posChar] > max_cond[aff_len]:
max_cond[aff_len] = cond_prob[posChar]
maxlet[-aff_len] = posChar[1]
print("Наиболее частые буквы по позициям:\n============================\n", maxlet[-1:0:-1], "\n")
print("Максимальные вероятности по позициям:\n============================\n", max_cond, "\n")
# порог медиального разбиения - половина условной вероятности , буквы с УВ не меньше порога - верхнее подмножеств
cond_prob_sup = {}
for posChar in cond_prob:
i = posChar[0]
if cond_prob[posChar] > THRESHOLD_COEFF * max_cond[i]:
cond_prob_sup[posChar] = cond_prob[posChar]
# КФ = условная вер по данной позиции / безусл вероятность
cf = {}
for posChar in cond_prob_sup:
char = posChar[1]
cf[posChar] = cond_prob_sup[posChar] / prob[char]
print("КФ для верхних подмножества:\n====================\n");
for aff_len in set(map(itemgetter(0), cf.keys())):
print(aff_len, "**")
for k, v in cf.items():
if k[0] == aff_len:
print(k[1], "{:.4f}".format(v), end=" ")
print("")
# информанты - это буквы с макс значением КФ в каждой позиции
informants = []
for aff_len in range(-len_search, 0):
kmax = max({k for k in cf if k[0] == aff_len}, key=lambda k: cf[k])
informants.append((kmax[1], aff_len, cf[kmax]))
informants.sort(key=itemgetter(2), reverse=True)
return informants
def extend_right(char, pos, cf):
if pos == -1: # если информант в последней позиции, то расширять некуда
return char # возвращаем информант как аффикс
d = defaultdict(int)
for w, n in voc.items(): # для буквы и частоты в словаре
if w[pos:pos + 1] == char: # если буква в позиции равна нашей, то посчитаем это окончание
d[w[pos + 1:]] += n
return char + max(d.keys(), key=lambda end: d[end]) # прибавляем к информанту самое частое окончание
def extend_left(affix, trie, len_search):
# расширяем аффикс влево используя trie
current_dict = trie
for ch in affix[::-1]:
current_dict = current_dict[ch]
aff_len = len(affix)
"""
Для поиска буквы слева:
идем по дереву trie
по две самые частотные буквы делим друг на друга, при мере перепада большей 1.5 прибавляем к информанту более частую из них.
Иначе начинаем рассматривать по две самые частотные буквы/на следующие две,
если мера перепада в одной из них больше двух, то из данной пары берем более частотную и прибавляем ее к аффиксу.
"""
# пока позиция символа в слове больше разрешенной длины аффикса
while aff_len < len_search:
# составляем список всех букв предшествующих аффиксу с количествами
L = [(l, current_dict[l]["n"]) for l in current_dict.keys() if l not in '#n']
# сортируем по количествам
L.sort(key=itemgetter(1), reverse=True)
# if affix=='нан':
# import pdb
# pdb.set_trace()
ch = L[0][0]
if L[0][1] > DROP * L[1][1]:
affix = ch + affix
current_dict = current_dict[ch]
else:
if (L[0][1] + L[1][1]) / (L[2][1] + L[3][1]) > 2.:
affix = ch + affix
current_dict = current_dict[ch]
else:
break
aff_len += 1
return affix
# узел trie, соответствующий окончанию aff
def affix_node(aff):
global trie
current_node = trie
for char in aff[::-1]:
current_node = current_node[char]
return current_node
# узел trie, соответствующий префиксу prf
def prefix_node(prf):
global prefix_trie
current_node = prefix_trie
for char in prf:
current_node = current_node[char]
return current_node
# рекурсивно возвращает все основы, растущие из данного узла
def word_dfs(node, ending=''):
result = [ending] if '#' in node else []
for ch in node:
if ch in ['#', 'n']: continue
result += word_dfs(node[ch], ch + ending)
return result
def num_prefix(prf):
return prefix_node(prf)['n']
# все основы, растущие из данного узла
def bases_with_affix(aff):
global prefix_trie
return sorted([b for b in word_dfs(affix_node(aff)) if len(b) > 2 and voc[b + aff] > 1 or num_prefix(b) < 100])
# суммарная встречаемость основы b с любыми остатками
def build_freq_bases(b):
freq = 0
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
freq += voc[w]
return freq
def build_ost(bases):
global words, voc
ostat = defaultdict(int)
for i,b in enumerate(bases):
affix_pos = len(b)
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
if not w[affix_pos:] in affix:
ostat[w[affix_pos:]] += voc[w] # вариант с подсчетом словоупотреблений
# ostat[w[affix_pos:]] += 1 # вариант с подсчетом словоформ
return ostat
def fast_alg(bases, specter, freq_bases, step, del_aff):
max_ost_val = max(specter.values())
# те пары к в у которых к больше макс
inf_zveno = {ost: v for ost, v in specter.items() if v > max_ost_val * 0.5}
print("Звено: ", inf_zveno)
# дольше нужна сочетаемость с некоторой группой контрольных основ
# верхнее подмножество баз очередного вида
next_base_freq = {}
max_nb_freq = 0
freq_cur_bases = {b: sum([voc.get(b + ost, 0) for ost in specter]) for b in bases}
max_freq_cur = max(freq_cur_bases.values())
print("Макс частотность базы:", max_freq_cur)
# верхнее подмножество баз очередного вида
control_bases = [b for b, freq in freq_cur_bases.items() if freq >= max_freq_cur / 3]
if len(control_bases) == 1:
lower = [(b, freq) for b, freq in freq_cur_bases.items() if freq < max_freq_cur]
control_bases.append(max(lower, key=itemgetter(1))[0])
print("Контрольные базы:", control_bases)
# Первый критерий принадлежности к парадигме - сочетаемость остатков в звене с основами control_bases
keep_ost = [ost for ost in inf_zveno if all([b + ost in voc for b in control_bases])]
removed_ost = [ost for ost in inf_zveno if ost not in keep_ost]
print("!!Удалены из звена:", removed_ost)
print("Остаются в звене:", keep_ost)
next_bases = [b for b in bases if all([b + aff in voc for aff in keep_ost]) and
freq_cur_bases[b] > step]
if removed_ost:
del_aff += destiny_of_affix(removed_ost, next_bases, voc)
for x in del_aff:
del specter[x]
return keep_ost
def destiny_of_affix(removed_ost, next_bases, voc):
# проверка на меру децентрации
# если >=1/2 синтагматической вероятности падает на парадигматически малую(0,1) часть баз - то аффикс искл из парадигмы до конца рассм
# иначе - аффикс выводится из звена, но сохраняется в спектре остатков
removed_aff = []
for aff in removed_ost:
freq_b = sorted([(base, voc.get(base + aff, 0)) for base in next_bases], key=itemgetter(1), reverse=True)
L = len(freq_b) // 10
S = sum(map(itemgetter(1), freq_b))
if sum(map(itemgetter(1), freq_b[:L])) >= 1 / 2 * S:
removed_aff.append(aff)
return removed_aff
# проверка сочетаемости синтагматической аероятности аффикса с количеством оставшихся после групповой редукции баз, принимающих данный аффикс
def direct_alg(bases, specter, false_affixes):
global prob, voc
# верхнее подмножество остатков текущего вида
m = max(specter.values()) * THRESHOLD_OSTAT
upper_set = {ost: val for ost, val in specter.items() if val > m}
if not [ost for ost in upper_set if ost not in false_affixes]:
sp_list = sorted([(ost, val) for ost, val in specter.items()], key=itemgetter(1), reverse=True)
for ost, val in sp_list:
if ost not in false_affixes:
break
upper_set[ost] = val
print("Верхнее подмножество остатков текущего вида,", len(upper_set), "шт.")
# ВЫЧИСЛИТЬ незав для остатков из upper_set
nv = {}
summ_kol = 0
for ost, kol in specter.items(): # ostat - defdict
summ_kol += kol
nezav_ver = 1
for ch in ost:
nezav_ver *= prob[ch]
nv[ost] = nezav_ver
# усл вероятности
uv = {}
for ost, kol in specter.items():
uv[ost] = kol / summ_kol
# КФ - отношение условной вероятности к безусловной
corr_func = {}
for ost in upper_set:
corr_func[ost] = uv[ost] / nv[ost]
corr_func = [(ost, cf) for ost, cf in corr_func.items() if ost not in false_affixes]
corr_func = sorted(corr_func, key=itemgetter(1), reverse=True)
print("Коррелятивная функция: ", repr(corr_func)[:70])
if not corr_func: # суперпороговые редукции исчерпали спектр остатков
print("Остались только ложные остатки ")
return []
# найти след информант
informant = corr_func[0][0]
print("Аффикс-кандидат (информант):", informant)
return [informant]
def check_agglut_part():
return 0
def build_class(bootstrap_affix):
global words, average_word_len, THRESHOLD_OSTAT, thres_reduction, false_affixes, specter
k = 10 ** (math.log(average_word_len, 10) / (1 + 0.02 * math.log(len(voc), 10)))
# коэффициент редукции
print("Поправочный коэффициент:", k)
thres_reduction = 1 / average_word_len # порог редукции
print("Порог редукции:", thres_reduction)
affixes = [bootstrap_affix] # найденные аффиксы парадигмы
false_affixes = [] # список отвергнутых аффиксов, давших ложный шаг
bases = [bases_with_affix(bootstrap_affix)]
specter = [build_ost(bases[0])]
freq_bases = {b: build_freq_bases(b) for b in bases[0]}
step = 1
fast = False
while True:
print("\n*** шаг", step)
print("Аффиксы парадигмы:", affixes)
print("Основы {}-го вида: {} шт.".format(step, len(bases[-1])))
print("Спектр остатков {}-го вида: {} шт.".format(step, len(specter[-1])))
if not specter[-1]: # исчерпаны все остатки в спектре
print("Исчерпаны все остатки в спектре")
break
if not fast: | print("* Ускоренный ход!!! *")
del_aff = []
next_affixes = fast_alg(bases[-1], specter[-1], freq_bases, step, del_aff)
if not next_affixes:
fast = False
continue
# основы следующего вида
next_bases = [b for b in bases[-1] if all([b + aff in voc for aff in next_affixes]) and
freq_bases[b] > step]
# Поправочный коэффициент
# увеличивается во столько раз сколько аффиксов было сохранено в звене
K = k * len(next_affixes)
# Мера редукции
# доля основ текущего вида, не принимающих остатки следующего вида
N = len(bases[-1])
reduction = (N - len(next_bases)) / (K * N)
print("Мера редукции: ", reduction)
if reduction > thres_reduction: # суперпороговая редукция
false_affixes += next_affixes
print("ОТВЕРГАЕТСЯ! Суперпороговая редукция, ложные остатки", false_affixes)
if len(false_affixes) > average_word_len:
print("Cуперпороговая редукция повторяется большее число раз, чем средняя длина словоформы")
break
fast = False
else:
print("ПРИНИМАЕТСЯ!")
step += 1
false_affixes = []
affixes += next_affixes
# спектр остатков следующего вида
next_specter = {ost: sum([voc.get(b + ost, 0) for b in next_bases]) for ost in specter[-1]}
next_specter = {ost: v for ost, v in next_specter.items() if v > 0 and ost not in next_affixes}
bases.append(next_bases)
specter.append(next_specter)
if (len(next_bases) <= 2):
print("Остались две базы очередного вида")
break
if reduction < thres_reduction / 5: # если редукция < порога редукции/2(порог устойчивости)
fast = True
else:
fast = False
return bases[-1], affixes
main()
classes = []
for affix in informants:
# отправной аффикс начинаем строить с информанта имеющего max КФ
bootstrap_affix = extend_right(*affix)
bootstrap_affix = extend_left(bootstrap_affix, trie, len_search)
print(bootstrap_affix)
if any([c['aff'][0]==bootstrap_affix for c in classes]):
print("Аффикс уже обработан!")
continue
else:
print("КЛАСС", len(classes)+1, "\nОТПРАВНОЙ АФФИКС:", bootstrap_affix, "\n============================")
bases, affixes = build_class(bootstrap_affix)
while True:
first_letter = affixes[0][0]
if all([aff.startswith(first_letter) for aff in affixes]):
for i, b in enumerate(bases):
bases[i] = b + first_letter
for i, aff in enumerate(affixes):
affixes[i] = aff[1:]
else:
break
print("Основы: {} шт. {}".format(len(bases), bases))
print("Аффиксы: {} шт. {}".format(len(affixes), affixes))
classes.append({'b': bases, 'aff': affixes}) | print("* Прямой ход * ")
next_affixes = direct_alg(bases[-1], specter[-1], false_affixes)
if not next_affixes:
break
else: | random_line_split |
Исследование_1705.py | # ## Определения
from operator import itemgetter
import bz2, json
from collections import defaultdict
import math
from bisect import bisect_left
WORD_LEN_COEFF = 1
THRESHOLD_COEFF = 0.5
DROP = 1.5
DROP_1 = 2
AFFIX_LEN = 1
THRESHOLD_OSTAT = 0.5
N = 5 # шлейфовый порог
trie, voc, words, prob, word_count, average_word_len = None, None, None, None, None, None
prefix_trie, informants, len_search = None, None, None
def main():
global trie, prefix_trie, words, voc, prob, average_word_len, informants, len_search
print("Загружаю словарь...", end='')
voc = load_voc()
words = sorted(list(voc.keys()))
word_count = sum([voc[k] for k in voc])
average_word_len = sum([len(w) * voc[w] for w in words]) / word_count
len_search = int(average_word_len * WORD_LEN_COEFF) # это максимальная разрешенная длина аффикса
print("{} словоформ, {} словоупотреблений, средняя длина слова {} ".format(len(words), word_count, average_word_len))
# загрузка безусловных вероятностей букв и деревьев
print("Загружаю деревья...", end='')
prob = json.load(open("prob.json", encoding="utf-8"))
strie = bz2.BZ2File('trie.json.bz2', 'r').read().decode(encoding='utf-8')
trie = json.loads(strie)
del strie
prefix_trie = json.load(open("prefix_trie.json"))
print(', ok')
print("Безусловные вероятности первых 10 букв:\n========================")
print(", ".join(map(lambda pair: "{}: {:.4f}".format(*pair),
sorted([(letter, nv) for letter, nv in prob.items()],
key=itemgetter(1),
reverse=True)[:10])))
print("Подсчитываю условные вероятности букв...", end='')
cond_prob = build_cond_prob(voc, prob, len_search)
print(', ok')
# информанты - это буквы с макс значением КФ в каждой позиции
informants = find_informants(prob, cond_prob, len_search)
print("ИНФОРМАНТЫ:\n===================")
print(informants)
def load_voc():
# Загрузить словарь количеств из файла.
# Словарь содержит частоты слов в виде {слово: число вхождений в корпус, ... },
# например {"көппөҕү" : 4, "хазар" : 3, ...}
#
# корпус в формате txt занимет 174 МБ, словарь частот в json 10,4МБ,
# после сжатия в формат .bz2 1,7 МБ
svoc = bz2.BZ2File('voc.json.bz2', 'r').read().decode(encoding='utf-8')
voc = json.loads(svoc)
del svoc
return voc
def build_trie_and_prob(voc):
# подсчитываем частоты букв и строим дерево оконочаний
prob = defaultdict(lambda: 0)
trie = {'n': 0}
for w, n in voc.items(): # для каждого слова в списке
word = w[::-1] # переворачиваем слово, читаем слово с конца
current_dict = trie
trie['n'] += n
for letter in word: # для буквы в слове
prob[letter] += n
current_dict = current_dict.setdefault(letter, {'n': 0}) # получить значение из словаря по ключу.
# Автоматически добавляет элемент словаря, если он отсутствует.
current_dict['n'] += n
current_dict['#'] = n
total = sum([n for n in prob.values()]) # 84263863
for k, v in prob.items():
prob[k] = v / total
return trie, prob
def build_cond_prob(voc, prob, len_search):
letters = list(prob.keys())
cond_prob = defaultdict(lambda: 0) # словарь для условных вероятностей
total = defaultdict(lambda: 0)
for word, n in voc.items(): # для слова в словаре
positions = range(-min(len_search, len(word) - 2), 0) # from -7 to 0
for i in positions:
cond_prob[(i, word[i])] += n
total[i] += n # dictionary with prob of char words?
for posChar in cond_prob: # получаем из частот вероятности
i = posChar[0]
cond_prob[posChar] /= total[i]
return cond_prob
def find_informants(prob, cond_prob, len_search):
max_cond = defaultdict(lambda: 0.0)
maxlet = [''] * 8
# для каждой позиции ищем букву с наибольшим значением условной вероятности,
for posChar in cond_prob: # цикл по позициям букв в условной вероятности
aff_len = posChar[0]
if cond_prob[posChar] > max_cond[aff_len]:
max_cond[aff_len] = cond_prob[posChar]
maxlet[-aff_len] = posChar[1]
print("Наиболее частые буквы по позициям:\n============================\n", maxlet[-1:0:-1], "\n")
print("Максимальные вероятности по позициям:\n============================\n", max_cond, "\n")
# порог медиального разбиения - половина условной вероятности , буквы с УВ не меньше порога - верхнее подмножеств
cond_prob_sup = {}
for posChar in cond_prob:
i = posChar[0]
if cond_prob[posChar] > THRESHOLD_COEFF * max_cond[i]:
cond_prob_sup[posChar] = cond_prob[posChar]
# КФ = условная вер по данной позиции / безусл вероятность
cf = {}
for posChar in cond_prob_sup:
char = posChar[1]
cf[posChar] = cond_prob_sup[posChar] / prob[char]
print("КФ для верхних подмножества:\n====================\n");
for aff_len in set(map(itemgetter(0), cf.keys())):
print(aff_len, "**")
for k, v in cf.items():
if k[0] == aff_len:
print(k[1], "{:.4f}".format(v), end=" ")
print("")
# информанты - это буквы с макс значением КФ в каждой позиции
informants = []
for aff_len in range(-len_search, 0):
kmax = max({k for k in cf if k[0] == aff_len}, key=lambda k: cf[k])
informants.append((kmax[1], aff_len, cf[kmax]))
informants.sort(key=itemgetter(2), reverse=True)
return informants
def extend_right(char, pos, cf):
if pos == -1: # если информант в последней позиции, то расширять некуда
return char # возвращаем информант как аффикс
d = defaultdict(int)
for w, n in voc.items(): # для буквы и частоты в словаре
if w[pos:pos + 1] == char: # если буква в позиции равна нашей, то посчитаем это окончание
d[w[pos + 1:]] += n
return char + max(d.keys(), key=lambda end: d[end]) # прибавляем к информанту самое частое окончание
def extend_left(affix, trie, len_search):
# расширяем аффикс влево используя trie
current_dict = trie
for ch in affix[::-1]:
current_dict = current_dict[ch]
aff_len = len(affix)
"""
Для поиска буквы слева:
идем по дереву trie
по две самые частотные буквы делим друг на друга, при мере перепада большей 1.5 прибавляем к информанту более частую из них.
Иначе начинаем рассматривать по две самые частотные буквы/на следующие две,
если мера перепада в одной из них больше двух, то из данной пары берем более частотную и прибавляем ее к аффиксу.
"""
# пока позиция символа в слове больше разрешенной длины аффикса
while aff_len < len_search:
# составляем список всех букв предшествующих аффиксу с количествами
L = [(l, current_dict[l]["n"]) for l in current_dict.keys() if l not in '#n']
# сортируем по количествам
L.sort(key=itemgetter(1), reverse=True)
# if affix=='нан':
# import pdb
# pdb.set_trace()
ch = L[0][0]
if L[0][1] > DROP * L[1][1]:
affix = ch + affix
current_dict = current_dict[ch]
else:
if (L[0][1] + L[1][1]) / (L[2][1] + L[3][1]) > 2.:
affix = ch + affix
current_dict = current_dict[ch]
else:
break
aff_len += 1
return affix
# узел trie, соответствующий окончанию aff
def affix_node(aff):
global trie
current_node = trie
for char in aff[::-1]:
current_node = current_node[char]
return current_node
# узел trie, соответствующий префиксу prf
def prefix_node(prf):
global prefix_trie
current_node = prefix_trie
for char in prf:
current_node = current_node[char]
return current_node
# рекурсивно возвращает все основы, растущие из данного узла
def word_dfs(node, ending=''):
result = [ending] if '#' in node else []
for ch in node:
if ch in ['#', 'n']: continue
result += word_dfs(node[ch], ch + ending)
return result
def num_prefix(prf):
return prefix_node(prf)['n']
# все основы, растущие из данного узла
def bases_with_affix(aff):
global prefix_trie
return sorted([b for b in word_dfs(affix_node(aff)) if len(b) > 2 and voc[b + aff] > 1 or num_prefix(b) < 100])
# суммарная встречаемость основы b с любыми остатками
def build_freq_bases(b):
freq = 0
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
freq += voc[w]
return freq
def build_ost(bases):
global words, voc
ostat = defaultdict(int)
for i,b in enumerate(bases):
affix_pos = len(b)
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
if not w[affix_pos:] in affix:
ostat[w[affix_pos:]] += voc[w] # вариант с подсчетом словоупотреблений
# ostat[w[affix_pos:]] += 1 # вариант с подсчетом словоформ
return ostat
def fast_alg(bases, specter, freq_bases, step, del_aff):
max_ost_val = max(specter.values())
# те пары к в у которых к больше макс
inf_zveno = {ost: v for ost, v in specter.items() if v > max_ost_val * 0.5}
print("Звено: ", inf_zveno)
# дольше нужна сочетаемость с некоторой группой контрольных основ
# верхнее подмножество баз очередного вида
next_base_freq = {}
max_nb_freq = 0
freq_cur_bases = {b: sum([voc.get(b + ost, 0) for ost in specter]) for b in bases}
max_freq_cur = max(freq_cur_bases.values())
print("Макс частотность базы:", max_freq_cur)
# верхнее подмножество баз очередного вида
control_bases = [b for b, freq in freq_cur_bases.items() if freq >= max_freq_cur / 3]
| ontrol_bases) == 1:
lower = [(b, freq) for b, freq in freq_cur_bases.items() if freq < max_freq_cur]
control_bases.append(max(lower, key=itemgetter(1))[0])
print("Контрольные базы:", control_bases)
# Первый критерий принадлежности к парадигме - сочетаемость остатков в звене с основами control_bases
keep_ost = [ost for ost in inf_zveno if all([b + ost in voc for b in control_bases])]
removed_ost = [ost for ost in inf_zveno if ost not in keep_ost]
print("!!Удалены из звена:", removed_ost)
print("Остаются в звене:", keep_ost)
next_bases = [b for b in bases if all([b + aff in voc for aff in keep_ost]) and
freq_cur_bases[b] > step]
if removed_ost:
del_aff += destiny_of_affix(removed_ost, next_bases, voc)
for x in del_aff:
del specter[x]
return keep_ost
def destiny_of_affix(removed_ost, next_bases, voc):
# проверка на меру децентрации
# если >=1/2 синтагматической вероятности падает на парадигматически малую(0,1) часть баз - то аффикс искл из парадигмы до конца рассм
# иначе - аффикс выводится из звена, но сохраняется в спектре остатков
removed_aff = []
for aff in removed_ost:
freq_b = sorted([(base, voc.get(base + aff, 0)) for base in next_bases], key=itemgetter(1), reverse=True)
L = len(freq_b) // 10
S = sum(map(itemgetter(1), freq_b))
if sum(map(itemgetter(1), freq_b[:L])) >= 1 / 2 * S:
removed_aff.append(aff)
return removed_aff
# проверка сочетаемости синтагматической аероятности аффикса с количеством оставшихся после групповой редукции баз, принимающих данный аффикс
def direct_alg(bases, specter, false_affixes):
global prob, voc
# верхнее подмножество остатков текущего вида
m = max(specter.values()) * THRESHOLD_OSTAT
upper_set = {ost: val for ost, val in specter.items() if val > m}
if not [ost for ost in upper_set if ost not in false_affixes]:
sp_list = sorted([(ost, val) for ost, val in specter.items()], key=itemgetter(1), reverse=True)
for ost, val in sp_list:
if ost not in false_affixes:
break
upper_set[ost] = val
print("Верхнее подмножество остатков текущего вида,", len(upper_set), "шт.")
# ВЫЧИСЛИТЬ незав для остатков из upper_set
nv = {}
summ_kol = 0
for ost, kol in specter.items(): # ostat - defdict
summ_kol += kol
nezav_ver = 1
for ch in ost:
nezav_ver *= prob[ch]
nv[ost] = nezav_ver
# усл вероятности
uv = {}
for ost, kol in specter.items():
uv[ost] = kol / summ_kol
# КФ - отношение условной вероятности к безусловной
corr_func = {}
for ost in upper_set:
corr_func[ost] = uv[ost] / nv[ost]
corr_func = [(ost, cf) for ost, cf in corr_func.items() if ost not in false_affixes]
corr_func = sorted(corr_func, key=itemgetter(1), reverse=True)
print("Коррелятивная функция: ", repr(corr_func)[:70])
if not corr_func: # суперпороговые редукции исчерпали спектр остатков
print("Остались только ложные остатки ")
return []
# найти след информант
informant = corr_func[0][0]
print("Аффикс-кандидат (информант):", informant)
return [informant]
def check_agglut_part():
return 0
def build_class(bootstrap_affix):
global words, average_word_len, THRESHOLD_OSTAT, thres_reduction, false_affixes, specter
k = 10 ** (math.log(average_word_len, 10) / (1 + 0.02 * math.log(len(voc), 10)))
# коэффициент редукции
print("Поправочный коэффициент:", k)
thres_reduction = 1 / average_word_len # порог редукции
print("Порог редукции:", thres_reduction)
affixes = [bootstrap_affix] # найденные аффиксы парадигмы
false_affixes = [] # список отвергнутых аффиксов, давших ложный шаг
bases = [bases_with_affix(bootstrap_affix)]
specter = [build_ost(bases[0])]
freq_bases = {b: build_freq_bases(b) for b in bases[0]}
step = 1
fast = False
while True:
print("\n*** шаг", step)
print("Аффиксы парадигмы:", affixes)
print("Основы {}-го вида: {} шт.".format(step, len(bases[-1])))
print("Спектр остатков {}-го вида: {} шт.".format(step, len(specter[-1])))
if not specter[-1]: # исчерпаны все остатки в спектре
print("Исчерпаны все остатки в спектре")
break
if not fast:
print("* Прямой ход * ")
next_affixes = direct_alg(bases[-1], specter[-1], false_affixes)
if not next_affixes:
break
else:
print("* Ускоренный ход!!! *")
del_aff = []
next_affixes = fast_alg(bases[-1], specter[-1], freq_bases, step, del_aff)
if not next_affixes:
fast = False
continue
# основы следующего вида
next_bases = [b for b in bases[-1] if all([b + aff in voc for aff in next_affixes]) and
freq_bases[b] > step]
# Поправочный коэффициент
# увеличивается во столько раз сколько аффиксов было сохранено в звене
K = k * len(next_affixes)
# Мера редукции
# доля основ текущего вида, не принимающих остатки следующего вида
N = len(bases[-1])
reduction = (N - len(next_bases)) / (K * N)
print("Мера редукции: ", reduction)
if reduction > thres_reduction: # суперпороговая редукция
false_affixes += next_affixes
print("ОТВЕРГАЕТСЯ! Суперпороговая редукция, ложные остатки", false_affixes)
if len(false_affixes) > average_word_len:
print("Cуперпороговая редукция повторяется большее число раз, чем средняя длина словоформы")
break
fast = False
else:
print("ПРИНИМАЕТСЯ!")
step += 1
false_affixes = []
affixes += next_affixes
# спектр остатков следующего вида
next_specter = {ost: sum([voc.get(b + ost, 0) for b in next_bases]) for ost in specter[-1]}
next_specter = {ost: v for ost, v in next_specter.items() if v > 0 and ost not in next_affixes}
bases.append(next_bases)
specter.append(next_specter)
if (len(next_bases) <= 2):
print("Остались две базы очередного вида")
break
if reduction < thres_reduction / 5: # если редукция < порога редукции/2(порог устойчивости)
fast = True
else:
fast = False
return bases[-1], affixes
main()
classes = []
for affix in informants:
# отправной аффикс начинаем строить с информанта имеющего max КФ
bootstrap_affix = extend_right(*affix)
bootstrap_affix = extend_left(bootstrap_affix, trie, len_search)
print(bootstrap_affix)
if any([c['aff'][0]==bootstrap_affix for c in classes]):
print("Аффикс уже обработан!")
continue
else:
print("КЛАСС", len(classes)+1, "\nОТПРАВНОЙ АФФИКС:", bootstrap_affix, "\n============================")
bases, affixes = build_class(bootstrap_affix)
while True:
first_letter = affixes[0][0]
if all([aff.startswith(first_letter) for aff in affixes]):
for i, b in enumerate(bases):
bases[i] = b + first_letter
for i, aff in enumerate(affixes):
affixes[i] = aff[1:]
else:
break
print("Основы: {} шт. {}".format(len(bases), bases))
print("Аффиксы: {} шт. {}".format(len(affixes), affixes))
classes.append({'b': bases, 'aff': affixes})
| if len(c | identifier_name |
Исследование_1705.py | # ## Определения
from operator import itemgetter
import bz2, json
from collections import defaultdict
import math
from bisect import bisect_left
WORD_LEN_COEFF = 1
THRESHOLD_COEFF = 0.5
DROP = 1.5
DROP_1 = 2
AFFIX_LEN = 1
THRESHOLD_OSTAT = 0.5
N = 5 # шлейфовый порог
trie, voc, words, prob, word_count, average_word_len = None, None, None, None, None, None
prefix_trie, informants, len_search = None, None, None
def main():
global trie, prefix_trie, words, voc, prob, average_word_len, informants, len_search
print("Загружаю словарь...", end='')
voc = load_voc()
words = sorted(list(voc.keys()))
word_count = sum([voc[k] for k in voc])
average_word_len = sum([len(w) * voc[w] for w in words]) / word_count
len_search = int(average_word_len * WORD_LEN_COEFF) # это максимальная разрешенная длина аффикса
print("{} словоформ, {} словоупотреблений, средняя длина слова {} ".format(len(words), word_count, average_word_len))
# загрузка безусловных вероятностей букв и деревьев
print("Загружаю деревья...", end='')
prob = json.load(open("prob.json", encoding="utf-8"))
strie = bz2.BZ2File('trie.json.bz2', 'r').read().decode(encoding='utf-8')
trie = json.loads(strie)
del strie
prefix_trie = json.load(open("prefix_trie.json"))
print(', ok')
print("Безусловные вероятности первых 10 букв:\n========================")
print(", ".join(map(lambda pair: "{}: {:.4f}".format(*pair),
sorted([(letter, nv) for letter, nv in prob.items()],
key=itemgetter(1),
reverse=True)[:10])))
print("Подсчитываю условные вероятности букв...", end='')
cond_prob = build_cond_prob(voc, prob, len_search)
print(', ok')
# информанты - это буквы с макс значением КФ в каждой позиции
informants = find_informants(prob, cond_prob, len_search)
print("ИНФОРМАНТЫ:\n===================")
print(informants)
def load_voc():
# Загрузить словарь количеств из файла.
# Словарь содержит частоты слов в виде {слово: число вхождений в корпус, ... },
# например {"көппөҕү" : 4, "хазар" : 3, ...}
#
# корпус в формате txt занимет 174 МБ, словарь частот в json 10,4МБ,
# после сжатия в формат .bz2 1,7 МБ
svoc = bz2.BZ2File('voc.json.bz2', 'r').read().decode(encoding='utf-8')
voc = json.loads(svoc)
del svoc
return voc
def build_trie_and_prob(voc):
# подсчитываем частоты букв и строим дерево оконочаний
prob = defaultdict(lambda: 0)
trie = {'n': 0}
for w, n in voc.items(): # для каждого слова в списке
word = w[::-1] # переворачиваем слово, читаем слово с конца
current_dict = trie
trie['n'] += n
for letter in word: # для буквы в слове
prob[letter] += n
current_dict = current_dict.setdefault(letter, {'n': 0}) # получить значение из словаря по ключу.
# Автоматически добавляет элемент словаря, если он отсутствует.
current_dict['n'] += n
current_dict['#'] = n
total = sum([n for n in prob.values()]) # 84263863
for k, v in prob.items():
prob[k] = v / total
return trie, prob
def build_cond_prob(voc, prob, len_search):
letters = list(prob.keys())
cond_prob = defaultdict(lambda: 0) # словарь для условных вероятностей
total = defaultdict(lambda: 0)
for word, n in voc.items(): # для слова в словаре
positions = range(-min(len_search, len(word) - 2), 0) # from -7 to 0
for i in positions:
cond_prob[(i, word[i])] += n
total[i] += n # dictionary with prob of char words?
for posChar in cond_prob: # получаем из частот вероятности
i = posChar[0]
cond_prob[posChar] /= total[i]
return cond_prob
def find_informants(prob, cond_prob, len_search):
max_cond = defaultdict(lambda: 0.0)
maxlet = [''] * 8
# для каждой позиции ищем букву с наибольшим значением условной вероятности,
for posChar in cond_prob: # цикл по позициям букв в условной вероятности
aff_len = posChar[0]
if cond_prob[posChar] > max_cond[aff_len]:
max_cond[aff_len] = cond_prob[posChar]
maxlet[-aff_len] = posChar[1]
print("Наиболее частые буквы по позициям:\n============================\n", maxlet[-1:0:-1], "\n")
print("Максимальные вероятности по позициям:\n============================\n", max_cond, "\n")
# порог медиального разбиения - половина условной вероятности , буквы с УВ не меньше порога - верхнее подмножеств
cond_prob_sup = {}
for posChar in cond_prob:
i = posChar[0]
if cond_prob[posChar] > THRESHOLD_COEFF * max_cond[i]:
cond_prob_sup[posChar] = cond_prob[posChar]
# КФ = условная вер по данной позиции / безусл вероятность
cf = {}
for posChar in cond_prob_sup:
char = posChar[1]
cf[posChar] = cond_prob_sup[posChar] / prob[char]
print("КФ для верхних подмножества:\n====================\n");
for aff_len in set(map(itemgetter(0), cf.keys())):
print(aff_len, "**")
for k, v in cf.items():
if k[0] == aff_len:
print(k[1], "{:.4f}".format(v), end=" ")
print("")
# информанты - это буквы с макс значением КФ в каждой позиции
informants = []
for aff_len in range(-len_search, 0):
kmax = max({k for k in cf if k[0] == aff_len}, key=lambda k: cf[k])
informants.append((kmax[1], aff_len, cf[kmax]))
informants.sort(key=itemgetter(2), reverse=True)
return informants
def extend_right(char, pos, cf):
if pos == -1: # если информант в последней позиции, то расширять некуда
return char # возвращаем информант как аффикс
d = defaultdict(int)
for w, n in voc.items(): # для буквы и частоты в словаре
if w[pos:pos + 1] == char: # если буква в позиции равна нашей, то посчитаем это окончание
d[w[pos + 1:]] += n
return char + max(d.keys(), key=lambda end: d[end]) # прибавляем к информанту самое частое окончание
def extend_left(affix, trie, len_search):
# расширяем аффикс влево используя trie
current_dict = trie
for ch in affix[::-1]:
current_dict = current_dict[ch]
aff_len = len(affix)
"""
Для поиска буквы слева:
идем по дереву trie
по две самые частотные буквы делим друг на друга, при мере перепада большей 1.5 прибавляем к информанту более частую из них.
Иначе начинаем рассматривать по две самые частотные буквы/на следующие две,
если мера перепада в одной из них больше двух, то из данной пары берем более частотную и прибавляем ее к аффиксу.
"""
# пока позиция символа в слове больше разрешенной | current_dict[ch]
else:
break
aff_len += 1
return affix
# узел trie, соответствующий окончанию aff
def affix_node(aff):
global trie
current_node = trie
for char in aff[::-1]:
current_node = current_node[char]
return current_node
# узел trie, соответствующий префиксу prf
def prefix_node(prf):
global prefix_trie
current_node = prefix_trie
for char in prf:
current_node = current_node[char]
return current_node
# рекурсивно возвращает все основы, растущие из данного узла
def word_dfs(node, ending=''):
result = [ending] if '#' in node else []
for ch in node:
if ch in ['#', 'n']: continue
result += word_dfs(node[ch], ch + ending)
return result
def num_prefix(prf):
return prefix_node(prf)['n']
# все основы, растущие из данного узла
def bases_with_affix(aff):
global prefix_trie
return sorted([b for b in word_dfs(affix_node(aff)) if len(b) > 2 and voc[b + aff] > 1 or num_prefix(b) < 100])
# суммарная встречаемость основы b с любыми остатками
def build_freq_bases(b):
freq = 0
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
freq += voc[w]
return freq
def build_ost(bases):
global words, voc
ostat = defaultdict(int)
for i,b in enumerate(bases):
affix_pos = len(b)
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
if not w[affix_pos:] in affix:
ostat[w[affix_pos:]] += voc[w] # вариант с подсчетом словоупотреблений
# ostat[w[affix_pos:]] += 1 # вариант с подсчетом словоформ
return ostat
def fast_alg(bases, specter, freq_bases, step, del_aff):
max_ost_val = max(specter.values())
# те пары к в у которых к больше макс
inf_zveno = {ost: v for ost, v in specter.items() if v > max_ost_val * 0.5}
print("Звено: ", inf_zveno)
# дольше нужна сочетаемость с некоторой группой контрольных основ
# верхнее подмножество баз очередного вида
next_base_freq = {}
max_nb_freq = 0
freq_cur_bases = {b: sum([voc.get(b + ost, 0) for ost in specter]) for b in bases}
max_freq_cur = max(freq_cur_bases.values())
print("Макс частотность базы:", max_freq_cur)
# верхнее подмножество баз очередного вида
control_bases = [b for b, freq in freq_cur_bases.items() if freq >= max_freq_cur / 3]
if len(control_bases) == 1:
lower = [(b, freq) for b, freq in freq_cur_bases.items() if freq < max_freq_cur]
control_bases.append(max(lower, key=itemgetter(1))[0])
print("Контрольные базы:", control_bases)
# Первый критерий принадлежности к парадигме - сочетаемость остатков в звене с основами control_bases
keep_ost = [ost for ost in inf_zveno if all([b + ost in voc for b in control_bases])]
removed_ost = [ost for ost in inf_zveno if ost not in keep_ost]
print("!!Удалены из звена:", removed_ost)
print("Остаются в звене:", keep_ost)
next_bases = [b for b in bases if all([b + aff in voc for aff in keep_ost]) and
freq_cur_bases[b] > step]
if removed_ost:
del_aff += destiny_of_affix(removed_ost, next_bases, voc)
for x in del_aff:
del specter[x]
return keep_ost
def destiny_of_affix(removed_ost, next_bases, voc):
# проверка на меру децентрации
# если >=1/2 синтагматической вероятности падает на парадигматически малую(0,1) часть баз - то аффикс искл из парадигмы до конца рассм
# иначе - аффикс выводится из звена, но сохраняется в спектре остатков
removed_aff = []
for aff in removed_ost:
freq_b = sorted([(base, voc.get(base + aff, 0)) for base in next_bases], key=itemgetter(1), reverse=True)
L = len(freq_b) // 10
S = sum(map(itemgetter(1), freq_b))
if sum(map(itemgetter(1), freq_b[:L])) >= 1 / 2 * S:
removed_aff.append(aff)
return removed_aff
# проверка сочетаемости синтагматической аероятности аффикса с количеством оставшихся после групповой редукции баз, принимающих данный аффикс
def direct_alg(bases, specter, false_affixes):
global prob, voc
# верхнее подмножество остатков текущего вида
m = max(specter.values()) * THRESHOLD_OSTAT
upper_set = {ost: val for ost, val in specter.items() if val > m}
if not [ost for ost in upper_set if ost not in false_affixes]:
sp_list = sorted([(ost, val) for ost, val in specter.items()], key=itemgetter(1), reverse=True)
for ost, val in sp_list:
if ost not in false_affixes:
break
upper_set[ost] = val
print("Верхнее подмножество остатков текущего вида,", len(upper_set), "шт.")
# ВЫЧИСЛИТЬ незав для остатков из upper_set
nv = {}
summ_kol = 0
for ost, kol in specter.items(): # ostat - defdict
summ_kol += kol
nezav_ver = 1
for ch in ost:
nezav_ver *= prob[ch]
nv[ost] = nezav_ver
# усл вероятности
uv = {}
for ost, kol in specter.items():
uv[ost] = kol / summ_kol
# КФ - отношение условной вероятности к безусловной
corr_func = {}
for ost in upper_set:
corr_func[ost] = uv[ost] / nv[ost]
corr_func = [(ost, cf) for ost, cf in corr_func.items() if ost not in false_affixes]
corr_func = sorted(corr_func, key=itemgetter(1), reverse=True)
print("Коррелятивная функция: ", repr(corr_func)[:70])
if not corr_func: # суперпороговые редукции исчерпали спектр остатков
print("Остались только ложные остатки ")
return []
# найти след информант
informant = corr_func[0][0]
print("Аффикс-кандидат (информант):", informant)
return [informant]
def check_agglut_part():
return 0
def build_class(bootstrap_affix):
global words, average_word_len, THRESHOLD_OSTAT, thres_reduction, false_affixes, specter
k = 10 ** (math.log(average_word_len, 10) / (1 + 0.02 * math.log(len(voc), 10)))
# коэффициент редукции
print("Поправочный коэффициент:", k)
thres_reduction = 1 / average_word_len # порог редукции
print("Порог редукции:", thres_reduction)
affixes = [bootstrap_affix] # найденные аффиксы парадигмы
false_affixes = [] # список отвергнутых аффиксов, давших ложный шаг
bases = [bases_with_affix(bootstrap_affix)]
specter = [build_ost(bases[0])]
freq_bases = {b: build_freq_bases(b) for b in bases[0]}
step = 1
fast = False
while True:
print("\n*** шаг", step)
print("Аффиксы парадигмы:", affixes)
print("Основы {}-го вида: {} шт.".format(step, len(bases[-1])))
print("Спектр остатков {}-го вида: {} шт.".format(step, len(specter[-1])))
if not specter[-1]: # исчерпаны все остатки в спектре
print("Исчерпаны все остатки в спектре")
break
if not fast:
print("* Прямой ход * ")
next_affixes = direct_alg(bases[-1], specter[-1], false_affixes)
if not next_affixes:
break
else:
print("* Ускоренный ход!!! *")
del_aff = []
next_affixes = fast_alg(bases[-1], specter[-1], freq_bases, step, del_aff)
if not next_affixes:
fast = False
continue
# основы следующего вида
next_bases = [b for b in bases[-1] if all([b + aff in voc for aff in next_affixes]) and
freq_bases[b] > step]
# Поправочный коэффициент
# увеличивается во столько раз сколько аффиксов было сохранено в звене
K = k * len(next_affixes)
# Мера редукции
# доля основ текущего вида, не принимающих остатки следующего вида
N = len(bases[-1])
reduction = (N - len(next_bases)) / (K * N)
print("Мера редукции: ", reduction)
if reduction > thres_reduction: # суперпороговая редукция
false_affixes += next_affixes
print("ОТВЕРГАЕТСЯ! Суперпороговая редукция, ложные остатки", false_affixes)
if len(false_affixes) > average_word_len:
print("Cуперпороговая редукция повторяется большее число раз, чем средняя длина словоформы")
break
fast = False
else:
print("ПРИНИМАЕТСЯ!")
step += 1
false_affixes = []
affixes += next_affixes
# спектр остатков следующего вида
next_specter = {ost: sum([voc.get(b + ost, 0) for b in next_bases]) for ost in specter[-1]}
next_specter = {ost: v for ost, v in next_specter.items() if v > 0 and ost not in next_affixes}
bases.append(next_bases)
specter.append(next_specter)
if (len(next_bases) <= 2):
print("Остались две базы очередного вида")
break
if reduction < thres_reduction / 5: # если редукция < порога редукции/2(порог устойчивости)
fast = True
else:
fast = False
return bases[-1], affixes
main()
classes = []
for affix in informants:
# отправной аффикс начинаем строить с информанта имеющего max КФ
bootstrap_affix = extend_right(*affix)
bootstrap_affix = extend_left(bootstrap_affix, trie, len_search)
print(bootstrap_affix)
if any([c['aff'][0]==bootstrap_affix for c in classes]):
print("Аффикс уже обработан!")
continue
else:
print("КЛАСС", len(classes)+1, "\nОТПРАВНОЙ АФФИКС:", bootstrap_affix, "\n============================")
bases, affixes = build_class(bootstrap_affix)
while True:
first_letter = affixes[0][0]
if all([aff.startswith(first_letter) for aff in affixes]):
for i, b in enumerate(bases):
bases[i] = b + first_letter
for i, aff in enumerate(affixes):
affixes[i] = aff[1:]
else:
break
print("Основы: {} шт. {}".format(len(bases), bases))
print("Аффиксы: {} шт. {}".format(len(affixes), affixes))
classes.append({'b': bases, 'aff': affixes})
| длины аффикса
while aff_len < len_search:
# составляем список всех букв предшествующих аффиксу с количествами
L = [(l, current_dict[l]["n"]) for l in current_dict.keys() if l not in '#n']
# сортируем по количествам
L.sort(key=itemgetter(1), reverse=True)
# if affix=='нан':
# import pdb
# pdb.set_trace()
ch = L[0][0]
if L[0][1] > DROP * L[1][1]:
affix = ch + affix
current_dict = current_dict[ch]
else:
if (L[0][1] + L[1][1]) / (L[2][1] + L[3][1]) > 2.:
affix = ch + affix
current_dict = | identifier_body |
Исследование_1705.py | # ## Определения
from operator import itemgetter
import bz2, json
from collections import defaultdict
import math
from bisect import bisect_left
WORD_LEN_COEFF = 1
THRESHOLD_COEFF = 0.5
DROP = 1.5
DROP_1 = 2
AFFIX_LEN = 1
THRESHOLD_OSTAT = 0.5
N = 5 # шлейфовый порог
trie, voc, words, prob, word_count, average_word_len = None, None, None, None, None, None
prefix_trie, informants, len_search = None, None, None
def main():
global trie, prefix_trie, words, voc, prob, average_word_len, informants, len_search
print("Загружаю словарь...", end='')
voc = load_voc()
words = sorted(list(voc.keys()))
word_count = sum([voc[k] for k in voc])
average_word_len = sum([len(w) * voc[w] for w in words]) / word_count
len_search = int(average_word_len * WORD_LEN_COEFF) # это максимальная разрешенная длина аффикса
print("{} словоформ, {} словоупотреблений, средняя длина слова {} ".format(len(words), word_count, average_word_len))
# загрузка безусловных вероятностей букв и деревьев
print("Загружаю деревья...", end='')
prob = json.load(open("prob.json", encoding="utf-8"))
strie = bz2.BZ2File('trie.json.bz2', 'r').read().decode(encoding='utf-8')
trie = json.loads(strie)
del strie
prefix_trie = json.load(open("prefix_trie.json"))
print(', ok')
print("Безусловные вероятности первых 10 букв:\n========================")
print(", ".join(map(lambda pair: "{}: {:.4f}".format(*pair),
sorted([(letter, nv) for letter, nv in prob.items()],
key=itemgetter(1),
reverse=True)[:10])))
print("Подсчитываю условные вероятности букв...", end='')
cond_prob = build_cond_prob(voc, prob, len_search)
print(', ok')
# информанты - это буквы с макс значением КФ в каждой позиции
informants = find_informants(prob, cond_prob, len_search)
print("ИНФОРМАНТЫ:\n===================")
print(informants)
def load_voc():
# Загрузить словарь количеств из файла.
# Словарь содержит частоты слов в виде {слово: число вхождений в корпус, ... },
# например {"көппөҕү" : 4, "хазар" : 3, ...}
#
# корпус в формате txt занимет 174 МБ, словарь частот в json 10,4МБ,
# после сжатия в формат .bz2 1,7 МБ
svoc = bz2.BZ2File('voc.json.bz2', 'r').read().decode(encoding='utf-8')
voc = json.loads(svoc)
del svoc
return voc
def build_trie_and_prob(voc):
# подсчитываем частоты букв и строим дерево оконочаний
prob = defaultdict(lambda: 0)
trie = {'n': 0}
for w, n in voc.items(): # для каждого слова в списке
word = w[::-1] # переворачиваем слово, читаем слово с конца
current_dict = trie
trie['n'] += n
for letter in word: # для буквы в слове
prob[letter] += n
current_dict = current_dict.setdefault(letter, {'n': 0}) # получить значение из словаря по ключу.
# Автоматически добавляет элемент словаря, если он отсутствует.
current_dict['n'] += n
current_dict['#'] = n
total = sum([n for n in prob.values()]) # 84263863
for k, v in prob.items():
prob[k] = v / total
return trie, prob
def build_cond_prob(voc, prob, len_search):
letters = list(prob.keys())
cond_prob = defaultdict(lambda: 0) # словарь для условных вероятностей
total = defaultdict(lambda: 0)
for word, n in voc.items(): # для слова в словаре
positions = range(-min(len_search, len(word) - 2), 0) # from -7 to 0
for i in positions:
cond_prob[(i, word[i])] += n
total[i] += n # dictionary with prob of char words?
for posChar in cond_prob: # получаем из частот вероятности
i = posChar[0]
cond_prob[posChar] /= total[i]
return cond_prob
def find_informants(prob, cond_prob, len_search):
max_cond = defaultdict(lambda: 0.0)
maxlet = [''] * 8
# для каждой позиции ищем букву с наибольшим значением условной вероятности,
for posChar in cond_prob: # цикл по позициям букв в условной вероятности
aff_len = posChar[0]
if cond_prob[posChar] > max_cond[aff_len]:
max_cond[aff_len] = cond_prob[posChar]
maxlet[-aff_len] = posChar[1]
print("Наиболее частые буквы по позициям:\n============================\n", maxlet[-1:0:-1], "\n")
print("Максимальные вероятности по позициям:\n============================\n", max_cond, "\n")
# порог медиального разбиения - половина условной вероятности , буквы с УВ не меньше порога - верхнее подмножеств
cond_prob_sup = {}
for posChar in cond_prob:
i = posChar[0]
if cond_prob[posChar] > THRESHOLD_COEFF * max_cond[i]:
cond_prob_sup[posChar] = cond_prob[posChar]
# КФ = условная вер по данной позиции / безусл вероятность
cf = {}
for posChar in cond_prob_sup:
char = posChar[1]
cf[posChar] = cond_prob_sup[posChar] / prob[char]
print("КФ для верхних подмножества:\n====================\n");
for aff_len in set(map(itemgetter(0), cf.keys())):
print(aff_len, "**")
for k, v in cf.items():
if k[0] == aff_len:
print(k[1], "{:.4f}".format(v), end=" ")
print("")
# информанты - это буквы с макс значением КФ в каждой позиции
informants = []
for aff_len in range(-len_search, 0):
kmax = max({k for k in cf if k[0] == aff_len}, key=lambda k: cf[k])
informants.append((kmax[1], aff_len, cf[kmax]))
informants.sort(key=itemgetter(2), reverse=True)
return informants
def extend_right(char, pos, cf):
if pos == -1: # если информант в последней позиции, то расширять некуда
return char # возвращаем информант как аффикс
d = defaultdict(int)
for w, n in voc.items(): # для буквы и частоты в словаре
if w[pos:pos + 1] == char: # если буква в позиции равна нашей, то посчитаем это окончание
d[w[pos + 1:]] += n
return char + max(d.keys(), key=lambda end: d[end]) # прибавляем к информанту самое частое окончание
def extend_left(affix, trie, len_search):
# расширяем аффикс влево используя trie
current_dict = trie
for ch in affix[::-1]:
current_dict = current_dict[ch]
aff_len = len(affix)
"""
Для поиска буквы слева:
идем по дереву trie
по две самые частотные буквы делим друг на друга, при мере перепада большей 1.5 прибавляем к информанту более частую из них.
Иначе начинаем рассматривать по две самые частотные буквы/на следующие две,
если мера перепада в одной из них больше двух, то из данной пары берем более частотную и прибавляем ее к аффиксу.
"""
# пока позиция символа в слове больше разрешенной длины аффикса
while aff_len < len_search:
# составляем список всех букв предшествующих аффиксу с количествами
L = [(l, current_dict[l]["n"]) for l in current_dict.keys() if l not in '#n']
# сортируем по количествам
L.sort(key=itemgetter(1), reverse=True)
# if affix=='нан':
# import pdb
# pdb.set_trace()
ch = L[0][0]
if L[0][1] > DROP * L[1][1]:
affix = ch + affix
current_dict = current_dict[ch]
else:
if (L[0][1] + L[1][1]) / (L[2][1] + L[3][1]) > 2.:
affix = ch + affix
current_dict = current_dict[ch]
else:
break
aff_len += 1
return affix
# узел trie, соответствующий окончанию aff
def affix_node(aff):
global trie
current_node = trie
for char in aff[::-1]:
current_node = current_node[char]
return current_node
# узел trie, соответствующий префиксу prf
def prefix_node(prf):
global prefix_trie
current_node = prefix_trie
for char in prf:
current_node = current_node[char]
return current_node
# рекурсивно возвращает все основы, растущие из данного узла
def word_dfs(node, ending=''):
result = [ending] if '#' in node else []
for ch in node:
if ch in ['#', 'n']: continue
result += word_dfs(node[ch], ch + ending)
return result
def num_prefix(prf):
return prefix_node(prf)['n']
# все основы, растущие из данного узла
def bases_with_affix(aff):
global prefix_trie
return sorted([b for b in word_dfs(affix_node(aff)) if len(b) > 2 and voc[b + aff] > 1 or num_prefix(b) < 100])
# суммарная встречаемость основы b с любыми остатками
def build_freq_bases(b):
freq = 0
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
freq += voc[w]
return freq
def build_ost(bases):
global words, voc
ostat = defaultdict(int)
for i,b in enumerate(bases):
affix_pos = len(b)
for w in words[bisect_left(words, b):]:
if not w.startswith(b): break
if not w[affix_pos:] in affix:
ostat[w[affix_pos:]] += voc[w] # вариант с подсчетом словоупотреблений
# ostat[w[affix_pos:]] += 1 # вариант с подсчетом словоформ
return ostat
def fast_alg(bases, specter, freq_bases, step, del_aff):
max_ost_val = max(specter.values())
# те пары к в у которых к больше макс
inf_zveno = {ost: v for ost, v in specter.items() if v > max_ost_val * 0.5}
print("Звено: ", inf_zveno)
# дольше нужна сочетаемость с некоторой группой контрольных основ
# верхнее подмножество баз очередного вида
next_base_freq = {}
max_nb_freq = 0
freq_cur_bases = {b: sum([voc.get(b + ost, 0) for ost in specter]) for b in bases}
max_freq_cur = max(freq_cur_bases.values())
print("Макс частотность базы:", max_freq_cur)
# верхнее подмножество баз очередного вида
control_bases = [b for b, freq in freq_cur_bases.items() if freq >= max_freq_cur / 3]
if len(control_bases) == 1:
lower = [(b, freq) for b, freq in freq_cur_bases.items() if freq < max_freq_cur]
| rol_bases.append(max(lower, key=itemgetter(1))[0])
print("Контрольные базы:", control_bases)
# Первый критерий принадлежности к парадигме - сочетаемость остатков в звене с основами control_bases
keep_ost = [ost for ost in inf_zveno if all([b + ost in voc for b in control_bases])]
removed_ost = [ost for ost in inf_zveno if ost not in keep_ost]
print("!!Удалены из звена:", removed_ost)
print("Остаются в звене:", keep_ost)
next_bases = [b for b in bases if all([b + aff in voc for aff in keep_ost]) and
freq_cur_bases[b] > step]
if removed_ost:
del_aff += destiny_of_affix(removed_ost, next_bases, voc)
for x in del_aff:
del specter[x]
return keep_ost
def destiny_of_affix(removed_ost, next_bases, voc):
# проверка на меру децентрации
# если >=1/2 синтагматической вероятности падает на парадигматически малую(0,1) часть баз - то аффикс искл из парадигмы до конца рассм
# иначе - аффикс выводится из звена, но сохраняется в спектре остатков
removed_aff = []
for aff in removed_ost:
freq_b = sorted([(base, voc.get(base + aff, 0)) for base in next_bases], key=itemgetter(1), reverse=True)
L = len(freq_b) // 10
S = sum(map(itemgetter(1), freq_b))
if sum(map(itemgetter(1), freq_b[:L])) >= 1 / 2 * S:
removed_aff.append(aff)
return removed_aff
# проверка сочетаемости синтагматической аероятности аффикса с количеством оставшихся после групповой редукции баз, принимающих данный аффикс
def direct_alg(bases, specter, false_affixes):
global prob, voc
# верхнее подмножество остатков текущего вида
m = max(specter.values()) * THRESHOLD_OSTAT
upper_set = {ost: val for ost, val in specter.items() if val > m}
if not [ost for ost in upper_set if ost not in false_affixes]:
sp_list = sorted([(ost, val) for ost, val in specter.items()], key=itemgetter(1), reverse=True)
for ost, val in sp_list:
if ost not in false_affixes:
break
upper_set[ost] = val
print("Верхнее подмножество остатков текущего вида,", len(upper_set), "шт.")
# ВЫЧИСЛИТЬ незав для остатков из upper_set
nv = {}
summ_kol = 0
for ost, kol in specter.items(): # ostat - defdict
summ_kol += kol
nezav_ver = 1
for ch in ost:
nezav_ver *= prob[ch]
nv[ost] = nezav_ver
# усл вероятности
uv = {}
for ost, kol in specter.items():
uv[ost] = kol / summ_kol
# КФ - отношение условной вероятности к безусловной
corr_func = {}
for ost in upper_set:
corr_func[ost] = uv[ost] / nv[ost]
corr_func = [(ost, cf) for ost, cf in corr_func.items() if ost not in false_affixes]
corr_func = sorted(corr_func, key=itemgetter(1), reverse=True)
print("Коррелятивная функция: ", repr(corr_func)[:70])
if not corr_func: # суперпороговые редукции исчерпали спектр остатков
print("Остались только ложные остатки ")
return []
# найти след информант
informant = corr_func[0][0]
print("Аффикс-кандидат (информант):", informant)
return [informant]
def check_agglut_part():
return 0
def build_class(bootstrap_affix):
global words, average_word_len, THRESHOLD_OSTAT, thres_reduction, false_affixes, specter
k = 10 ** (math.log(average_word_len, 10) / (1 + 0.02 * math.log(len(voc), 10)))
# коэффициент редукции
print("Поправочный коэффициент:", k)
thres_reduction = 1 / average_word_len # порог редукции
print("Порог редукции:", thres_reduction)
affixes = [bootstrap_affix] # найденные аффиксы парадигмы
false_affixes = [] # список отвергнутых аффиксов, давших ложный шаг
bases = [bases_with_affix(bootstrap_affix)]
specter = [build_ost(bases[0])]
freq_bases = {b: build_freq_bases(b) for b in bases[0]}
step = 1
fast = False
while True:
print("\n*** шаг", step)
print("Аффиксы парадигмы:", affixes)
print("Основы {}-го вида: {} шт.".format(step, len(bases[-1])))
print("Спектр остатков {}-го вида: {} шт.".format(step, len(specter[-1])))
if not specter[-1]: # исчерпаны все остатки в спектре
print("Исчерпаны все остатки в спектре")
break
if not fast:
print("* Прямой ход * ")
next_affixes = direct_alg(bases[-1], specter[-1], false_affixes)
if not next_affixes:
break
else:
print("* Ускоренный ход!!! *")
del_aff = []
next_affixes = fast_alg(bases[-1], specter[-1], freq_bases, step, del_aff)
if not next_affixes:
fast = False
continue
# основы следующего вида
next_bases = [b for b in bases[-1] if all([b + aff in voc for aff in next_affixes]) and
freq_bases[b] > step]
# Поправочный коэффициент
# увеличивается во столько раз сколько аффиксов было сохранено в звене
K = k * len(next_affixes)
# Мера редукции
# доля основ текущего вида, не принимающих остатки следующего вида
N = len(bases[-1])
reduction = (N - len(next_bases)) / (K * N)
print("Мера редукции: ", reduction)
if reduction > thres_reduction: # суперпороговая редукция
false_affixes += next_affixes
print("ОТВЕРГАЕТСЯ! Суперпороговая редукция, ложные остатки", false_affixes)
if len(false_affixes) > average_word_len:
print("Cуперпороговая редукция повторяется большее число раз, чем средняя длина словоформы")
break
fast = False
else:
print("ПРИНИМАЕТСЯ!")
step += 1
false_affixes = []
affixes += next_affixes
# спектр остатков следующего вида
next_specter = {ost: sum([voc.get(b + ost, 0) for b in next_bases]) for ost in specter[-1]}
next_specter = {ost: v for ost, v in next_specter.items() if v > 0 and ost not in next_affixes}
bases.append(next_bases)
specter.append(next_specter)
if (len(next_bases) <= 2):
print("Остались две базы очередного вида")
break
if reduction < thres_reduction / 5: # если редукция < порога редукции/2(порог устойчивости)
fast = True
else:
fast = False
return bases[-1], affixes
main()
classes = []
for affix in informants:
# отправной аффикс начинаем строить с информанта имеющего max КФ
bootstrap_affix = extend_right(*affix)
bootstrap_affix = extend_left(bootstrap_affix, trie, len_search)
print(bootstrap_affix)
if any([c['aff'][0]==bootstrap_affix for c in classes]):
print("Аффикс уже обработан!")
continue
else:
print("КЛАСС", len(classes)+1, "\nОТПРАВНОЙ АФФИКС:", bootstrap_affix, "\n============================")
bases, affixes = build_class(bootstrap_affix)
while True:
first_letter = affixes[0][0]
if all([aff.startswith(first_letter) for aff in affixes]):
for i, b in enumerate(bases):
bases[i] = b + first_letter
for i, aff in enumerate(affixes):
affixes[i] = aff[1:]
else:
break
print("Основы: {} шт. {}".format(len(bases), bases))
print("Аффиксы: {} шт. {}".format(len(affixes), affixes))
classes.append({'b': bases, 'aff': affixes})
| cont | conditional_block |
Player.js | import React, { useState, useEffect, useRef, useLayoutEffect } from 'react';
import { useDragObservable } from '../../hooks/eventHooks';
import Card from '../card/Card';
import css from './Player.sass';
const Player = ({ cards, onSelect }) => { | const [ draggingLeft, setDraggingLeft ] = useState(0);
const [ jokerSelect, setJokerSelect ] = useState(undefined);
const [ jokerValue, setJokerValue ] = useState({rank: 'two', suit: 'spades'});
const [ cardWidth, setCardWidth ] = useState(0);
const playerWidth = useRef(0);
const draggingRef = useRef([]);
const selectedRef = useRef([]);
// Need the ref to be able to get and set current value in effect
// Need state to make react rerender (changing ref doesn't trigger rerender)
const scrollRef = useRef(0);
const [ scroll, setScroll] = useState(0);
const cardOverlapRef = useRef(30);
const [ cardOverlap, setCardOverlap ] = useState(30);
const playerRef = useRef(null);
const dragObservables = useDragObservable(playerRef.current);
useEffect(() => {
const width = parseInt(getComputedStyle(document.documentElement).getPropertyValue('--card-width'), 10);
setCardWidth(width);
cardOverlapRef.current = (document.querySelector('#container').offsetWidth - width) / cards.length;
setCardOverlap(cardOverlapRef.current);
playerWidth.current = cards.length * cardOverlapRef.current + (width - cardOverlapRef.current);
}, []);
useEffect(() => {
setSelected([]);
setOrder(order.filter(order => cards.some(card => card.id === order)));
}, [cards]);
// Selected Card Callback
useEffect(() => {
if (onSelect) {
onSelect(selected);
}
selectedRef.current = selected.map(card => card.id);
}, [selected]);
// Reorder stuff
useEffect(() => {
if (!dragObservables) return;
const { start$, drag$, end$ } = dragObservables;
let _dragging = false;
let _startX = 0;
let _lastX = 0;
let _lastTouches = [];
const getOffset = e => {
return e.targetTouches && e.targetTouches.length
? e.targetTouches[0].pageX - e.target.getBoundingClientRect().left
: e.offsetX;
}
const updateScroll = scrolled => {
scrollRef.current = clampScroll(scrollRef.current + scrolled);
setScroll(scrollRef.current);
}
const getGesture = (touches, lastTouches) => {
if (touches.length > 1 && lastTouches.length > 1) {
let point1 = -1;
let point2 = -1;
for (let i = 0; i < lastTouches.length; i++) {
if (lastTouches[i].identifier === touches[0].identifier) { point1 = i; }
if (lastTouches[i].identifier === touches[1].identifier) { point2 = i; }
}
if (point1 >= 0 && point2 >= 0) {
const touchOneChange = touches[0].pageX - lastTouches[point1].pageX;
const touchTwoChange = touches[1].pageX - lastTouches[point2].pageX;
// check if swipe
if (touchOneChange * touchTwoChange > 0) {
return { type: "swipe", value: touches[0].pageX - lastTouches[point1].pageX };
}
const diff1 = Math.abs(touches[1].pageX - touches[0].pageX);
const diff2 = Math.abs(lastTouches[point2].pageX - lastTouches[point1].pageX);
if (Math.abs(diff1 - diff2) > 1 && diff1 !== diff2) {
return { type: "pinch", value: (diff1 - diff2) / 7 };
}
return { type: "none" };
}
}
}
const startSubscription = start$.subscribe(start => {
if (start.touches && start.touches.length > 1) {
start.preventDefault();
_lastTouches = start.touches;
return;
}
_startX = getOffset(start);
_lastX = 0;
});
const dragSubscription = drag$.subscribe(move => {
if (move.touches && move.touches.length > 1) {
move.preventDefault();
const gesture = getGesture(move.touches, _lastTouches);
switch (gesture.type) {
case "swipe":
updateScroll(gesture.value);
break;
case "pinch":
cardOverlapRef.current = Math.max((document.querySelector('#container').offsetWidth - cardWidth) / cards.length, Math.min(cardWidth, cardOverlapRef.current + gesture.value));
playerWidth.current = cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current);
setCardOverlap(cardOverlapRef.current);
updateScroll(0);
default:
break;
}
_lastTouches = [...move.touches];
return;
}
if (!_dragging) {
if (selectedRef.current.length) {
draggingRef.current = selectedRef.current;
_dragging = true;
} else {
updateScroll(getOffset(move) - _startX);
}
} else {
const mouseX = getMouseX(move);
const cardLeft = Math.min(
playerWidth.current - (draggingRef.current.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)),
Math.max(
0,
mouseX - _startX
)
);
setDraggingLeft(cardLeft);
// Scroll when dragging card to edges
const rightThreshold = playerRef.current.offsetWidth - scrollRef.current - (cardWidth * 1.25);
const leftThreshold = -scrollRef.current + (cardWidth / 2);
if (_lastX && cardLeft - _lastX > 0 && cardLeft > rightThreshold) {
updateScroll(Math.floor(rightThreshold - cardLeft));
} else if (_lastX && cardLeft - _lastX < 0 && cardLeft < leftThreshold) {
updateScroll(Math.floor(leftThreshold - cardLeft));
}
_lastX = cardLeft;
}
});
const endSubscription = end$.subscribe(end => {
if (_dragging) {
setSelected([]);
}
draggingRef.current = [];
_dragging = false;
});
return () => {
startSubscription.unsubscribe();
dragSubscription.unsubscribe();
endSubscription.unsubscribe();
};
},[dragObservables, cards]);
useEffect(() => {
let moveIndex = getCardIndex(draggingLeft + (cardOverlapRef.current / 2));
if (draggingRef.current.length === 0 || moveIndex === order.findIndex(id => id === draggingRef.current[0])) return;
let newOrder = order.filter(id => !draggingRef.current.includes(id));
newOrder.splice(moveIndex, 0, ...draggingRef.current);
setOrder(newOrder);
}, [draggingLeft]);
function getMouseX(e) {
const element = e.currentTarget;
const pageX = e.touches && e.touches.length
? e.touches[0].pageX
: e.pageX;
const x = pageX - element.getBoundingClientRect().left;
return x;
}
function getCardIndex(x) {
return Math.max(0, Math.min(Math.floor(x / cardOverlapRef.current), cards.length - 1));
}
function clampScroll(scroll) {
const minScroll = playerRef.current
? playerRef.current.parentElement.offsetWidth - (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current))
: -Infinity;
return Math.min(0, Math.max(minScroll, scroll));
}
function selectCard(card) {
// make all jokers two of spades (until select feature done)
// const joker = { rank: 'two', suit: 'spades' };
// const playedCard = {
// ...joker,
// ...card,
// is_joker: card.type === 'joker'
// };
const filteredSelect = selected.filter(selectedCard => selectedCard.id !== card.id);
// if card wasn't already selected, select it!
if (filteredSelect.length === selected.length) {
// joker select
if (card.type === 'joker') {
setJokerSelect(card);
return;
}
setSelected([...selected, {...card, is_joker: false}]);
} else {
setSelected(filteredSelect);
}
}
if (jokerSelect) {
const ranks = [
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'jack',
'queen',
'king',
'ace'
];
const suits = [
'clubs',
'hearts',
'diamonds',
'spades'
];
return (
<div
className={css.player}
>
<select
onChange={({target}) => setJokerValue(value => ({...value, rank: target.value}))}
value={jokerValue.rank}
>
{ranks.map(rank => <option key={rank} value={rank}>{rank}</option>)}
</select>
<select
onChange={({target}) => setJokerValue(value => ({...value, suit: target.value}))}
value={jokerValue.suit}
>
{suits.map(suit => <option key={suit} value={suit}>{suit}</option>)}
</select>
<button onClick={() => { setSelected([...selected, {id: jokerSelect.id, ...jokerValue, is_joker: true}]); setJokerSelect(undefined); }}>
OK
</button>
</div>
);
}
return (
<div
ref={playerRef}
className={`${css.player} ${draggingRef.current.length ? css.reordering : ''}`}
style={{
width: (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)) + 'px',
transform: `translate(${scroll}px)`,
}}
>
{
cards.map((card, index) => {
const position = order.findIndex(key => key === card.id);
const draggingPosition = draggingRef.current.findIndex(key => key === card.id);
const cardDragging = draggingRef.current.includes(card.id);
const left = cardDragging ? draggingLeft + (draggingPosition * cardOverlapRef.current) : position * cardOverlapRef.current;
return (
<Card
key={card.id}
card={card}
selected={selectedRef.current.includes(card.id)}
dragging={cardDragging}
reordering={draggingRef.current.length}
style={{zIndex: position, left: left + 'px'}}
onClick={() => selectCard(card)}
/>
);
})
}
</div>
);
}
export default Player; |
const [ selected, setSelected ] = useState([]);
const [ order, setOrder ] = useState(cards.map(card => card.id)); | random_line_split |
Player.js | import React, { useState, useEffect, useRef, useLayoutEffect } from 'react';
import { useDragObservable } from '../../hooks/eventHooks';
import Card from '../card/Card';
import css from './Player.sass';
const Player = ({ cards, onSelect }) => {
const [ selected, setSelected ] = useState([]);
const [ order, setOrder ] = useState(cards.map(card => card.id));
const [ draggingLeft, setDraggingLeft ] = useState(0);
const [ jokerSelect, setJokerSelect ] = useState(undefined);
const [ jokerValue, setJokerValue ] = useState({rank: 'two', suit: 'spades'});
const [ cardWidth, setCardWidth ] = useState(0);
const playerWidth = useRef(0);
const draggingRef = useRef([]);
const selectedRef = useRef([]);
// Need the ref to be able to get and set current value in effect
// Need state to make react rerender (changing ref doesn't trigger rerender)
const scrollRef = useRef(0);
const [ scroll, setScroll] = useState(0);
const cardOverlapRef = useRef(30);
const [ cardOverlap, setCardOverlap ] = useState(30);
const playerRef = useRef(null);
const dragObservables = useDragObservable(playerRef.current);
useEffect(() => {
const width = parseInt(getComputedStyle(document.documentElement).getPropertyValue('--card-width'), 10);
setCardWidth(width);
cardOverlapRef.current = (document.querySelector('#container').offsetWidth - width) / cards.length;
setCardOverlap(cardOverlapRef.current);
playerWidth.current = cards.length * cardOverlapRef.current + (width - cardOverlapRef.current);
}, []);
useEffect(() => {
setSelected([]);
setOrder(order.filter(order => cards.some(card => card.id === order)));
}, [cards]);
// Selected Card Callback
useEffect(() => {
if (onSelect) {
onSelect(selected);
}
selectedRef.current = selected.map(card => card.id);
}, [selected]);
// Reorder stuff
useEffect(() => {
if (!dragObservables) return;
const { start$, drag$, end$ } = dragObservables;
let _dragging = false;
let _startX = 0;
let _lastX = 0;
let _lastTouches = [];
const getOffset = e => {
return e.targetTouches && e.targetTouches.length
? e.targetTouches[0].pageX - e.target.getBoundingClientRect().left
: e.offsetX;
}
const updateScroll = scrolled => {
scrollRef.current = clampScroll(scrollRef.current + scrolled);
setScroll(scrollRef.current);
}
const getGesture = (touches, lastTouches) => {
if (touches.length > 1 && lastTouches.length > 1) {
let point1 = -1;
let point2 = -1;
for (let i = 0; i < lastTouches.length; i++) {
if (lastTouches[i].identifier === touches[0].identifier) { point1 = i; }
if (lastTouches[i].identifier === touches[1].identifier) |
}
if (point1 >= 0 && point2 >= 0) {
const touchOneChange = touches[0].pageX - lastTouches[point1].pageX;
const touchTwoChange = touches[1].pageX - lastTouches[point2].pageX;
// check if swipe
if (touchOneChange * touchTwoChange > 0) {
return { type: "swipe", value: touches[0].pageX - lastTouches[point1].pageX };
}
const diff1 = Math.abs(touches[1].pageX - touches[0].pageX);
const diff2 = Math.abs(lastTouches[point2].pageX - lastTouches[point1].pageX);
if (Math.abs(diff1 - diff2) > 1 && diff1 !== diff2) {
return { type: "pinch", value: (diff1 - diff2) / 7 };
}
return { type: "none" };
}
}
}
const startSubscription = start$.subscribe(start => {
if (start.touches && start.touches.length > 1) {
start.preventDefault();
_lastTouches = start.touches;
return;
}
_startX = getOffset(start);
_lastX = 0;
});
const dragSubscription = drag$.subscribe(move => {
if (move.touches && move.touches.length > 1) {
move.preventDefault();
const gesture = getGesture(move.touches, _lastTouches);
switch (gesture.type) {
case "swipe":
updateScroll(gesture.value);
break;
case "pinch":
cardOverlapRef.current = Math.max((document.querySelector('#container').offsetWidth - cardWidth) / cards.length, Math.min(cardWidth, cardOverlapRef.current + gesture.value));
playerWidth.current = cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current);
setCardOverlap(cardOverlapRef.current);
updateScroll(0);
default:
break;
}
_lastTouches = [...move.touches];
return;
}
if (!_dragging) {
if (selectedRef.current.length) {
draggingRef.current = selectedRef.current;
_dragging = true;
} else {
updateScroll(getOffset(move) - _startX);
}
} else {
const mouseX = getMouseX(move);
const cardLeft = Math.min(
playerWidth.current - (draggingRef.current.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)),
Math.max(
0,
mouseX - _startX
)
);
setDraggingLeft(cardLeft);
// Scroll when dragging card to edges
const rightThreshold = playerRef.current.offsetWidth - scrollRef.current - (cardWidth * 1.25);
const leftThreshold = -scrollRef.current + (cardWidth / 2);
if (_lastX && cardLeft - _lastX > 0 && cardLeft > rightThreshold) {
updateScroll(Math.floor(rightThreshold - cardLeft));
} else if (_lastX && cardLeft - _lastX < 0 && cardLeft < leftThreshold) {
updateScroll(Math.floor(leftThreshold - cardLeft));
}
_lastX = cardLeft;
}
});
const endSubscription = end$.subscribe(end => {
if (_dragging) {
setSelected([]);
}
draggingRef.current = [];
_dragging = false;
});
return () => {
startSubscription.unsubscribe();
dragSubscription.unsubscribe();
endSubscription.unsubscribe();
};
},[dragObservables, cards]);
useEffect(() => {
let moveIndex = getCardIndex(draggingLeft + (cardOverlapRef.current / 2));
if (draggingRef.current.length === 0 || moveIndex === order.findIndex(id => id === draggingRef.current[0])) return;
let newOrder = order.filter(id => !draggingRef.current.includes(id));
newOrder.splice(moveIndex, 0, ...draggingRef.current);
setOrder(newOrder);
}, [draggingLeft]);
function getMouseX(e) {
const element = e.currentTarget;
const pageX = e.touches && e.touches.length
? e.touches[0].pageX
: e.pageX;
const x = pageX - element.getBoundingClientRect().left;
return x;
}
function getCardIndex(x) {
return Math.max(0, Math.min(Math.floor(x / cardOverlapRef.current), cards.length - 1));
}
function clampScroll(scroll) {
const minScroll = playerRef.current
? playerRef.current.parentElement.offsetWidth - (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current))
: -Infinity;
return Math.min(0, Math.max(minScroll, scroll));
}
function selectCard(card) {
// make all jokers two of spades (until select feature done)
// const joker = { rank: 'two', suit: 'spades' };
// const playedCard = {
// ...joker,
// ...card,
// is_joker: card.type === 'joker'
// };
const filteredSelect = selected.filter(selectedCard => selectedCard.id !== card.id);
// if card wasn't already selected, select it!
if (filteredSelect.length === selected.length) {
// joker select
if (card.type === 'joker') {
setJokerSelect(card);
return;
}
setSelected([...selected, {...card, is_joker: false}]);
} else {
setSelected(filteredSelect);
}
}
if (jokerSelect) {
const ranks = [
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'jack',
'queen',
'king',
'ace'
];
const suits = [
'clubs',
'hearts',
'diamonds',
'spades'
];
return (
<div
className={css.player}
>
<select
onChange={({target}) => setJokerValue(value => ({...value, rank: target.value}))}
value={jokerValue.rank}
>
{ranks.map(rank => <option key={rank} value={rank}>{rank}</option>)}
</select>
<select
onChange={({target}) => setJokerValue(value => ({...value, suit: target.value}))}
value={jokerValue.suit}
>
{suits.map(suit => <option key={suit} value={suit}>{suit}</option>)}
</select>
<button onClick={() => { setSelected([...selected, {id: jokerSelect.id, ...jokerValue, is_joker: true}]); setJokerSelect(undefined); }}>
OK
</button>
</div>
);
}
return (
<div
ref={playerRef}
className={`${css.player} ${draggingRef.current.length ? css.reordering : ''}`}
style={{
width: (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)) + 'px',
transform: `translate(${scroll}px)`,
}}
>
{
cards.map((card, index) => {
const position = order.findIndex(key => key === card.id);
const draggingPosition = draggingRef.current.findIndex(key => key === card.id);
const cardDragging = draggingRef.current.includes(card.id);
const left = cardDragging ? draggingLeft + (draggingPosition * cardOverlapRef.current) : position * cardOverlapRef.current;
return (
<Card
key={card.id}
card={card}
selected={selectedRef.current.includes(card.id)}
dragging={cardDragging}
reordering={draggingRef.current.length}
style={{zIndex: position, left: left + 'px'}}
onClick={() => selectCard(card)}
/>
);
})
}
</div>
);
}
export default Player;
| { point2 = i; } | conditional_block |
Player.js | import React, { useState, useEffect, useRef, useLayoutEffect } from 'react';
import { useDragObservable } from '../../hooks/eventHooks';
import Card from '../card/Card';
import css from './Player.sass';
const Player = ({ cards, onSelect }) => {
const [ selected, setSelected ] = useState([]);
const [ order, setOrder ] = useState(cards.map(card => card.id));
const [ draggingLeft, setDraggingLeft ] = useState(0);
const [ jokerSelect, setJokerSelect ] = useState(undefined);
const [ jokerValue, setJokerValue ] = useState({rank: 'two', suit: 'spades'});
const [ cardWidth, setCardWidth ] = useState(0);
const playerWidth = useRef(0);
const draggingRef = useRef([]);
const selectedRef = useRef([]);
// Need the ref to be able to get and set current value in effect
// Need state to make react rerender (changing ref doesn't trigger rerender)
const scrollRef = useRef(0);
const [ scroll, setScroll] = useState(0);
const cardOverlapRef = useRef(30);
const [ cardOverlap, setCardOverlap ] = useState(30);
const playerRef = useRef(null);
const dragObservables = useDragObservable(playerRef.current);
useEffect(() => {
const width = parseInt(getComputedStyle(document.documentElement).getPropertyValue('--card-width'), 10);
setCardWidth(width);
cardOverlapRef.current = (document.querySelector('#container').offsetWidth - width) / cards.length;
setCardOverlap(cardOverlapRef.current);
playerWidth.current = cards.length * cardOverlapRef.current + (width - cardOverlapRef.current);
}, []);
useEffect(() => {
setSelected([]);
setOrder(order.filter(order => cards.some(card => card.id === order)));
}, [cards]);
// Selected Card Callback
useEffect(() => {
if (onSelect) {
onSelect(selected);
}
selectedRef.current = selected.map(card => card.id);
}, [selected]);
// Reorder stuff
useEffect(() => {
if (!dragObservables) return;
const { start$, drag$, end$ } = dragObservables;
let _dragging = false;
let _startX = 0;
let _lastX = 0;
let _lastTouches = [];
const getOffset = e => {
return e.targetTouches && e.targetTouches.length
? e.targetTouches[0].pageX - e.target.getBoundingClientRect().left
: e.offsetX;
}
const updateScroll = scrolled => {
scrollRef.current = clampScroll(scrollRef.current + scrolled);
setScroll(scrollRef.current);
}
const getGesture = (touches, lastTouches) => {
if (touches.length > 1 && lastTouches.length > 1) {
let point1 = -1;
let point2 = -1;
for (let i = 0; i < lastTouches.length; i++) {
if (lastTouches[i].identifier === touches[0].identifier) { point1 = i; }
if (lastTouches[i].identifier === touches[1].identifier) { point2 = i; }
}
if (point1 >= 0 && point2 >= 0) {
const touchOneChange = touches[0].pageX - lastTouches[point1].pageX;
const touchTwoChange = touches[1].pageX - lastTouches[point2].pageX;
// check if swipe
if (touchOneChange * touchTwoChange > 0) {
return { type: "swipe", value: touches[0].pageX - lastTouches[point1].pageX };
}
const diff1 = Math.abs(touches[1].pageX - touches[0].pageX);
const diff2 = Math.abs(lastTouches[point2].pageX - lastTouches[point1].pageX);
if (Math.abs(diff1 - diff2) > 1 && diff1 !== diff2) {
return { type: "pinch", value: (diff1 - diff2) / 7 };
}
return { type: "none" };
}
}
}
const startSubscription = start$.subscribe(start => {
if (start.touches && start.touches.length > 1) {
start.preventDefault();
_lastTouches = start.touches;
return;
}
_startX = getOffset(start);
_lastX = 0;
});
const dragSubscription = drag$.subscribe(move => {
if (move.touches && move.touches.length > 1) {
move.preventDefault();
const gesture = getGesture(move.touches, _lastTouches);
switch (gesture.type) {
case "swipe":
updateScroll(gesture.value);
break;
case "pinch":
cardOverlapRef.current = Math.max((document.querySelector('#container').offsetWidth - cardWidth) / cards.length, Math.min(cardWidth, cardOverlapRef.current + gesture.value));
playerWidth.current = cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current);
setCardOverlap(cardOverlapRef.current);
updateScroll(0);
default:
break;
}
_lastTouches = [...move.touches];
return;
}
if (!_dragging) {
if (selectedRef.current.length) {
draggingRef.current = selectedRef.current;
_dragging = true;
} else {
updateScroll(getOffset(move) - _startX);
}
} else {
const mouseX = getMouseX(move);
const cardLeft = Math.min(
playerWidth.current - (draggingRef.current.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)),
Math.max(
0,
mouseX - _startX
)
);
setDraggingLeft(cardLeft);
// Scroll when dragging card to edges
const rightThreshold = playerRef.current.offsetWidth - scrollRef.current - (cardWidth * 1.25);
const leftThreshold = -scrollRef.current + (cardWidth / 2);
if (_lastX && cardLeft - _lastX > 0 && cardLeft > rightThreshold) {
updateScroll(Math.floor(rightThreshold - cardLeft));
} else if (_lastX && cardLeft - _lastX < 0 && cardLeft < leftThreshold) {
updateScroll(Math.floor(leftThreshold - cardLeft));
}
_lastX = cardLeft;
}
});
const endSubscription = end$.subscribe(end => {
if (_dragging) {
setSelected([]);
}
draggingRef.current = [];
_dragging = false;
});
return () => {
startSubscription.unsubscribe();
dragSubscription.unsubscribe();
endSubscription.unsubscribe();
};
},[dragObservables, cards]);
useEffect(() => {
let moveIndex = getCardIndex(draggingLeft + (cardOverlapRef.current / 2));
if (draggingRef.current.length === 0 || moveIndex === order.findIndex(id => id === draggingRef.current[0])) return;
let newOrder = order.filter(id => !draggingRef.current.includes(id));
newOrder.splice(moveIndex, 0, ...draggingRef.current);
setOrder(newOrder);
}, [draggingLeft]);
function getMouseX(e) {
const element = e.currentTarget;
const pageX = e.touches && e.touches.length
? e.touches[0].pageX
: e.pageX;
const x = pageX - element.getBoundingClientRect().left;
return x;
}
function getCardIndex(x) {
return Math.max(0, Math.min(Math.floor(x / cardOverlapRef.current), cards.length - 1));
}
function clampScroll(scroll) |
function selectCard(card) {
// make all jokers two of spades (until select feature done)
// const joker = { rank: 'two', suit: 'spades' };
// const playedCard = {
// ...joker,
// ...card,
// is_joker: card.type === 'joker'
// };
const filteredSelect = selected.filter(selectedCard => selectedCard.id !== card.id);
// if card wasn't already selected, select it!
if (filteredSelect.length === selected.length) {
// joker select
if (card.type === 'joker') {
setJokerSelect(card);
return;
}
setSelected([...selected, {...card, is_joker: false}]);
} else {
setSelected(filteredSelect);
}
}
if (jokerSelect) {
const ranks = [
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'jack',
'queen',
'king',
'ace'
];
const suits = [
'clubs',
'hearts',
'diamonds',
'spades'
];
return (
<div
className={css.player}
>
<select
onChange={({target}) => setJokerValue(value => ({...value, rank: target.value}))}
value={jokerValue.rank}
>
{ranks.map(rank => <option key={rank} value={rank}>{rank}</option>)}
</select>
<select
onChange={({target}) => setJokerValue(value => ({...value, suit: target.value}))}
value={jokerValue.suit}
>
{suits.map(suit => <option key={suit} value={suit}>{suit}</option>)}
</select>
<button onClick={() => { setSelected([...selected, {id: jokerSelect.id, ...jokerValue, is_joker: true}]); setJokerSelect(undefined); }}>
OK
</button>
</div>
);
}
return (
<div
ref={playerRef}
className={`${css.player} ${draggingRef.current.length ? css.reordering : ''}`}
style={{
width: (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)) + 'px',
transform: `translate(${scroll}px)`,
}}
>
{
cards.map((card, index) => {
const position = order.findIndex(key => key === card.id);
const draggingPosition = draggingRef.current.findIndex(key => key === card.id);
const cardDragging = draggingRef.current.includes(card.id);
const left = cardDragging ? draggingLeft + (draggingPosition * cardOverlapRef.current) : position * cardOverlapRef.current;
return (
<Card
key={card.id}
card={card}
selected={selectedRef.current.includes(card.id)}
dragging={cardDragging}
reordering={draggingRef.current.length}
style={{zIndex: position, left: left + 'px'}}
onClick={() => selectCard(card)}
/>
);
})
}
</div>
);
}
export default Player;
| {
const minScroll = playerRef.current
? playerRef.current.parentElement.offsetWidth - (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current))
: -Infinity;
return Math.min(0, Math.max(minScroll, scroll));
} | identifier_body |
Player.js | import React, { useState, useEffect, useRef, useLayoutEffect } from 'react';
import { useDragObservable } from '../../hooks/eventHooks';
import Card from '../card/Card';
import css from './Player.sass';
const Player = ({ cards, onSelect }) => {
const [ selected, setSelected ] = useState([]);
const [ order, setOrder ] = useState(cards.map(card => card.id));
const [ draggingLeft, setDraggingLeft ] = useState(0);
const [ jokerSelect, setJokerSelect ] = useState(undefined);
const [ jokerValue, setJokerValue ] = useState({rank: 'two', suit: 'spades'});
const [ cardWidth, setCardWidth ] = useState(0);
const playerWidth = useRef(0);
const draggingRef = useRef([]);
const selectedRef = useRef([]);
// Need the ref to be able to get and set current value in effect
// Need state to make react rerender (changing ref doesn't trigger rerender)
const scrollRef = useRef(0);
const [ scroll, setScroll] = useState(0);
const cardOverlapRef = useRef(30);
const [ cardOverlap, setCardOverlap ] = useState(30);
const playerRef = useRef(null);
const dragObservables = useDragObservable(playerRef.current);
useEffect(() => {
const width = parseInt(getComputedStyle(document.documentElement).getPropertyValue('--card-width'), 10);
setCardWidth(width);
cardOverlapRef.current = (document.querySelector('#container').offsetWidth - width) / cards.length;
setCardOverlap(cardOverlapRef.current);
playerWidth.current = cards.length * cardOverlapRef.current + (width - cardOverlapRef.current);
}, []);
useEffect(() => {
setSelected([]);
setOrder(order.filter(order => cards.some(card => card.id === order)));
}, [cards]);
// Selected Card Callback
useEffect(() => {
if (onSelect) {
onSelect(selected);
}
selectedRef.current = selected.map(card => card.id);
}, [selected]);
// Reorder stuff
useEffect(() => {
if (!dragObservables) return;
const { start$, drag$, end$ } = dragObservables;
let _dragging = false;
let _startX = 0;
let _lastX = 0;
let _lastTouches = [];
const getOffset = e => {
return e.targetTouches && e.targetTouches.length
? e.targetTouches[0].pageX - e.target.getBoundingClientRect().left
: e.offsetX;
}
const updateScroll = scrolled => {
scrollRef.current = clampScroll(scrollRef.current + scrolled);
setScroll(scrollRef.current);
}
const getGesture = (touches, lastTouches) => {
if (touches.length > 1 && lastTouches.length > 1) {
let point1 = -1;
let point2 = -1;
for (let i = 0; i < lastTouches.length; i++) {
if (lastTouches[i].identifier === touches[0].identifier) { point1 = i; }
if (lastTouches[i].identifier === touches[1].identifier) { point2 = i; }
}
if (point1 >= 0 && point2 >= 0) {
const touchOneChange = touches[0].pageX - lastTouches[point1].pageX;
const touchTwoChange = touches[1].pageX - lastTouches[point2].pageX;
// check if swipe
if (touchOneChange * touchTwoChange > 0) {
return { type: "swipe", value: touches[0].pageX - lastTouches[point1].pageX };
}
const diff1 = Math.abs(touches[1].pageX - touches[0].pageX);
const diff2 = Math.abs(lastTouches[point2].pageX - lastTouches[point1].pageX);
if (Math.abs(diff1 - diff2) > 1 && diff1 !== diff2) {
return { type: "pinch", value: (diff1 - diff2) / 7 };
}
return { type: "none" };
}
}
}
const startSubscription = start$.subscribe(start => {
if (start.touches && start.touches.length > 1) {
start.preventDefault();
_lastTouches = start.touches;
return;
}
_startX = getOffset(start);
_lastX = 0;
});
const dragSubscription = drag$.subscribe(move => {
if (move.touches && move.touches.length > 1) {
move.preventDefault();
const gesture = getGesture(move.touches, _lastTouches);
switch (gesture.type) {
case "swipe":
updateScroll(gesture.value);
break;
case "pinch":
cardOverlapRef.current = Math.max((document.querySelector('#container').offsetWidth - cardWidth) / cards.length, Math.min(cardWidth, cardOverlapRef.current + gesture.value));
playerWidth.current = cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current);
setCardOverlap(cardOverlapRef.current);
updateScroll(0);
default:
break;
}
_lastTouches = [...move.touches];
return;
}
if (!_dragging) {
if (selectedRef.current.length) {
draggingRef.current = selectedRef.current;
_dragging = true;
} else {
updateScroll(getOffset(move) - _startX);
}
} else {
const mouseX = getMouseX(move);
const cardLeft = Math.min(
playerWidth.current - (draggingRef.current.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)),
Math.max(
0,
mouseX - _startX
)
);
setDraggingLeft(cardLeft);
// Scroll when dragging card to edges
const rightThreshold = playerRef.current.offsetWidth - scrollRef.current - (cardWidth * 1.25);
const leftThreshold = -scrollRef.current + (cardWidth / 2);
if (_lastX && cardLeft - _lastX > 0 && cardLeft > rightThreshold) {
updateScroll(Math.floor(rightThreshold - cardLeft));
} else if (_lastX && cardLeft - _lastX < 0 && cardLeft < leftThreshold) {
updateScroll(Math.floor(leftThreshold - cardLeft));
}
_lastX = cardLeft;
}
});
const endSubscription = end$.subscribe(end => {
if (_dragging) {
setSelected([]);
}
draggingRef.current = [];
_dragging = false;
});
return () => {
startSubscription.unsubscribe();
dragSubscription.unsubscribe();
endSubscription.unsubscribe();
};
},[dragObservables, cards]);
useEffect(() => {
let moveIndex = getCardIndex(draggingLeft + (cardOverlapRef.current / 2));
if (draggingRef.current.length === 0 || moveIndex === order.findIndex(id => id === draggingRef.current[0])) return;
let newOrder = order.filter(id => !draggingRef.current.includes(id));
newOrder.splice(moveIndex, 0, ...draggingRef.current);
setOrder(newOrder);
}, [draggingLeft]);
function getMouseX(e) {
const element = e.currentTarget;
const pageX = e.touches && e.touches.length
? e.touches[0].pageX
: e.pageX;
const x = pageX - element.getBoundingClientRect().left;
return x;
}
function getCardIndex(x) {
return Math.max(0, Math.min(Math.floor(x / cardOverlapRef.current), cards.length - 1));
}
function clampScroll(scroll) {
const minScroll = playerRef.current
? playerRef.current.parentElement.offsetWidth - (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current))
: -Infinity;
return Math.min(0, Math.max(minScroll, scroll));
}
function | (card) {
// make all jokers two of spades (until select feature done)
// const joker = { rank: 'two', suit: 'spades' };
// const playedCard = {
// ...joker,
// ...card,
// is_joker: card.type === 'joker'
// };
const filteredSelect = selected.filter(selectedCard => selectedCard.id !== card.id);
// if card wasn't already selected, select it!
if (filteredSelect.length === selected.length) {
// joker select
if (card.type === 'joker') {
setJokerSelect(card);
return;
}
setSelected([...selected, {...card, is_joker: false}]);
} else {
setSelected(filteredSelect);
}
}
if (jokerSelect) {
const ranks = [
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'jack',
'queen',
'king',
'ace'
];
const suits = [
'clubs',
'hearts',
'diamonds',
'spades'
];
return (
<div
className={css.player}
>
<select
onChange={({target}) => setJokerValue(value => ({...value, rank: target.value}))}
value={jokerValue.rank}
>
{ranks.map(rank => <option key={rank} value={rank}>{rank}</option>)}
</select>
<select
onChange={({target}) => setJokerValue(value => ({...value, suit: target.value}))}
value={jokerValue.suit}
>
{suits.map(suit => <option key={suit} value={suit}>{suit}</option>)}
</select>
<button onClick={() => { setSelected([...selected, {id: jokerSelect.id, ...jokerValue, is_joker: true}]); setJokerSelect(undefined); }}>
OK
</button>
</div>
);
}
return (
<div
ref={playerRef}
className={`${css.player} ${draggingRef.current.length ? css.reordering : ''}`}
style={{
width: (cards.length * cardOverlapRef.current + (cardWidth - cardOverlapRef.current)) + 'px',
transform: `translate(${scroll}px)`,
}}
>
{
cards.map((card, index) => {
const position = order.findIndex(key => key === card.id);
const draggingPosition = draggingRef.current.findIndex(key => key === card.id);
const cardDragging = draggingRef.current.includes(card.id);
const left = cardDragging ? draggingLeft + (draggingPosition * cardOverlapRef.current) : position * cardOverlapRef.current;
return (
<Card
key={card.id}
card={card}
selected={selectedRef.current.includes(card.id)}
dragging={cardDragging}
reordering={draggingRef.current.length}
style={{zIndex: position, left: left + 'px'}}
onClick={() => selectCard(card)}
/>
);
})
}
</div>
);
}
export default Player;
| selectCard | identifier_name |
wd.rs | //! WebDriver types and declarations.
use crate::error;
#[cfg(doc)]
use crate::Client;
use http::Method;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::convert::TryFrom;
use std::fmt;
use std::fmt::Debug;
use std::time::Duration;
use url::{ParseError, Url};
use webdriver::command::TimeoutsParameters;
/// A command that can be sent to the WebDriver.
///
/// Anything that implements this command can be sent to [`Client::issue_cmd()`] in order
/// to send custom commands to the WebDriver instance.
pub trait WebDriverCompatibleCommand: Debug {
/// The endpoint to send the request to.
fn endpoint(
&self,
base_url: &url::Url,
session_id: Option<&str>,
) -> Result<url::Url, url::ParseError>;
/// The HTTP request method to use, and the request body for the request.
///
/// The `url` will be the one returned from the `endpoint()` method above.
fn method_and_body(&self, request_url: &url::Url) -> (http::Method, Option<String>);
/// Return true if this command starts a new WebDriver session.
fn is_new_session(&self) -> bool {
false | }
/// Return true if this session should only support the legacy webdriver protocol.
///
/// This only applies to the obsolete JSON Wire Protocol and should return `false`
/// for all implementations that follow the W3C specification.
///
/// See <https://www.selenium.dev/documentation/legacy/json_wire_protocol/> for more
/// details about JSON Wire Protocol.
fn is_legacy(&self) -> bool {
false
}
}
/// Blanket implementation for &T, for better ergonomics.
impl<T> WebDriverCompatibleCommand for &T
where
T: WebDriverCompatibleCommand,
{
fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> {
T::endpoint(self, base_url, session_id)
}
fn method_and_body(&self, request_url: &Url) -> (Method, Option<String>) {
T::method_and_body(self, request_url)
}
fn is_new_session(&self) -> bool {
T::is_new_session(self)
}
fn is_legacy(&self) -> bool {
T::is_legacy(self)
}
}
/// Blanket implementation for Box<T>, for better ergonomics.
impl<T> WebDriverCompatibleCommand for Box<T>
where
T: WebDriverCompatibleCommand,
{
fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> {
T::endpoint(self, base_url, session_id)
}
fn method_and_body(&self, request_url: &Url) -> (Method, Option<String>) {
T::method_and_body(self, request_url)
}
fn is_new_session(&self) -> bool {
T::is_new_session(self)
}
fn is_legacy(&self) -> bool {
T::is_legacy(self)
}
}
/// A [handle][1] to a browser window.
///
/// Should be obtained it via [`Client::window()`] method (or similar).
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct WindowHandle(String);
impl From<WindowHandle> for String {
fn from(w: WindowHandle) -> Self {
w.0
}
}
impl<'a> TryFrom<Cow<'a, str>> for WindowHandle {
type Error = error::InvalidWindowHandle;
/// Makes the given string a [`WindowHandle`].
///
/// Avoids allocation if possible.
///
/// # Errors
///
/// If the given string is [`"current"`][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
fn try_from(s: Cow<'a, str>) -> Result<Self, Self::Error> {
if s != "current" {
Ok(Self(s.into_owned()))
} else {
Err(error::InvalidWindowHandle)
}
}
}
impl TryFrom<String> for WindowHandle {
type Error = error::InvalidWindowHandle;
/// Makes the given [`String`] a [`WindowHandle`].
///
/// # Errors
///
/// If the given [`String`] is [`"current"`][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
fn try_from(s: String) -> Result<Self, Self::Error> {
Self::try_from(Cow::Owned(s))
}
}
impl TryFrom<&str> for WindowHandle {
type Error = error::InvalidWindowHandle;
/// Makes the given string a [`WindowHandle`].
///
/// Allocates if succeeds.
///
/// # Errors
///
/// If the given string is [`"current"`][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
fn try_from(s: &str) -> Result<Self, Self::Error> {
Self::try_from(Cow::Borrowed(s))
}
}
/// A type of a new browser window.
///
/// Returned by [`Client::new_window()`] method.
///
/// [`Client::new_window()`]: crate::Client::new_window
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum NewWindowType {
/// Opened in a tab.
Tab,
/// Opened in a separate window.
Window,
}
impl fmt::Display for NewWindowType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Tab => write!(f, "tab"),
Self::Window => write!(f, "window"),
}
}
}
/// Dynamic set of [WebDriver capabilities][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-capability
pub type Capabilities = serde_json::Map<String, serde_json::Value>;
/// An element locator.
///
/// See [the specification][1] for more details.
///
/// [1]: https://www.w3.org/TR/webdriver1/#locator-strategies
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
pub enum Locator<'a> {
/// Find an element matching the given [CSS selector][1].
///
/// [1]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors
Css(&'a str),
/// Find an element using the given [`id`][1].
///
/// [1]: https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/id
Id(&'a str),
/// Find a link element with the given link text.
///
/// The text matching is exact.
LinkText(&'a str),
/// Find an element using the given [XPath expression][1].
///
/// You can address pretty much any element this way, if you're willing to
/// put in the time to find the right XPath.
///
/// [1]: https://developer.mozilla.org/en-US/docs/Web/XPath
XPath(&'a str),
}
impl<'a> Locator<'a> {
pub(crate) fn into_parameters(self) -> webdriver::command::LocatorParameters {
use webdriver::command::LocatorParameters;
use webdriver::common::LocatorStrategy;
match self {
Locator::Css(s) => LocatorParameters {
using: LocatorStrategy::CSSSelector,
value: s.to_string(),
},
Locator::Id(s) => LocatorParameters {
using: LocatorStrategy::XPath,
value: format!("//*[@id=\"{}\"]", s),
},
Locator::XPath(s) => LocatorParameters {
using: LocatorStrategy::XPath,
value: s.to_string(),
},
Locator::LinkText(s) => LocatorParameters {
using: LocatorStrategy::LinkText,
value: s.to_string(),
},
}
}
}
/// The WebDriver status as returned by [`Client::status()`].
///
/// See [8.3 Status](https://www.w3.org/TR/webdriver1/#status) of the WebDriver standard.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WebDriverStatus {
/// True if the webdriver is ready to start a new session.
///
/// NOTE: Geckodriver will return `false` if a session has already started, since it
/// only supports a single session.
pub ready: bool,
/// The current status message.
pub message: String,
}
/// Timeout configuration, for various timeout settings.
///
/// Used by [`Client::get_timeouts()`] and [`Client::update_timeouts()`].
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct TimeoutConfiguration {
#[serde(skip_serializing_if = "Option::is_none")]
script: Option<u64>,
#[serde(rename = "pageLoad", skip_serializing_if = "Option::is_none")]
page_load: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
implicit: Option<u64>,
}
impl Default for TimeoutConfiguration {
fn default() -> Self {
TimeoutConfiguration::new(
Some(Duration::from_secs(60)),
Some(Duration::from_secs(60)),
Some(Duration::from_secs(0)),
)
}
}
impl TimeoutConfiguration {
/// Create new timeout configuration.
///
/// The various settings are as follows:
/// - script Determines when to interrupt a script that is being evaluated.
/// Default is 60 seconds.
/// - page_load Provides the timeout limit used to interrupt navigation of the browsing
/// context. Default is 60 seconds.
/// - implicit Gives the timeout of when to abort locating an element. Default is 0 seconds.
///
/// NOTE: It is recommended to leave the `implicit` timeout at 0 seconds, because that makes
/// it possible to check for the non-existence of an element without an implicit delay.
/// Also see [`Client::wait()`] for element polling functionality.
pub fn new(
script: Option<Duration>,
page_load: Option<Duration>,
implicit: Option<Duration>,
) -> Self {
TimeoutConfiguration {
script: script.map(|x| x.as_millis() as u64),
page_load: page_load.map(|x| x.as_millis() as u64),
implicit: implicit.map(|x| x.as_millis() as u64),
}
}
/// Get the script timeout.
pub fn script(&self) -> Option<Duration> {
self.script.map(Duration::from_millis)
}
/// Set the script timeout.
pub fn set_script(&mut self, timeout: Option<Duration>) {
self.script = timeout.map(|x| x.as_millis() as u64);
}
/// Get the page load timeout.
pub fn page_load(&self) -> Option<Duration> {
self.page_load.map(Duration::from_millis)
}
/// Set the page load timeout.
pub fn set_page_load(&mut self, timeout: Option<Duration>) {
self.page_load = timeout.map(|x| x.as_millis() as u64);
}
/// Get the implicit wait timeout.
pub fn implicit(&self) -> Option<Duration> {
self.implicit.map(Duration::from_millis)
}
/// Set the implicit wait timeout.
pub fn set_implicit(&mut self, timeout: Option<Duration>) {
self.implicit = timeout.map(|x| x.as_millis() as u64);
}
}
impl TimeoutConfiguration {
pub(crate) fn into_params(self) -> TimeoutsParameters {
TimeoutsParameters {
script: self.script.map(Some),
page_load: self.page_load,
implicit: self.implicit,
}
}
} | random_line_split | |
wd.rs | //! WebDriver types and declarations.
use crate::error;
#[cfg(doc)]
use crate::Client;
use http::Method;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::convert::TryFrom;
use std::fmt;
use std::fmt::Debug;
use std::time::Duration;
use url::{ParseError, Url};
use webdriver::command::TimeoutsParameters;
/// A command that can be sent to the WebDriver.
///
/// Anything that implements this command can be sent to [`Client::issue_cmd()`] in order
/// to send custom commands to the WebDriver instance.
pub trait WebDriverCompatibleCommand: Debug {
/// The endpoint to send the request to.
fn endpoint(
&self,
base_url: &url::Url,
session_id: Option<&str>,
) -> Result<url::Url, url::ParseError>;
/// The HTTP request method to use, and the request body for the request.
///
/// The `url` will be the one returned from the `endpoint()` method above.
fn method_and_body(&self, request_url: &url::Url) -> (http::Method, Option<String>);
/// Return true if this command starts a new WebDriver session.
fn is_new_session(&self) -> bool {
false
}
/// Return true if this session should only support the legacy webdriver protocol.
///
/// This only applies to the obsolete JSON Wire Protocol and should return `false`
/// for all implementations that follow the W3C specification.
///
/// See <https://www.selenium.dev/documentation/legacy/json_wire_protocol/> for more
/// details about JSON Wire Protocol.
fn is_legacy(&self) -> bool {
false
}
}
/// Blanket implementation for &T, for better ergonomics.
impl<T> WebDriverCompatibleCommand for &T
where
T: WebDriverCompatibleCommand,
{
fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> {
T::endpoint(self, base_url, session_id)
}
fn method_and_body(&self, request_url: &Url) -> (Method, Option<String>) {
T::method_and_body(self, request_url)
}
fn is_new_session(&self) -> bool {
T::is_new_session(self)
}
fn is_legacy(&self) -> bool {
T::is_legacy(self)
}
}
/// Blanket implementation for Box<T>, for better ergonomics.
impl<T> WebDriverCompatibleCommand for Box<T>
where
T: WebDriverCompatibleCommand,
{
fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> {
T::endpoint(self, base_url, session_id)
}
fn | (&self, request_url: &Url) -> (Method, Option<String>) {
T::method_and_body(self, request_url)
}
fn is_new_session(&self) -> bool {
T::is_new_session(self)
}
fn is_legacy(&self) -> bool {
T::is_legacy(self)
}
}
/// A [handle][1] to a browser window.
///
/// Should be obtained it via [`Client::window()`] method (or similar).
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct WindowHandle(String);
impl From<WindowHandle> for String {
fn from(w: WindowHandle) -> Self {
w.0
}
}
impl<'a> TryFrom<Cow<'a, str>> for WindowHandle {
type Error = error::InvalidWindowHandle;
/// Makes the given string a [`WindowHandle`].
///
/// Avoids allocation if possible.
///
/// # Errors
///
/// If the given string is [`"current"`][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
fn try_from(s: Cow<'a, str>) -> Result<Self, Self::Error> {
if s != "current" {
Ok(Self(s.into_owned()))
} else {
Err(error::InvalidWindowHandle)
}
}
}
impl TryFrom<String> for WindowHandle {
type Error = error::InvalidWindowHandle;
/// Makes the given [`String`] a [`WindowHandle`].
///
/// # Errors
///
/// If the given [`String`] is [`"current"`][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
fn try_from(s: String) -> Result<Self, Self::Error> {
Self::try_from(Cow::Owned(s))
}
}
impl TryFrom<&str> for WindowHandle {
type Error = error::InvalidWindowHandle;
/// Makes the given string a [`WindowHandle`].
///
/// Allocates if succeeds.
///
/// # Errors
///
/// If the given string is [`"current"`][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles
fn try_from(s: &str) -> Result<Self, Self::Error> {
Self::try_from(Cow::Borrowed(s))
}
}
/// A type of a new browser window.
///
/// Returned by [`Client::new_window()`] method.
///
/// [`Client::new_window()`]: crate::Client::new_window
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum NewWindowType {
/// Opened in a tab.
Tab,
/// Opened in a separate window.
Window,
}
impl fmt::Display for NewWindowType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Tab => write!(f, "tab"),
Self::Window => write!(f, "window"),
}
}
}
/// Dynamic set of [WebDriver capabilities][1].
///
/// [1]: https://www.w3.org/TR/webdriver/#dfn-capability
pub type Capabilities = serde_json::Map<String, serde_json::Value>;
/// An element locator.
///
/// See [the specification][1] for more details.
///
/// [1]: https://www.w3.org/TR/webdriver1/#locator-strategies
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
pub enum Locator<'a> {
/// Find an element matching the given [CSS selector][1].
///
/// [1]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors
Css(&'a str),
/// Find an element using the given [`id`][1].
///
/// [1]: https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/id
Id(&'a str),
/// Find a link element with the given link text.
///
/// The text matching is exact.
LinkText(&'a str),
/// Find an element using the given [XPath expression][1].
///
/// You can address pretty much any element this way, if you're willing to
/// put in the time to find the right XPath.
///
/// [1]: https://developer.mozilla.org/en-US/docs/Web/XPath
XPath(&'a str),
}
impl<'a> Locator<'a> {
pub(crate) fn into_parameters(self) -> webdriver::command::LocatorParameters {
use webdriver::command::LocatorParameters;
use webdriver::common::LocatorStrategy;
match self {
Locator::Css(s) => LocatorParameters {
using: LocatorStrategy::CSSSelector,
value: s.to_string(),
},
Locator::Id(s) => LocatorParameters {
using: LocatorStrategy::XPath,
value: format!("//*[@id=\"{}\"]", s),
},
Locator::XPath(s) => LocatorParameters {
using: LocatorStrategy::XPath,
value: s.to_string(),
},
Locator::LinkText(s) => LocatorParameters {
using: LocatorStrategy::LinkText,
value: s.to_string(),
},
}
}
}
/// The WebDriver status as returned by [`Client::status()`].
///
/// See [8.3 Status](https://www.w3.org/TR/webdriver1/#status) of the WebDriver standard.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WebDriverStatus {
/// True if the webdriver is ready to start a new session.
///
/// NOTE: Geckodriver will return `false` if a session has already started, since it
/// only supports a single session.
pub ready: bool,
/// The current status message.
pub message: String,
}
/// Timeout configuration, for various timeout settings.
///
/// Used by [`Client::get_timeouts()`] and [`Client::update_timeouts()`].
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct TimeoutConfiguration {
#[serde(skip_serializing_if = "Option::is_none")]
script: Option<u64>,
#[serde(rename = "pageLoad", skip_serializing_if = "Option::is_none")]
page_load: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
implicit: Option<u64>,
}
impl Default for TimeoutConfiguration {
fn default() -> Self {
TimeoutConfiguration::new(
Some(Duration::from_secs(60)),
Some(Duration::from_secs(60)),
Some(Duration::from_secs(0)),
)
}
}
impl TimeoutConfiguration {
/// Create new timeout configuration.
///
/// The various settings are as follows:
/// - script Determines when to interrupt a script that is being evaluated.
/// Default is 60 seconds.
/// - page_load Provides the timeout limit used to interrupt navigation of the browsing
/// context. Default is 60 seconds.
/// - implicit Gives the timeout of when to abort locating an element. Default is 0 seconds.
///
/// NOTE: It is recommended to leave the `implicit` timeout at 0 seconds, because that makes
/// it possible to check for the non-existence of an element without an implicit delay.
/// Also see [`Client::wait()`] for element polling functionality.
pub fn new(
script: Option<Duration>,
page_load: Option<Duration>,
implicit: Option<Duration>,
) -> Self {
TimeoutConfiguration {
script: script.map(|x| x.as_millis() as u64),
page_load: page_load.map(|x| x.as_millis() as u64),
implicit: implicit.map(|x| x.as_millis() as u64),
}
}
/// Get the script timeout.
pub fn script(&self) -> Option<Duration> {
self.script.map(Duration::from_millis)
}
/// Set the script timeout.
pub fn set_script(&mut self, timeout: Option<Duration>) {
self.script = timeout.map(|x| x.as_millis() as u64);
}
/// Get the page load timeout.
pub fn page_load(&self) -> Option<Duration> {
self.page_load.map(Duration::from_millis)
}
/// Set the page load timeout.
pub fn set_page_load(&mut self, timeout: Option<Duration>) {
self.page_load = timeout.map(|x| x.as_millis() as u64);
}
/// Get the implicit wait timeout.
pub fn implicit(&self) -> Option<Duration> {
self.implicit.map(Duration::from_millis)
}
/// Set the implicit wait timeout.
pub fn set_implicit(&mut self, timeout: Option<Duration>) {
self.implicit = timeout.map(|x| x.as_millis() as u64);
}
}
impl TimeoutConfiguration {
pub(crate) fn into_params(self) -> TimeoutsParameters {
TimeoutsParameters {
script: self.script.map(Some),
page_load: self.page_load,
implicit: self.implicit,
}
}
}
| method_and_body | identifier_name |
plotting_utils.py | import numpy as np
from get_luminosity import *
from astropy.cosmology import Planck15
import matplotlib.pyplot as pl
pl.ioff()
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.ticker import MaxNLocator
import scipy.ndimage
from copy import deepcopy
# Contains a few routines for making output plots, including
# triangle plots of parameter covariances and the best-fit SED.
__all__ = ['marginalize_2d','marginalize_1d','plot_sed','text_summary','triangleplot',
'make_outputplot']
def marginalize_2d(x,y,axobj,*args,**kwargs):
"""
Routine to plot 2D confidence intervals between two parameters given arrays
of MCMC samples.
Inputs:
x,y:
Arrays of MCMC chain values.
axobj:
A matplotlib Axes object on which to plot.
extent:
List of [xmin,xmax,ymin,ymax] values to be used as plot axis limits.
If not provided, something sensible is chosen.
bins:
Number of bins to put the chains into.
levs:
Contour levels, in sigma. Default is 1,2,3sigma regions.
Returns:
axobj:
The same axis object passed here, now with the regions plotted.
"""
# Get values of various possible kwargs
bins = kwargs.pop('bins',50)
levs = kwargs.pop('levs',[1.,2.,3.])
extent = kwargs.pop('extent',[x.min(),x.max(),y.min(),y.max()])
cmap = kwargs.pop('cmap','Greys')
cmap = cm.get_cmap(cmap.capitalize())
cmap = cmap(np.linspace(0,1,np.asarray(levs).size))
Xbins = np.linspace(extent[0],extent[1],bins+1)
Ybins = np.linspace(extent[2],extent[3],bins+1)
# Bin up the samples. Will fail if x or y has no dynamic range
try:
H,X,Y = np.histogram2d(x.flatten(),y.flatten(),bins=(Xbins,Ybins))
except ValueError: return ValueError("One of your columns has no dynamic range... check it.")
# Generate contour levels, sort probabilities from most to least likely
V = 1.0 - np.exp(-0.5*np.asarray(levs)**2.)
# Here we slightly smooth the contours to account for the finite number
# of MCMC samples. can adjust the 0.7 below, but too small is artificial
# and looks like shit.
H = scipy.ndimage.filters.gaussian_filter(H,0.2*np.log10(x.size))
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
# Find the probability levels that encompass each sigma's worth of likelihood
for i,v0 in enumerate(V):
try: V[i] = Hflat[sm <= v0][-1]
except: V[i] = Hflat[0]
V = V[::-1]
clevs = np.append(V,Hflat.max())
X1, Y1 = 0.5*(X[1:] + X[:-1]), 0.5*(Y[1:]+Y[:-1])
if kwargs.get('plotfilled',True): axobj.contourf(X1,Y1,H.T,clevs,colors=cmap)
axobj.contour(X1,Y1,H.T,clevs,colors=kwargs.get('colors','k'),linewidths=kwargs.get('linewidths',1.5),\
linestyles=kwargs.get('linestyles','solid'))
axobj.set_xlim(extent[0],extent[1])
axobj.set_ylim(extent[2],extent[3])
return axobj
def marginalize_1d(x,axobj,*args,**kwargs):
"""
Plot a histogram of x, with a few tweaks for corner plot pleasantry.
Inputs:
x:
Array of MCMC samples to plot up.
axobj:
Axes object on which to plot.
"""
bins = kwargs.pop('bins',50)
extent = kwargs.pop('extent',[x.min(),x.max()])
fillcolor = kwargs.pop('color','gray')
axobj.hist(x,bins=bins,range=extent,histtype='stepfilled',color=fillcolor)
axobj.yaxis.tick_right()
pl.setp(axobj.get_yticklabels(),visible=False)
axobj.set_xlim(extent[0],extent[1])
return axobj
def | (um_rest,Sfit,um_data,mjy_data,mjy_errors,axobj,*args,**kwargs):
"""
Plot an SED
"""
sedcolor = kwargs.pop('sedcolor','b')
axobj.set_axis_on()
axobj.plot(um_rest,Sfit,color=sedcolor,ls='-')
axobj.set_xscale('log'); axobj.set_yscale('log')
axobj.set_xlabel('$\\lambda_{obs}$, $\\mu m$')
axobj.set_ylabel('S$_{\\nu}$, mJy')
axobj.set_xlim(1.1**um_rest.max(),0.8*(1+z)*um_rest.min())
axobj.set_ylim(0.5*mjy_data[mjy_data>0.].min(),2*np.max(Sfit))
altax = axobj.twiny()
altax.set_xlim(axobj.get_xlim()[0],axobj.get_xlim()[1])
altax.set_ylim(axobj.get_ylim()[0],axobj.get_ylim()[1])
altax.set_xscale('log'); altax.set_xlabel('$\\lambda_{rest}$, $\\mu m$')
# Plot the data points, and plot upper limits if they exist
for i,point in enumerate(um_data):
if point<200: color = 'c' # PACS points
elif point<= 500: color = 'g' # SPIRE points (also SABOCA...)
else: color = 'r'
if mjy_data[i] > 0:
axobj.errorbar(point,mjy_data[i],yerr=mjy_errors[i],color=color,\
marker='o')
else: # Draw upper limits, but make errobar sizes consistent on log-log
ylims,xlims = axobj.get_ylim(), axobj.get_xlim()
yloc = -(np.log10(ylims[0]) - np.log10(3*mjy_errors[i]))/(np.log10(ylims[1])-np.log10(ylims[0]))
xloc = (np.log10(point)-np.log10(xlims[0]))/(np.log10(xlims[1])-np.log10(xlims[0]))
axobj.errorbar(xloc,yloc,yerr=[[0.07],[0.0]],uplims=True,color=color,marker='o',\
ecolor='grey',mec='grey',transform=axobj.transAxes)
return axobj
def text_summary(samples,labels,axobj):
"""
Write some parameter summary text to the axobj.
"""
#axobj.set_axis_on()
axobj.text(-0.8,0.9,'Chain parameters:',fontsize='xx-large',transform=axobj.transAxes)
tloc = 0.
for par in range(0,samples.shape[1]):
x = deepcopy(samples[:,par])
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2.
xmed = np.median(x)
if 'lambda' in labels[par]: xmed,xstd = 1e6*xmed,1e6*xstd
if 'L$_\\odot$' in labels[par]: xmed,xstd = xmed/1e12, xstd/1e12
axobj.text(-0.8,0.7-tloc,'{0:10s} = {1:.2f} $\\pm$ {2:.2f}'.
format(labels[par],xmed,xstd),fontsize='xx-large',transform=axobj.transAxes)
tloc += 0.18
return axobj
def triangleplot(samples,labels):
"""
Assemble the MCMC samples into the usual triangle plot format.
Parameters:
samples: 2-D array
Array of MCMC samples, of shape (Nsamples,Nparameters)
labels: list of strings
What to label the axes. Latex math-mode is okay.
Returns:
f,axarr: matplotlib figure object and array of Axes objects of shape (Nparams,Nparams)
"""
f,axarr = pl.subplots(samples.shape[1],samples.shape[1],figsize=(3*samples.shape[1],3*samples.shape[1]))
for row in range(0,samples.shape[1]):
for col in range(0,samples.shape[1]):
# been burned too many times by unintentionally altering arrays
x,y = deepcopy(samples[:,col]), deepcopy(samples[:,row])
# Shield ourselves against nans or infinities.
x = x[np.isfinite(x) & np.isfinite(y)]
y = y[np.isfinite(x) & np.isfinite(y)]
# do some unit conversions for the sake of our collective sanity
if 'lambda' in labels[col]: x*=1e6 # convert a wavelength to um from m
if 'lambda' in labels[row]: y*=1e6
if 'L$_\\odot$' in labels[col]: x /= 1e12 # divide down luminosity
if 'L$_\\odot$' in labels[row]: y /= 1e12
# figure out some sensible axis limits
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2
ystd = np.ediff1d(np.percentile(y,[15.87,84.13]))[0]/2
xmin,xmax = np.median(x)-6*xstd, np.median(x)+6*xstd
ymin,ymax = np.median(y)-6*ystd, np.median(y)+6*ystd
if row>col:
try: marginalize_2d(x,y,axarr[row,col],\
extent=[xmin,xmax,ymin,ymax],bins=max(np.floor(x.size/1000),50))
except ValueError:
print(labels[row],labels[col])
raise ValueError("One of the columns has no dynamic range")
if col>0: pl.setp(axarr[row,col].get_yticklabels(),visible=False)
else: axarr[row,col].set_ylabel(labels[row],fontsize='x-large')
if row<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
else: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
axarr[row,col].yaxis.set_major_locator(MaxNLocator(5))
elif row==col:
marginalize_1d(x,axarr[row,col],extent=[xmin,xmax],bins=max(np.floor(x.size/1000),50))
if row==axarr.shape[0]-1: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
if col<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
else:
axarr[row,col].set_axis_off()
return f, axarr
def make_outputplot(plotfile,samples,labels,plotsed=True,um_rest=None,
Sfit=None,um_data=None, mjy_data=None,mjy_errors=None):
"""
Assemble the whole output thing.
"""
f,axarr = triangleplot(samples,labels)
if plotsed: trcax = plot_sed(um_rest,Sfit,um_data,mjy_data,mjy_errors,axarr[0,-1])
textax = text_summary(samples,labels,axarr[0,-2])
f.subplots_adjust(left=0.1,right=0.97,bottom=0.1,top=0.95,hspace=0,wspace=0)
f.savefig(plotfile)
pl.close()
| plot_sed | identifier_name |
plotting_utils.py | import numpy as np
from get_luminosity import *
from astropy.cosmology import Planck15
import matplotlib.pyplot as pl
pl.ioff()
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.ticker import MaxNLocator
import scipy.ndimage
from copy import deepcopy
# Contains a few routines for making output plots, including
# triangle plots of parameter covariances and the best-fit SED.
__all__ = ['marginalize_2d','marginalize_1d','plot_sed','text_summary','triangleplot',
'make_outputplot']
def marginalize_2d(x,y,axobj,*args,**kwargs):
"""
Routine to plot 2D confidence intervals between two parameters given arrays
of MCMC samples.
Inputs:
x,y:
Arrays of MCMC chain values.
axobj:
A matplotlib Axes object on which to plot.
extent:
List of [xmin,xmax,ymin,ymax] values to be used as plot axis limits.
If not provided, something sensible is chosen.
bins:
Number of bins to put the chains into.
levs:
Contour levels, in sigma. Default is 1,2,3sigma regions.
Returns:
axobj:
The same axis object passed here, now with the regions plotted.
"""
# Get values of various possible kwargs
bins = kwargs.pop('bins',50)
levs = kwargs.pop('levs',[1.,2.,3.])
extent = kwargs.pop('extent',[x.min(),x.max(),y.min(),y.max()])
cmap = kwargs.pop('cmap','Greys')
cmap = cm.get_cmap(cmap.capitalize())
cmap = cmap(np.linspace(0,1,np.asarray(levs).size))
Xbins = np.linspace(extent[0],extent[1],bins+1)
Ybins = np.linspace(extent[2],extent[3],bins+1)
# Bin up the samples. Will fail if x or y has no dynamic range
try:
H,X,Y = np.histogram2d(x.flatten(),y.flatten(),bins=(Xbins,Ybins))
except ValueError: return ValueError("One of your columns has no dynamic range... check it.")
# Generate contour levels, sort probabilities from most to least likely
V = 1.0 - np.exp(-0.5*np.asarray(levs)**2.)
# Here we slightly smooth the contours to account for the finite number
# of MCMC samples. can adjust the 0.7 below, but too small is artificial
# and looks like shit.
H = scipy.ndimage.filters.gaussian_filter(H,0.2*np.log10(x.size))
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
# Find the probability levels that encompass each sigma's worth of likelihood
for i,v0 in enumerate(V):
try: V[i] = Hflat[sm <= v0][-1]
except: V[i] = Hflat[0]
V = V[::-1]
clevs = np.append(V,Hflat.max())
X1, Y1 = 0.5*(X[1:] + X[:-1]), 0.5*(Y[1:]+Y[:-1])
if kwargs.get('plotfilled',True): axobj.contourf(X1,Y1,H.T,clevs,colors=cmap)
axobj.contour(X1,Y1,H.T,clevs,colors=kwargs.get('colors','k'),linewidths=kwargs.get('linewidths',1.5),\
linestyles=kwargs.get('linestyles','solid'))
axobj.set_xlim(extent[0],extent[1])
axobj.set_ylim(extent[2],extent[3])
return axobj
def marginalize_1d(x,axobj,*args,**kwargs):
"""
Plot a histogram of x, with a few tweaks for corner plot pleasantry.
Inputs:
x:
Array of MCMC samples to plot up.
axobj:
Axes object on which to plot.
"""
bins = kwargs.pop('bins',50)
extent = kwargs.pop('extent',[x.min(),x.max()])
fillcolor = kwargs.pop('color','gray')
axobj.hist(x,bins=bins,range=extent,histtype='stepfilled',color=fillcolor)
axobj.yaxis.tick_right()
pl.setp(axobj.get_yticklabels(),visible=False)
axobj.set_xlim(extent[0],extent[1])
return axobj
def plot_sed(um_rest,Sfit,um_data,mjy_data,mjy_errors,axobj,*args,**kwargs):
"""
Plot an SED
"""
sedcolor = kwargs.pop('sedcolor','b')
axobj.set_axis_on()
axobj.plot(um_rest,Sfit,color=sedcolor,ls='-')
axobj.set_xscale('log'); axobj.set_yscale('log')
axobj.set_xlabel('$\\lambda_{obs}$, $\\mu m$')
axobj.set_ylabel('S$_{\\nu}$, mJy')
axobj.set_xlim(1.1**um_rest.max(),0.8*(1+z)*um_rest.min())
axobj.set_ylim(0.5*mjy_data[mjy_data>0.].min(),2*np.max(Sfit))
altax = axobj.twiny()
altax.set_xlim(axobj.get_xlim()[0],axobj.get_xlim()[1])
altax.set_ylim(axobj.get_ylim()[0],axobj.get_ylim()[1])
altax.set_xscale('log'); altax.set_xlabel('$\\lambda_{rest}$, $\\mu m$')
# Plot the data points, and plot upper limits if they exist
for i,point in enumerate(um_data):
if point<200: color = 'c' # PACS points
elif point<= 500: color = 'g' # SPIRE points (also SABOCA...)
else: color = 'r'
if mjy_data[i] > 0:
axobj.errorbar(point,mjy_data[i],yerr=mjy_errors[i],color=color,\
marker='o')
else: # Draw upper limits, but make errobar sizes consistent on log-log
ylims,xlims = axobj.get_ylim(), axobj.get_xlim()
yloc = -(np.log10(ylims[0]) - np.log10(3*mjy_errors[i]))/(np.log10(ylims[1])-np.log10(ylims[0]))
xloc = (np.log10(point)-np.log10(xlims[0]))/(np.log10(xlims[1])-np.log10(xlims[0]))
axobj.errorbar(xloc,yloc,yerr=[[0.07],[0.0]],uplims=True,color=color,marker='o',\
ecolor='grey',mec='grey',transform=axobj.transAxes)
return axobj
def text_summary(samples,labels,axobj):
"""
Write some parameter summary text to the axobj.
"""
#axobj.set_axis_on()
axobj.text(-0.8,0.9,'Chain parameters:',fontsize='xx-large',transform=axobj.transAxes)
tloc = 0.
for par in range(0,samples.shape[1]):
x = deepcopy(samples[:,par])
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2.
xmed = np.median(x)
if 'lambda' in labels[par]: xmed,xstd = 1e6*xmed,1e6*xstd
if 'L$_\\odot$' in labels[par]: xmed,xstd = xmed/1e12, xstd/1e12
axobj.text(-0.8,0.7-tloc,'{0:10s} = {1:.2f} $\\pm$ {2:.2f}'.
format(labels[par],xmed,xstd),fontsize='xx-large',transform=axobj.transAxes)
tloc += 0.18
return axobj
def triangleplot(samples,labels):
|
def make_outputplot(plotfile,samples,labels,plotsed=True,um_rest=None,
Sfit=None,um_data=None, mjy_data=None,mjy_errors=None):
"""
Assemble the whole output thing.
"""
f,axarr = triangleplot(samples,labels)
if plotsed: trcax = plot_sed(um_rest,Sfit,um_data,mjy_data,mjy_errors,axarr[0,-1])
textax = text_summary(samples,labels,axarr[0,-2])
f.subplots_adjust(left=0.1,right=0.97,bottom=0.1,top=0.95,hspace=0,wspace=0)
f.savefig(plotfile)
pl.close()
| """
Assemble the MCMC samples into the usual triangle plot format.
Parameters:
samples: 2-D array
Array of MCMC samples, of shape (Nsamples,Nparameters)
labels: list of strings
What to label the axes. Latex math-mode is okay.
Returns:
f,axarr: matplotlib figure object and array of Axes objects of shape (Nparams,Nparams)
"""
f,axarr = pl.subplots(samples.shape[1],samples.shape[1],figsize=(3*samples.shape[1],3*samples.shape[1]))
for row in range(0,samples.shape[1]):
for col in range(0,samples.shape[1]):
# been burned too many times by unintentionally altering arrays
x,y = deepcopy(samples[:,col]), deepcopy(samples[:,row])
# Shield ourselves against nans or infinities.
x = x[np.isfinite(x) & np.isfinite(y)]
y = y[np.isfinite(x) & np.isfinite(y)]
# do some unit conversions for the sake of our collective sanity
if 'lambda' in labels[col]: x*=1e6 # convert a wavelength to um from m
if 'lambda' in labels[row]: y*=1e6
if 'L$_\\odot$' in labels[col]: x /= 1e12 # divide down luminosity
if 'L$_\\odot$' in labels[row]: y /= 1e12
# figure out some sensible axis limits
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2
ystd = np.ediff1d(np.percentile(y,[15.87,84.13]))[0]/2
xmin,xmax = np.median(x)-6*xstd, np.median(x)+6*xstd
ymin,ymax = np.median(y)-6*ystd, np.median(y)+6*ystd
if row>col:
try: marginalize_2d(x,y,axarr[row,col],\
extent=[xmin,xmax,ymin,ymax],bins=max(np.floor(x.size/1000),50))
except ValueError:
print(labels[row],labels[col])
raise ValueError("One of the columns has no dynamic range")
if col>0: pl.setp(axarr[row,col].get_yticklabels(),visible=False)
else: axarr[row,col].set_ylabel(labels[row],fontsize='x-large')
if row<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
else: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
axarr[row,col].yaxis.set_major_locator(MaxNLocator(5))
elif row==col:
marginalize_1d(x,axarr[row,col],extent=[xmin,xmax],bins=max(np.floor(x.size/1000),50))
if row==axarr.shape[0]-1: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
if col<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
else:
axarr[row,col].set_axis_off()
return f, axarr | identifier_body |
plotting_utils.py | import numpy as np
from get_luminosity import *
from astropy.cosmology import Planck15
import matplotlib.pyplot as pl
pl.ioff()
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.ticker import MaxNLocator
import scipy.ndimage
from copy import deepcopy
# Contains a few routines for making output plots, including
# triangle plots of parameter covariances and the best-fit SED.
__all__ = ['marginalize_2d','marginalize_1d','plot_sed','text_summary','triangleplot',
'make_outputplot']
def marginalize_2d(x,y,axobj,*args,**kwargs):
"""
Routine to plot 2D confidence intervals between two parameters given arrays
of MCMC samples.
Inputs:
x,y:
Arrays of MCMC chain values.
axobj:
A matplotlib Axes object on which to plot.
extent:
List of [xmin,xmax,ymin,ymax] values to be used as plot axis limits.
If not provided, something sensible is chosen.
bins:
Number of bins to put the chains into.
levs:
Contour levels, in sigma. Default is 1,2,3sigma regions.
Returns:
axobj:
The same axis object passed here, now with the regions plotted.
"""
# Get values of various possible kwargs
bins = kwargs.pop('bins',50)
levs = kwargs.pop('levs',[1.,2.,3.])
extent = kwargs.pop('extent',[x.min(),x.max(),y.min(),y.max()])
cmap = kwargs.pop('cmap','Greys')
cmap = cm.get_cmap(cmap.capitalize())
cmap = cmap(np.linspace(0,1,np.asarray(levs).size))
Xbins = np.linspace(extent[0],extent[1],bins+1)
Ybins = np.linspace(extent[2],extent[3],bins+1)
# Bin up the samples. Will fail if x or y has no dynamic range
try:
H,X,Y = np.histogram2d(x.flatten(),y.flatten(),bins=(Xbins,Ybins))
except ValueError: return ValueError("One of your columns has no dynamic range... check it.")
# Generate contour levels, sort probabilities from most to least likely
V = 1.0 - np.exp(-0.5*np.asarray(levs)**2.)
# Here we slightly smooth the contours to account for the finite number
# of MCMC samples. can adjust the 0.7 below, but too small is artificial
# and looks like shit.
H = scipy.ndimage.filters.gaussian_filter(H,0.2*np.log10(x.size))
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
# Find the probability levels that encompass each sigma's worth of likelihood
for i,v0 in enumerate(V):
try: V[i] = Hflat[sm <= v0][-1]
except: V[i] = Hflat[0]
V = V[::-1]
clevs = np.append(V,Hflat.max())
X1, Y1 = 0.5*(X[1:] + X[:-1]), 0.5*(Y[1:]+Y[:-1])
if kwargs.get('plotfilled',True): axobj.contourf(X1,Y1,H.T,clevs,colors=cmap)
axobj.contour(X1,Y1,H.T,clevs,colors=kwargs.get('colors','k'),linewidths=kwargs.get('linewidths',1.5),\
linestyles=kwargs.get('linestyles','solid'))
axobj.set_xlim(extent[0],extent[1])
axobj.set_ylim(extent[2],extent[3])
return axobj
def marginalize_1d(x,axobj,*args,**kwargs):
"""
Plot a histogram of x, with a few tweaks for corner plot pleasantry.
Inputs:
x:
Array of MCMC samples to plot up.
axobj:
Axes object on which to plot.
"""
bins = kwargs.pop('bins',50)
extent = kwargs.pop('extent',[x.min(),x.max()])
fillcolor = kwargs.pop('color','gray')
axobj.hist(x,bins=bins,range=extent,histtype='stepfilled',color=fillcolor)
axobj.yaxis.tick_right()
pl.setp(axobj.get_yticklabels(),visible=False)
axobj.set_xlim(extent[0],extent[1])
return axobj
def plot_sed(um_rest,Sfit,um_data,mjy_data,mjy_errors,axobj,*args,**kwargs):
"""
Plot an SED
"""
sedcolor = kwargs.pop('sedcolor','b')
axobj.set_axis_on()
axobj.plot(um_rest,Sfit,color=sedcolor,ls='-')
axobj.set_xscale('log'); axobj.set_yscale('log')
axobj.set_xlabel('$\\lambda_{obs}$, $\\mu m$')
axobj.set_ylabel('S$_{\\nu}$, mJy')
axobj.set_xlim(1.1**um_rest.max(),0.8*(1+z)*um_rest.min())
axobj.set_ylim(0.5*mjy_data[mjy_data>0.].min(),2*np.max(Sfit))
altax = axobj.twiny()
altax.set_xlim(axobj.get_xlim()[0],axobj.get_xlim()[1])
altax.set_ylim(axobj.get_ylim()[0],axobj.get_ylim()[1])
altax.set_xscale('log'); altax.set_xlabel('$\\lambda_{rest}$, $\\mu m$')
# Plot the data points, and plot upper limits if they exist
for i,point in enumerate(um_data):
if point<200: color = 'c' # PACS points
elif point<= 500: color = 'g' # SPIRE points (also SABOCA...)
else: |
if mjy_data[i] > 0:
axobj.errorbar(point,mjy_data[i],yerr=mjy_errors[i],color=color,\
marker='o')
else: # Draw upper limits, but make errobar sizes consistent on log-log
ylims,xlims = axobj.get_ylim(), axobj.get_xlim()
yloc = -(np.log10(ylims[0]) - np.log10(3*mjy_errors[i]))/(np.log10(ylims[1])-np.log10(ylims[0]))
xloc = (np.log10(point)-np.log10(xlims[0]))/(np.log10(xlims[1])-np.log10(xlims[0]))
axobj.errorbar(xloc,yloc,yerr=[[0.07],[0.0]],uplims=True,color=color,marker='o',\
ecolor='grey',mec='grey',transform=axobj.transAxes)
return axobj
def text_summary(samples,labels,axobj):
"""
Write some parameter summary text to the axobj.
"""
#axobj.set_axis_on()
axobj.text(-0.8,0.9,'Chain parameters:',fontsize='xx-large',transform=axobj.transAxes)
tloc = 0.
for par in range(0,samples.shape[1]):
x = deepcopy(samples[:,par])
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2.
xmed = np.median(x)
if 'lambda' in labels[par]: xmed,xstd = 1e6*xmed,1e6*xstd
if 'L$_\\odot$' in labels[par]: xmed,xstd = xmed/1e12, xstd/1e12
axobj.text(-0.8,0.7-tloc,'{0:10s} = {1:.2f} $\\pm$ {2:.2f}'.
format(labels[par],xmed,xstd),fontsize='xx-large',transform=axobj.transAxes)
tloc += 0.18
return axobj
def triangleplot(samples,labels):
"""
Assemble the MCMC samples into the usual triangle plot format.
Parameters:
samples: 2-D array
Array of MCMC samples, of shape (Nsamples,Nparameters)
labels: list of strings
What to label the axes. Latex math-mode is okay.
Returns:
f,axarr: matplotlib figure object and array of Axes objects of shape (Nparams,Nparams)
"""
f,axarr = pl.subplots(samples.shape[1],samples.shape[1],figsize=(3*samples.shape[1],3*samples.shape[1]))
for row in range(0,samples.shape[1]):
for col in range(0,samples.shape[1]):
# been burned too many times by unintentionally altering arrays
x,y = deepcopy(samples[:,col]), deepcopy(samples[:,row])
# Shield ourselves against nans or infinities.
x = x[np.isfinite(x) & np.isfinite(y)]
y = y[np.isfinite(x) & np.isfinite(y)]
# do some unit conversions for the sake of our collective sanity
if 'lambda' in labels[col]: x*=1e6 # convert a wavelength to um from m
if 'lambda' in labels[row]: y*=1e6
if 'L$_\\odot$' in labels[col]: x /= 1e12 # divide down luminosity
if 'L$_\\odot$' in labels[row]: y /= 1e12
# figure out some sensible axis limits
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2
ystd = np.ediff1d(np.percentile(y,[15.87,84.13]))[0]/2
xmin,xmax = np.median(x)-6*xstd, np.median(x)+6*xstd
ymin,ymax = np.median(y)-6*ystd, np.median(y)+6*ystd
if row>col:
try: marginalize_2d(x,y,axarr[row,col],\
extent=[xmin,xmax,ymin,ymax],bins=max(np.floor(x.size/1000),50))
except ValueError:
print(labels[row],labels[col])
raise ValueError("One of the columns has no dynamic range")
if col>0: pl.setp(axarr[row,col].get_yticklabels(),visible=False)
else: axarr[row,col].set_ylabel(labels[row],fontsize='x-large')
if row<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
else: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
axarr[row,col].yaxis.set_major_locator(MaxNLocator(5))
elif row==col:
marginalize_1d(x,axarr[row,col],extent=[xmin,xmax],bins=max(np.floor(x.size/1000),50))
if row==axarr.shape[0]-1: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
if col<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
else:
axarr[row,col].set_axis_off()
return f, axarr
def make_outputplot(plotfile,samples,labels,plotsed=True,um_rest=None,
Sfit=None,um_data=None, mjy_data=None,mjy_errors=None):
"""
Assemble the whole output thing.
"""
f,axarr = triangleplot(samples,labels)
if plotsed: trcax = plot_sed(um_rest,Sfit,um_data,mjy_data,mjy_errors,axarr[0,-1])
textax = text_summary(samples,labels,axarr[0,-2])
f.subplots_adjust(left=0.1,right=0.97,bottom=0.1,top=0.95,hspace=0,wspace=0)
f.savefig(plotfile)
pl.close()
| color = 'r' | conditional_block |
plotting_utils.py | import numpy as np
from get_luminosity import *
from astropy.cosmology import Planck15
import matplotlib.pyplot as pl
pl.ioff()
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.ticker import MaxNLocator
import scipy.ndimage
from copy import deepcopy
# Contains a few routines for making output plots, including
# triangle plots of parameter covariances and the best-fit SED.
__all__ = ['marginalize_2d','marginalize_1d','plot_sed','text_summary','triangleplot',
'make_outputplot']
def marginalize_2d(x,y,axobj,*args,**kwargs):
"""
Routine to plot 2D confidence intervals between two parameters given arrays
of MCMC samples.
Inputs:
x,y:
Arrays of MCMC chain values.
axobj:
A matplotlib Axes object on which to plot.
extent:
List of [xmin,xmax,ymin,ymax] values to be used as plot axis limits.
If not provided, something sensible is chosen.
bins:
Number of bins to put the chains into.
levs:
Contour levels, in sigma. Default is 1,2,3sigma regions.
Returns:
axobj:
The same axis object passed here, now with the regions plotted.
"""
# Get values of various possible kwargs
bins = kwargs.pop('bins',50)
levs = kwargs.pop('levs',[1.,2.,3.])
extent = kwargs.pop('extent',[x.min(),x.max(),y.min(),y.max()])
cmap = kwargs.pop('cmap','Greys')
cmap = cm.get_cmap(cmap.capitalize())
cmap = cmap(np.linspace(0,1,np.asarray(levs).size))
Xbins = np.linspace(extent[0],extent[1],bins+1)
Ybins = np.linspace(extent[2],extent[3],bins+1)
# Bin up the samples. Will fail if x or y has no dynamic range
try:
H,X,Y = np.histogram2d(x.flatten(),y.flatten(),bins=(Xbins,Ybins))
except ValueError: return ValueError("One of your columns has no dynamic range... check it.")
# Generate contour levels, sort probabilities from most to least likely
V = 1.0 - np.exp(-0.5*np.asarray(levs)**2.)
# Here we slightly smooth the contours to account for the finite number
# of MCMC samples. can adjust the 0.7 below, but too small is artificial
# and looks like shit.
H = scipy.ndimage.filters.gaussian_filter(H,0.2*np.log10(x.size))
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
# Find the probability levels that encompass each sigma's worth of likelihood
for i,v0 in enumerate(V):
try: V[i] = Hflat[sm <= v0][-1]
except: V[i] = Hflat[0]
V = V[::-1]
clevs = np.append(V,Hflat.max())
X1, Y1 = 0.5*(X[1:] + X[:-1]), 0.5*(Y[1:]+Y[:-1])
if kwargs.get('plotfilled',True): axobj.contourf(X1,Y1,H.T,clevs,colors=cmap)
axobj.contour(X1,Y1,H.T,clevs,colors=kwargs.get('colors','k'),linewidths=kwargs.get('linewidths',1.5),\
linestyles=kwargs.get('linestyles','solid'))
axobj.set_xlim(extent[0],extent[1])
axobj.set_ylim(extent[2],extent[3])
return axobj
def marginalize_1d(x,axobj,*args,**kwargs):
"""
Plot a histogram of x, with a few tweaks for corner plot pleasantry.
Inputs:
x:
Array of MCMC samples to plot up.
axobj:
Axes object on which to plot.
"""
bins = kwargs.pop('bins',50)
extent = kwargs.pop('extent',[x.min(),x.max()])
fillcolor = kwargs.pop('color','gray')
axobj.hist(x,bins=bins,range=extent,histtype='stepfilled',color=fillcolor)
axobj.yaxis.tick_right()
pl.setp(axobj.get_yticklabels(),visible=False)
axobj.set_xlim(extent[0],extent[1])
return axobj
def plot_sed(um_rest,Sfit,um_data,mjy_data,mjy_errors,axobj,*args,**kwargs):
"""
Plot an SED
"""
sedcolor = kwargs.pop('sedcolor','b')
axobj.set_axis_on()
axobj.plot(um_rest,Sfit,color=sedcolor,ls='-')
axobj.set_xscale('log'); axobj.set_yscale('log')
axobj.set_xlabel('$\\lambda_{obs}$, $\\mu m$')
axobj.set_ylabel('S$_{\\nu}$, mJy')
axobj.set_xlim(1.1**um_rest.max(),0.8*(1+z)*um_rest.min()) | altax.set_xlim(axobj.get_xlim()[0],axobj.get_xlim()[1])
altax.set_ylim(axobj.get_ylim()[0],axobj.get_ylim()[1])
altax.set_xscale('log'); altax.set_xlabel('$\\lambda_{rest}$, $\\mu m$')
# Plot the data points, and plot upper limits if they exist
for i,point in enumerate(um_data):
if point<200: color = 'c' # PACS points
elif point<= 500: color = 'g' # SPIRE points (also SABOCA...)
else: color = 'r'
if mjy_data[i] > 0:
axobj.errorbar(point,mjy_data[i],yerr=mjy_errors[i],color=color,\
marker='o')
else: # Draw upper limits, but make errobar sizes consistent on log-log
ylims,xlims = axobj.get_ylim(), axobj.get_xlim()
yloc = -(np.log10(ylims[0]) - np.log10(3*mjy_errors[i]))/(np.log10(ylims[1])-np.log10(ylims[0]))
xloc = (np.log10(point)-np.log10(xlims[0]))/(np.log10(xlims[1])-np.log10(xlims[0]))
axobj.errorbar(xloc,yloc,yerr=[[0.07],[0.0]],uplims=True,color=color,marker='o',\
ecolor='grey',mec='grey',transform=axobj.transAxes)
return axobj
def text_summary(samples,labels,axobj):
"""
Write some parameter summary text to the axobj.
"""
#axobj.set_axis_on()
axobj.text(-0.8,0.9,'Chain parameters:',fontsize='xx-large',transform=axobj.transAxes)
tloc = 0.
for par in range(0,samples.shape[1]):
x = deepcopy(samples[:,par])
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2.
xmed = np.median(x)
if 'lambda' in labels[par]: xmed,xstd = 1e6*xmed,1e6*xstd
if 'L$_\\odot$' in labels[par]: xmed,xstd = xmed/1e12, xstd/1e12
axobj.text(-0.8,0.7-tloc,'{0:10s} = {1:.2f} $\\pm$ {2:.2f}'.
format(labels[par],xmed,xstd),fontsize='xx-large',transform=axobj.transAxes)
tloc += 0.18
return axobj
def triangleplot(samples,labels):
"""
Assemble the MCMC samples into the usual triangle plot format.
Parameters:
samples: 2-D array
Array of MCMC samples, of shape (Nsamples,Nparameters)
labels: list of strings
What to label the axes. Latex math-mode is okay.
Returns:
f,axarr: matplotlib figure object and array of Axes objects of shape (Nparams,Nparams)
"""
f,axarr = pl.subplots(samples.shape[1],samples.shape[1],figsize=(3*samples.shape[1],3*samples.shape[1]))
for row in range(0,samples.shape[1]):
for col in range(0,samples.shape[1]):
# been burned too many times by unintentionally altering arrays
x,y = deepcopy(samples[:,col]), deepcopy(samples[:,row])
# Shield ourselves against nans or infinities.
x = x[np.isfinite(x) & np.isfinite(y)]
y = y[np.isfinite(x) & np.isfinite(y)]
# do some unit conversions for the sake of our collective sanity
if 'lambda' in labels[col]: x*=1e6 # convert a wavelength to um from m
if 'lambda' in labels[row]: y*=1e6
if 'L$_\\odot$' in labels[col]: x /= 1e12 # divide down luminosity
if 'L$_\\odot$' in labels[row]: y /= 1e12
# figure out some sensible axis limits
xstd = np.ediff1d(np.percentile(x,[15.87,84.13]))[0]/2
ystd = np.ediff1d(np.percentile(y,[15.87,84.13]))[0]/2
xmin,xmax = np.median(x)-6*xstd, np.median(x)+6*xstd
ymin,ymax = np.median(y)-6*ystd, np.median(y)+6*ystd
if row>col:
try: marginalize_2d(x,y,axarr[row,col],\
extent=[xmin,xmax,ymin,ymax],bins=max(np.floor(x.size/1000),50))
except ValueError:
print(labels[row],labels[col])
raise ValueError("One of the columns has no dynamic range")
if col>0: pl.setp(axarr[row,col].get_yticklabels(),visible=False)
else: axarr[row,col].set_ylabel(labels[row],fontsize='x-large')
if row<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
else: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
axarr[row,col].yaxis.set_major_locator(MaxNLocator(5))
elif row==col:
marginalize_1d(x,axarr[row,col],extent=[xmin,xmax],bins=max(np.floor(x.size/1000),50))
if row==axarr.shape[0]-1: axarr[row,col].set_xlabel(labels[col],fontsize='x-large')
if col<axarr.shape[0]-1: pl.setp(axarr[row,col].get_xticklabels(),visible=False)
axarr[row,col].xaxis.set_major_locator(MaxNLocator(5))
else:
axarr[row,col].set_axis_off()
return f, axarr
def make_outputplot(plotfile,samples,labels,plotsed=True,um_rest=None,
Sfit=None,um_data=None, mjy_data=None,mjy_errors=None):
"""
Assemble the whole output thing.
"""
f,axarr = triangleplot(samples,labels)
if plotsed: trcax = plot_sed(um_rest,Sfit,um_data,mjy_data,mjy_errors,axarr[0,-1])
textax = text_summary(samples,labels,axarr[0,-2])
f.subplots_adjust(left=0.1,right=0.97,bottom=0.1,top=0.95,hspace=0,wspace=0)
f.savefig(plotfile)
pl.close() | axobj.set_ylim(0.5*mjy_data[mjy_data>0.].min(),2*np.max(Sfit))
altax = axobj.twiny() | random_line_split |
decoder.rs | use crate::ebml;
use crate::schema::{Schema, SchemaDict};
use crate::vint::{read_vint, UnrepresentableLengthError};
use chrono::{DateTime, NaiveDateTime, Utc};
use err_derive::Error;
use log_derive::{logfn, logfn_inputs};
use std::convert::TryFrom;
pub trait ReadEbmlExt: std::io::Read {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_ebml_to_end<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
let _size = self.read_to_end(&mut buf).map_err(DecodeError::Io)?;
let elms = decoder.decode(buf)?;
Ok(elms)
}
}
impl<R: std::io::Read + ?Sized> ReadEbmlExt for R {}
pub trait BufReadEbmlExt: std::io::BufRead {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
loop {
let used = {
let available = match self.fill_buf() {
Ok(n) => n,
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => return Err(DecodeError::Io(e)),
};
buf.append(&mut decoder.decode(available.to_vec())?);
available.len()
};
self.consume(used);
if used == 0 {
break;
}
}
Ok(buf)
}
}
impl<R: std::io::BufRead + ?Sized> BufReadEbmlExt for R {}
#[derive(Debug, Error)]
pub enum DecodeError {
#[error(display = "{}", _0)]
ReadVint(#[error(cause)] UnrepresentableLengthError),
#[error(display = "UnknwonSizeNotAllowedInChildElement: pos {:?}", _0)]
UnknwonSizeNotAllowedInChildElement(ebml::ElementPosition),
#[error(display = "ReadContent")]
ReadContent(#[error(cause)] ReadContentError),
#[error(display = "UnknownEbmlId: {:?}", _0)]
UnknownEbmlId(ebml::EbmlId),
#[error(display = "Io")]
Io(#[error(cause)] std::io::Error),
}
impl From<UnrepresentableLengthError> for DecodeError {
fn from(o: UnrepresentableLengthError) -> Self {
DecodeError::ReadVint(o)
}
}
impl From<ReadContentError> for DecodeError {
fn from(o: ReadContentError) -> Self {
DecodeError::ReadContent(o)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum State {
Tag,
Size,
Content,
}
pub struct Decoder<'a, D: SchemaDict<'a>> {
schema: &'a D,
state: State,
buffer: Vec<u8>,
cursor: usize,
total: usize,
stack: Vec<ebml::ElementPosition>,
queue: Vec<ebml::ElementDetail>,
}
impl<'a, D: SchemaDict<'a>> Decoder<'a, D> {
pub fn new(schema: &'a D) -> Self {
Self {
schema,
state: State::Tag,
buffer: vec![],
cursor: 0,
total: 0,
stack: vec![],
queue: vec![],
}
}
#[logfn(ok = "TRACE", err = "ERROR")]
pub fn decode(&mut self, chunk: Vec<u8>) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
self.read_chunk(chunk)?;
let mut result = vec![];
std::mem::swap(&mut self.queue, &mut result);
Ok(result)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_chunk(&mut self, mut chunk: Vec<u8>) -> Result<(), DecodeError> {
// 読みかけの(読めなかった) buffer と 新しい chunk を合わせて読み直す
self.buffer.append(&mut chunk);
while self.cursor < self.buffer.len() {
match self.state {
State::Tag => {
if !self.read_tag()? {
break;
}
}
State::Size => {
if !self.read_size()? {
break;
}
}
State::Content => {
if !self.read_content()? {
break;
}
}
}
}
Ok(())
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_tag(&mut self) -> Result<bool, DecodeError> {
// tag is out of buffer
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml id vint without first byte
let opt_tag = read_vint(&self.buffer, self.cursor)?;
// cannot read tag yet
if opt_tag.is_none() {
return Ok(false);
}
let tag_size = opt_tag.unwrap().length;
let ebml_id = ebml::EbmlId(opt_tag.unwrap().value);
let tag_start = self.total;
let size_start = self.total + (tag_size as usize);
let content_start = 0;
let content_size = 0;
let schema = self
.schema
.get(ebml_id)
.ok_or_else(|| DecodeError::UnknownEbmlId(ebml_id))?;
let pos = ebml::ElementPosition {
level: schema.level(),
r#type: schema.r#type(),
ebml_id,
tag_start,
size_start,
content_start,
content_size,
};
self.stack.push(pos);
// move cursor
self.cursor += tag_size as usize;
self.total += tag_size as usize;
// change decoder state
self.state = State::Size;
Ok(true)
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_size(&mut self) -> Result<bool, DecodeError> {
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml datasize vint without first byte
let opt_size = read_vint(&self.buffer, self.cursor)?;
if opt_size.is_none() {
return Ok(false);
}
let size = opt_size.unwrap();
// decide current tag data size
let ebml::ElementPosition {
ref mut tag_start,
ref mut content_start,
ref mut content_size,
..
} = self.stack.last_mut().unwrap();
*content_start = *tag_start + (size.length as usize);
*content_size = size.value;
// move cursor and change state
self.cursor += size.length as usize;
self.total += size.length as usize;
self.state = State::Content;
Ok(true)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_content(&mut self) -> Result<bool, DecodeError> {
let current_pos = self.stack.last().unwrap();
// master element は子要素を持つので生データはない
if current_pos.r#type == 'm' {
let elm = (
ebml::MasterStartElement {
ebml_id: current_pos.ebml_id,
unknown_size: current_pos.content_size == -1,
},
*current_pos,
)
.into();
self.queue.push(elm);
self.state = State::Tag;
// この Mastert Element は空要素か
if current_pos.content_size == 0 {
// 即座に終了タグを追加
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: current_pos.ebml_id,
},
*current_pos,
)
.into(),
);
// スタックからこのタグを捨てる
self.stack.pop();
}
return Ok(true);
}
// endless master element
// waiting for more data
if current_pos.content_size < 0 {
return Err(DecodeError::UnknwonSizeNotAllowedInChildElement(
*current_pos,
));
}
use std::convert::TryFrom as _;
let content_size = usize::try_from(current_pos.content_size).unwrap();
if self.buffer.len() < self.cursor + content_size {
return Ok(false);
}
// タグの中身の生データ
let content = self.buffer[self.cursor..self.cursor + content_size].to_vec();
// 読み終わったバッファを捨てて読み込んでいる部分のバッファのみ残す
self.buffer = self.buffer.split_off(self.cursor + content_size);
let child_elm = read_child_element(
current_pos.ebml_id,
current_pos.r#type,
std::io::Cursor::new(content),
content_size,
)?;
self.queue.push((child_elm, *current_pos).into());
// ポインタを進める
self.total += content_size;
// タグ待ちモードに変更
self.state = State::Tag;
self.cursor = 0;
// remove the object from the stack
self.stack.pop();
while !self.stack.is_empty() {
let parent_pos = self.stack.last().unwrap();
// 親が不定長サイズなので閉じタグは期待できない
if parent_pos.content_size < 0 {
self.stack.pop(); // 親タグを捨てる
return Ok(true);
}
// 閉じタグの来るべき場所まで来たかどうか
if self.total < parent_pos.content_start + content_size {
break; | if parent_pos.r#type != 'm' {
// throw new Error("parent element is not master element");
unreachable!();
}
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: parent_pos.ebml_id,
},
*parent_pos,
)
.into(),
);
// スタックからこのタグを捨てる
self.stack.pop();
}
Ok(true)
}
}
#[derive(Debug, Error)]
pub enum ReadContentError {
#[error(display = "Date")]
Date(#[error(cause)] std::io::Error),
#[error(display = "Utf8")]
Utf8(#[error(cause)] std::io::Error),
#[error(display = "UnsignedInteger")]
UnsignedInteger(#[error(cause)] std::io::Error),
#[error(display = "Integer")]
Integer(#[error(cause)] std::io::Error),
#[error(display = "Float")]
Float(#[error(cause)] std::io::Error),
#[error(display = "Binary")]
Binary(#[error(cause)] std::io::Error),
#[error(display = "String")]
String(#[error(cause)] std::io::Error),
#[error(display = "Master")]
Master(#[error(cause)] std::io::Error),
#[error(display = "Unknown")]
Unknown(#[error(cause)] std::io::Error),
}
#[logfn_inputs(TRACE)]
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_child_element<C: std::io::Read + std::fmt::Debug>(
ebml_id: ebml::EbmlId,
r#type: char,
mut content: C,
content_size: usize,
) -> Result<ebml::ChildElement, ReadContentError> {
use byteorder::{BigEndian, ReadBytesExt as _};
use ReadContentError::{String as StringE, *};
match r#type {
// Unsigned Integer - Big-endian, any size from 1 to 8 octets
'u' => {
let value = content
.read_uint::<BigEndian>(content_size)
.map_err(UnsignedInteger)?;
Ok(ebml::UnsignedIntegerElement { ebml_id, value }.into())
}
// Signed Integer - Big-endian, any size from 1 to 8 octets
'i' => {
let value = content
.read_int::<BigEndian>(content_size)
.map_err(Integer)?;
Ok(ebml::IntegerElement { ebml_id, value }.into())
}
// Float - Big-endian, defined for 4 and 8 octets (32, 64 bits)
'f' => {
let value = if content_size == 4 {
f64::from(content.read_f32::<BigEndian>().map_err(Float)?)
} else if content_size == 8 {
content.read_f64::<BigEndian>().map_err(Float)?
} else {
Err(Float(std::io::Error::new(
std::io::ErrorKind::Other,
format!("invalid float content_size: {}", content_size),
)))?
};
Ok(ebml::FloatElement { ebml_id, value }.into())
}
// Printable ASCII (0x20 to 0x7E), zero-padded when needed
's' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(StringE)?;
Ok(ebml::StringElement { ebml_id, value }.into())
}
// Unicode string, zero padded when needed (RFC 2279)
'8' => {
let mut value = std::string::String::new();
content.read_to_string(&mut value).map_err(Utf8)?;
Ok(ebml::Utf8Element { ebml_id, value }.into())
}
// Binary - not interpreted by the parser
'b' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(Binary)?;
Ok(ebml::BinaryElement { ebml_id, value }.into())
}
// nano second; Date.UTC(2001,1,1,0,0,0,0) === 980985600000
// new Date("2001-01-01T00:00:00.000Z").getTime() = 978307200000
// Date - signed 8 octets integer in nanoseconds with 0 indicating
// the precise beginning of the millennium (at 2001-01-01T00:00:00,000000000 UTC)
'd' => {
let nanos = content.read_i64::<BigEndian>().map_err(Date)?;
let unix_time_nanos: i64 = nanos - 978_307_200 * 1000 * 1000 * 1000;
let unix_time_secs: i64 = unix_time_nanos / 1000 / 1000 / 1000 - 1;
let nsecs: u32 =
u32::try_from((unix_time_nanos & (1000 * 1000 * 1000)) + (1000 * 1000 * 1000))
.unwrap();
let datetime = NaiveDateTime::from_timestamp(unix_time_secs, nsecs);
let value = DateTime::from_utc(datetime, Utc);
Ok(ebml::DateElement { ebml_id, value }.into())
}
// Master-Element - contains other EBML sub-elements of the next lower level
'm' => Err(Master(std::io::Error::new(
std::io::ErrorKind::Other,
"cannot read master element as child element".to_string(),
)))?,
_ => Err(Unknown(std::io::Error::new(
std::io::ErrorKind::Other,
format!("unknown type: {}", r#type),
)))?,
}
} | }
// 閉じタグを挿入すべきタイミングが来た | random_line_split |
decoder.rs | use crate::ebml;
use crate::schema::{Schema, SchemaDict};
use crate::vint::{read_vint, UnrepresentableLengthError};
use chrono::{DateTime, NaiveDateTime, Utc};
use err_derive::Error;
use log_derive::{logfn, logfn_inputs};
use std::convert::TryFrom;
pub trait ReadEbmlExt: std::io::Read {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_ebml_to_end<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
let _size = self.read_to_end(&mut buf).map_err(DecodeError::Io)?;
let elms = decoder.decode(buf)?;
Ok(elms)
}
}
impl<R: std::io::Read + ?Sized> ReadEbmlExt for R {}
pub trait BufReadEbmlExt: std::io::BufRead {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
loop {
let used = {
let available = match self.fill_buf() {
Ok(n) => n,
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => return Err(DecodeError::Io(e)),
};
buf.append(&mut decoder.decode(available.to_vec())?);
available.len()
};
self.consume(used);
if used == 0 {
break;
}
}
Ok(buf)
}
}
impl<R: std::io::BufRead + ?Sized> BufReadEbmlExt for R {}
#[derive(Debug, Error)]
pub enum DecodeError {
#[error(display = "{}", _0)]
ReadVint(#[error(cause)] UnrepresentableLengthError),
#[error(display = "UnknwonSizeNotAllowedInChildElement: pos {:?}", _0)]
UnknwonSizeNotAllowedInChildElement(ebml::ElementPosition),
#[error(display = "ReadContent")]
ReadContent(#[error(cause)] ReadContentError),
#[error(display = "UnknownEbmlId: {:?}", _0)]
UnknownEbmlId(ebml::EbmlId),
#[error(display = "Io")]
Io(#[error(cause)] std::io::Error),
}
impl From<UnrepresentableLengthError> for DecodeError {
fn from(o: UnrepresentableLengthError) -> Self {
DecodeError::ReadVint(o)
}
}
impl From<ReadContentError> for DecodeError {
fn from(o: ReadContentError) -> Self {
DecodeError::ReadContent(o)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum State {
Tag,
Size,
Content,
}
pub struct Decoder<'a, D: SchemaDict<'a>> {
schema: &'a D,
state: State,
buffer: Vec<u8>,
cursor: usize,
total: usize,
stack: Vec<ebml::ElementPosition>,
queue: Vec<ebml::ElementDetail>,
}
impl<'a, D: SchemaDict<'a>> Decoder<'a, D> {
pub fn new(schema: &'a D) -> Self {
Self {
schema,
state: State::Tag,
buffer: vec![],
cursor: 0,
total: 0,
stack: vec![],
queue: vec![],
}
}
#[logfn(ok = "TRACE", err = "ERROR")]
pub fn decode(&mut self, chunk: Vec<u8>) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
self.read_chunk(chunk)?;
let mut result = vec![];
std::mem::swap(&mut self.queue, &mut result);
Ok(result)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_chunk(&mut self, mut chunk: Vec<u8>) -> Result<(), DecodeError> {
// 読みかけの(読めなかった) buffer と 新しい chunk を合わせて読み直す
self.buffer.append(&mut chunk);
while self.cursor < self.buffer.len() {
match self.state {
State::Tag => {
if !self.read_tag()? {
break;
}
}
State::Size => {
if !self.read_size()? {
break;
}
}
State::Content => {
if !self.read_content()? {
break;
}
}
}
}
Ok(())
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_tag(&mut self) -> Result<bool, DecodeError> {
// tag is out of buffer
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml id vint without first byte
let opt_tag = read_vint(&self.buffer, self.cursor)?;
// cannot read tag yet
if opt_tag.is_none() {
return Ok(false);
}
let tag_size = opt_tag.unwrap().length;
let ebml_id = ebml::EbmlId(opt_tag.unwrap().value);
let tag_start = self.total;
let size_start = self.total + (tag_size as usize);
let content_start = 0;
let content_size = 0;
let schema = self
.schema
.get(ebml_id)
.ok_or_else(|| DecodeError::UnknownEbmlId(ebml_id))?;
let pos = ebml::ElementPosition {
level: schema.level(),
r#type: schema.r#type(),
ebml_id,
tag_start,
size_start,
content_start,
content_size,
};
self.stack.push(pos);
// move cursor
self.cursor += tag_size as usize;
self.total += tag_size as usize;
// change decoder state
self.state = State::Size;
Ok(true)
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_size(&mut self) -> Result<bool, DecodeError> {
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml datasize vint without first byte
let opt_size = read_vint(&self.buffer, self.cursor)?;
if opt_size.is_none() {
return Ok(false);
}
let size = opt_size.unwrap();
// decide current tag data size
let ebml::ElementPosition {
ref mut tag_start,
ref mut content_start,
ref mut content_size,
..
} = self.stack.last_mut().unwrap();
*content_start = *tag_start + (size.length as usize);
*content_size = size.value;
// move cursor and change state
self.cursor += size.length as usize;
self.total += size.length as usize;
self.state = State::Content;
Ok(true)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_content(&mut self) -> Result<bool, DecodeError> {
let current_pos = self.stack.last().unwrap();
// master element は子要素を持つので生データはない
if current_pos.r#type == 'm' {
let elm = (
ebml::MasterStartElement {
ebml_id: current_pos.ebml_id,
unknown_size: current_pos.content_size == -1,
},
*current_pos,
)
.into();
self.queue.push(elm);
self.state = State::Tag;
// この Mastert Element は空要素か
if current_pos.content_size == 0 {
// 即座に終了タグを追加
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: current_pos.ebml_id,
},
*current_pos,
)
.into(),
);
// スタックからこのタグを捨てる
self.stack.pop();
}
return Ok(true);
}
// endless master element
// waiting for more data
if current_pos.content_size < 0 {
return Err(DecodeError::UnknwonSizeNotAllowedInChildElement(
*current_pos,
));
}
use std::convert::TryFrom as _;
let content_size = usize::try_from(current_pos.content_size).unwrap();
if self.buffer.len() < self.cursor + content_size {
return Ok(false);
}
// タグの中身の生データ
let content = self.buffer[self.cursor..self.cursor + content_size].to_vec();
// 読み終わったバッファを捨てて読み込んでいる部分のバッファのみ残す
self.buffer = self.buffer.split_off(self.cursor + content_size);
let child_elm = read_child_element(
current_pos.ebml_id,
current_pos.r#type,
std::io::Cursor::new(content),
content_size,
)?;
self.queue.push((child_elm, *current_pos).into());
// ポインタを進める
self.total += content_size;
// タグ待ちモードに変更
self.state = State::Tag;
self.cursor = 0;
// remove the object from the stack
self.stack.pop();
while !self.stack.is_empty() {
let parent_pos = self.stack.last().unwrap();
// 親が不定長サイズなので閉じタグは期待できない
if parent_pos.content_size < 0 {
self.stack.pop(); // 親タグを捨てる
return Ok(true);
}
// 閉じタグの来るべき場所まで来たかどうか
if self.total < parent_pos.content_start + content_size {
break;
}
// 閉じタグを挿入すべきタイミングが来た
if parent_pos.r#type != 'm' {
// throw new Error("parent element is not master element");
unreachable!();
}
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: parent_pos.ebml_id,
},
*parent_pos,
)
.into(),
);
| eadContentError {
#[error(display = "Date")]
Date(#[error(cause)] std::io::Error),
#[error(display = "Utf8")]
Utf8(#[error(cause)] std::io::Error),
#[error(display = "UnsignedInteger")]
UnsignedInteger(#[error(cause)] std::io::Error),
#[error(display = "Integer")]
Integer(#[error(cause)] std::io::Error),
#[error(display = "Float")]
Float(#[error(cause)] std::io::Error),
#[error(display = "Binary")]
Binary(#[error(cause)] std::io::Error),
#[error(display = "String")]
String(#[error(cause)] std::io::Error),
#[error(display = "Master")]
Master(#[error(cause)] std::io::Error),
#[error(display = "Unknown")]
Unknown(#[error(cause)] std::io::Error),
}
#[logfn_inputs(TRACE)]
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_child_element<C: std::io::Read + std::fmt::Debug>(
ebml_id: ebml::EbmlId,
r#type: char,
mut content: C,
content_size: usize,
) -> Result<ebml::ChildElement, ReadContentError> {
use byteorder::{BigEndian, ReadBytesExt as _};
use ReadContentError::{String as StringE, *};
match r#type {
// Unsigned Integer - Big-endian, any size from 1 to 8 octets
'u' => {
let value = content
.read_uint::<BigEndian>(content_size)
.map_err(UnsignedInteger)?;
Ok(ebml::UnsignedIntegerElement { ebml_id, value }.into())
}
// Signed Integer - Big-endian, any size from 1 to 8 octets
'i' => {
let value = content
.read_int::<BigEndian>(content_size)
.map_err(Integer)?;
Ok(ebml::IntegerElement { ebml_id, value }.into())
}
// Float - Big-endian, defined for 4 and 8 octets (32, 64 bits)
'f' => {
let value = if content_size == 4 {
f64::from(content.read_f32::<BigEndian>().map_err(Float)?)
} else if content_size == 8 {
content.read_f64::<BigEndian>().map_err(Float)?
} else {
Err(Float(std::io::Error::new(
std::io::ErrorKind::Other,
format!("invalid float content_size: {}", content_size),
)))?
};
Ok(ebml::FloatElement { ebml_id, value }.into())
}
// Printable ASCII (0x20 to 0x7E), zero-padded when needed
's' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(StringE)?;
Ok(ebml::StringElement { ebml_id, value }.into())
}
// Unicode string, zero padded when needed (RFC 2279)
'8' => {
let mut value = std::string::String::new();
content.read_to_string(&mut value).map_err(Utf8)?;
Ok(ebml::Utf8Element { ebml_id, value }.into())
}
// Binary - not interpreted by the parser
'b' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(Binary)?;
Ok(ebml::BinaryElement { ebml_id, value }.into())
}
// nano second; Date.UTC(2001,1,1,0,0,0,0) === 980985600000
// new Date("2001-01-01T00:00:00.000Z").getTime() = 978307200000
// Date - signed 8 octets integer in nanoseconds with 0 indicating
// the precise beginning of the millennium (at 2001-01-01T00:00:00,000000000 UTC)
'd' => {
let nanos = content.read_i64::<BigEndian>().map_err(Date)?;
let unix_time_nanos: i64 = nanos - 978_307_200 * 1000 * 1000 * 1000;
let unix_time_secs: i64 = unix_time_nanos / 1000 / 1000 / 1000 - 1;
let nsecs: u32 =
u32::try_from((unix_time_nanos & (1000 * 1000 * 1000)) + (1000 * 1000 * 1000))
.unwrap();
let datetime = NaiveDateTime::from_timestamp(unix_time_secs, nsecs);
let value = DateTime::from_utc(datetime, Utc);
Ok(ebml::DateElement { ebml_id, value }.into())
}
// Master-Element - contains other EBML sub-elements of the next lower level
'm' => Err(Master(std::io::Error::new(
std::io::ErrorKind::Other,
"cannot read master element as child element".to_string(),
)))?,
_ => Err(Unknown(std::io::Error::new(
std::io::ErrorKind::Other,
format!("unknown type: {}", r#type),
)))?,
}
}
| // スタックからこのタグを捨てる
self.stack.pop();
}
Ok(true)
}
}
#[derive(Debug, Error)]
pub enum R | conditional_block |
decoder.rs | use crate::ebml;
use crate::schema::{Schema, SchemaDict};
use crate::vint::{read_vint, UnrepresentableLengthError};
use chrono::{DateTime, NaiveDateTime, Utc};
use err_derive::Error;
use log_derive::{logfn, logfn_inputs};
use std::convert::TryFrom;
pub trait ReadEbmlExt: std::io::Read {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_ebml_to_end<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
let _size = self.read_to_end(&mut buf).map_err(DecodeError::Io)?;
let elms = decoder.decode(buf)?;
Ok(elms)
}
}
impl<R: std::io::Read + ?Sized> ReadEbmlExt for R {}
pub trait BufReadEbmlExt: std::io::BufRead {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
loop {
let used = {
let available = match self.fill_buf() {
Ok(n) => n,
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => return Err(DecodeError::Io(e)),
};
buf.append(&mut decoder.decode(available.to_vec())?);
available.len()
};
self.consume(used);
if used == 0 {
break;
}
}
Ok(buf)
}
}
impl<R: std::io::BufRead + ?Sized> BufReadEbmlExt for R {}
#[derive(Debug, Error)]
pub enum DecodeError {
#[error(display = "{}", _0)]
ReadVint(#[error(cause)] UnrepresentableLengthError),
#[error(display = "UnknwonSizeNotAllowedInChildElement: pos {:?}", _0)]
UnknwonSizeNotAllowedInChildElement(ebml::ElementPosition),
#[error(display = "ReadContent")]
ReadContent(#[error(cause)] ReadContentError),
#[error(display = "UnknownEbmlId: {:?}", _0)]
UnknownEbmlId(ebml::EbmlId),
#[error(display = "Io")]
Io(#[error(cause)] std::io::Error),
}
impl From<UnrepresentableLengthError> for DecodeError {
fn from(o: UnrepresentableLengthError) -> Self |
}
impl From<ReadContentError> for DecodeError {
fn from(o: ReadContentError) -> Self {
DecodeError::ReadContent(o)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum State {
Tag,
Size,
Content,
}
pub struct Decoder<'a, D: SchemaDict<'a>> {
schema: &'a D,
state: State,
buffer: Vec<u8>,
cursor: usize,
total: usize,
stack: Vec<ebml::ElementPosition>,
queue: Vec<ebml::ElementDetail>,
}
impl<'a, D: SchemaDict<'a>> Decoder<'a, D> {
pub fn new(schema: &'a D) -> Self {
Self {
schema,
state: State::Tag,
buffer: vec![],
cursor: 0,
total: 0,
stack: vec![],
queue: vec![],
}
}
#[logfn(ok = "TRACE", err = "ERROR")]
pub fn decode(&mut self, chunk: Vec<u8>) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
self.read_chunk(chunk)?;
let mut result = vec![];
std::mem::swap(&mut self.queue, &mut result);
Ok(result)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_chunk(&mut self, mut chunk: Vec<u8>) -> Result<(), DecodeError> {
// 読みかけの(読めなかった) buffer と 新しい chunk を合わせて読み直す
self.buffer.append(&mut chunk);
while self.cursor < self.buffer.len() {
match self.state {
State::Tag => {
if !self.read_tag()? {
break;
}
}
State::Size => {
if !self.read_size()? {
break;
}
}
State::Content => {
if !self.read_content()? {
break;
}
}
}
}
Ok(())
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_tag(&mut self) -> Result<bool, DecodeError> {
// tag is out of buffer
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml id vint without first byte
let opt_tag = read_vint(&self.buffer, self.cursor)?;
// cannot read tag yet
if opt_tag.is_none() {
return Ok(false);
}
let tag_size = opt_tag.unwrap().length;
let ebml_id = ebml::EbmlId(opt_tag.unwrap().value);
let tag_start = self.total;
let size_start = self.total + (tag_size as usize);
let content_start = 0;
let content_size = 0;
let schema = self
.schema
.get(ebml_id)
.ok_or_else(|| DecodeError::UnknownEbmlId(ebml_id))?;
let pos = ebml::ElementPosition {
level: schema.level(),
r#type: schema.r#type(),
ebml_id,
tag_start,
size_start,
content_start,
content_size,
};
self.stack.push(pos);
// move cursor
self.cursor += tag_size as usize;
self.total += tag_size as usize;
// change decoder state
self.state = State::Size;
Ok(true)
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_size(&mut self) -> Result<bool, DecodeError> {
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml datasize vint without first byte
let opt_size = read_vint(&self.buffer, self.cursor)?;
if opt_size.is_none() {
return Ok(false);
}
let size = opt_size.unwrap();
// decide current tag data size
let ebml::ElementPosition {
ref mut tag_start,
ref mut content_start,
ref mut content_size,
..
} = self.stack.last_mut().unwrap();
*content_start = *tag_start + (size.length as usize);
*content_size = size.value;
// move cursor and change state
self.cursor += size.length as usize;
self.total += size.length as usize;
self.state = State::Content;
Ok(true)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_content(&mut self) -> Result<bool, DecodeError> {
let current_pos = self.stack.last().unwrap();
// master element は子要素を持つので生データはない
if current_pos.r#type == 'm' {
let elm = (
ebml::MasterStartElement {
ebml_id: current_pos.ebml_id,
unknown_size: current_pos.content_size == -1,
},
*current_pos,
)
.into();
self.queue.push(elm);
self.state = State::Tag;
// この Mastert Element は空要素か
if current_pos.content_size == 0 {
// 即座に終了タグを追加
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: current_pos.ebml_id,
},
*current_pos,
)
.into(),
);
// スタックからこのタグを捨てる
self.stack.pop();
}
return Ok(true);
}
// endless master element
// waiting for more data
if current_pos.content_size < 0 {
return Err(DecodeError::UnknwonSizeNotAllowedInChildElement(
*current_pos,
));
}
use std::convert::TryFrom as _;
let content_size = usize::try_from(current_pos.content_size).unwrap();
if self.buffer.len() < self.cursor + content_size {
return Ok(false);
}
// タグの中身の生データ
let content = self.buffer[self.cursor..self.cursor + content_size].to_vec();
// 読み終わったバッファを捨てて読み込んでいる部分のバッファのみ残す
self.buffer = self.buffer.split_off(self.cursor + content_size);
let child_elm = read_child_element(
current_pos.ebml_id,
current_pos.r#type,
std::io::Cursor::new(content),
content_size,
)?;
self.queue.push((child_elm, *current_pos).into());
// ポインタを進める
self.total += content_size;
// タグ待ちモードに変更
self.state = State::Tag;
self.cursor = 0;
// remove the object from the stack
self.stack.pop();
while !self.stack.is_empty() {
let parent_pos = self.stack.last().unwrap();
// 親が不定長サイズなので閉じタグは期待できない
if parent_pos.content_size < 0 {
self.stack.pop(); // 親タグを捨てる
return Ok(true);
}
// 閉じタグの来るべき場所まで来たかどうか
if self.total < parent_pos.content_start + content_size {
break;
}
// 閉じタグを挿入すべきタイミングが来た
if parent_pos.r#type != 'm' {
// throw new Error("parent element is not master element");
unreachable!();
}
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: parent_pos.ebml_id,
},
*parent_pos,
)
.into(),
);
// スタックからこのタグを捨てる
self.stack.pop();
}
Ok(true)
}
}
#[derive(Debug, Error)]
pub enum ReadContentError {
#[error(display = "Date")]
Date(#[error(cause)] std::io::Error),
#[error(display = "Utf8")]
Utf8(#[error(cause)] std::io::Error),
#[error(display = "UnsignedInteger")]
UnsignedInteger(#[error(cause)] std::io::Error),
#[error(display = "Integer")]
Integer(#[error(cause)] std::io::Error),
#[error(display = "Float")]
Float(#[error(cause)] std::io::Error),
#[error(display = "Binary")]
Binary(#[error(cause)] std::io::Error),
#[error(display = "String")]
String(#[error(cause)] std::io::Error),
#[error(display = "Master")]
Master(#[error(cause)] std::io::Error),
#[error(display = "Unknown")]
Unknown(#[error(cause)] std::io::Error),
}
#[logfn_inputs(TRACE)]
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_child_element<C: std::io::Read + std::fmt::Debug>(
ebml_id: ebml::EbmlId,
r#type: char,
mut content: C,
content_size: usize,
) -> Result<ebml::ChildElement, ReadContentError> {
use byteorder::{BigEndian, ReadBytesExt as _};
use ReadContentError::{String as StringE, *};
match r#type {
// Unsigned Integer - Big-endian, any size from 1 to 8 octets
'u' => {
let value = content
.read_uint::<BigEndian>(content_size)
.map_err(UnsignedInteger)?;
Ok(ebml::UnsignedIntegerElement { ebml_id, value }.into())
}
// Signed Integer - Big-endian, any size from 1 to 8 octets
'i' => {
let value = content
.read_int::<BigEndian>(content_size)
.map_err(Integer)?;
Ok(ebml::IntegerElement { ebml_id, value }.into())
}
// Float - Big-endian, defined for 4 and 8 octets (32, 64 bits)
'f' => {
let value = if content_size == 4 {
f64::from(content.read_f32::<BigEndian>().map_err(Float)?)
} else if content_size == 8 {
content.read_f64::<BigEndian>().map_err(Float)?
} else {
Err(Float(std::io::Error::new(
std::io::ErrorKind::Other,
format!("invalid float content_size: {}", content_size),
)))?
};
Ok(ebml::FloatElement { ebml_id, value }.into())
}
// Printable ASCII (0x20 to 0x7E), zero-padded when needed
's' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(StringE)?;
Ok(ebml::StringElement { ebml_id, value }.into())
}
// Unicode string, zero padded when needed (RFC 2279)
'8' => {
let mut value = std::string::String::new();
content.read_to_string(&mut value).map_err(Utf8)?;
Ok(ebml::Utf8Element { ebml_id, value }.into())
}
// Binary - not interpreted by the parser
'b' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(Binary)?;
Ok(ebml::BinaryElement { ebml_id, value }.into())
}
// nano second; Date.UTC(2001,1,1,0,0,0,0) === 980985600000
// new Date("2001-01-01T00:00:00.000Z").getTime() = 978307200000
// Date - signed 8 octets integer in nanoseconds with 0 indicating
// the precise beginning of the millennium (at 2001-01-01T00:00:00,000000000 UTC)
'd' => {
let nanos = content.read_i64::<BigEndian>().map_err(Date)?;
let unix_time_nanos: i64 = nanos - 978_307_200 * 1000 * 1000 * 1000;
let unix_time_secs: i64 = unix_time_nanos / 1000 / 1000 / 1000 - 1;
let nsecs: u32 =
u32::try_from((unix_time_nanos & (1000 * 1000 * 1000)) + (1000 * 1000 * 1000))
.unwrap();
let datetime = NaiveDateTime::from_timestamp(unix_time_secs, nsecs);
let value = DateTime::from_utc(datetime, Utc);
Ok(ebml::DateElement { ebml_id, value }.into())
}
// Master-Element - contains other EBML sub-elements of the next lower level
'm' => Err(Master(std::io::Error::new(
std::io::ErrorKind::Other,
"cannot read master element as child element".to_string(),
)))?,
_ => Err(Unknown(std::io::Error::new(
std::io::ErrorKind::Other,
format!("unknown type: {}", r#type),
)))?,
}
}
| {
DecodeError::ReadVint(o)
} | identifier_body |
decoder.rs | use crate::ebml;
use crate::schema::{Schema, SchemaDict};
use crate::vint::{read_vint, UnrepresentableLengthError};
use chrono::{DateTime, NaiveDateTime, Utc};
use err_derive::Error;
use log_derive::{logfn, logfn_inputs};
use std::convert::TryFrom;
pub trait ReadEbmlExt: std::io::Read {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_ebml_to_end<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
let _size = self.read_to_end(&mut buf).map_err(DecodeError::Io)?;
let elms = decoder.decode(buf)?;
Ok(elms)
}
}
impl<R: std::io::Read + ?Sized> ReadEbmlExt for R {}
pub trait BufReadEbmlExt: std::io::BufRead {
#[logfn(ok = "TRACE", err = "ERROR")]
fn read<'a, D: SchemaDict<'a>>(
&mut self,
schema: &'a D,
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
let mut decoder = Decoder::new(schema);
let mut buf = vec![];
loop {
let used = {
let available = match self.fill_buf() {
Ok(n) => n,
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
Err(e) => return Err(DecodeError::Io(e)),
};
buf.append(&mut decoder.decode(available.to_vec())?);
available.len()
};
self.consume(used);
if used == 0 {
break;
}
}
Ok(buf)
}
}
impl<R: std::io::BufRead + ?Sized> BufReadEbmlExt for R {}
#[derive(Debug, Error)]
pub enum DecodeError {
#[error(display = "{}", _0)]
ReadVint(#[error(cause)] UnrepresentableLengthError),
#[error(display = "UnknwonSizeNotAllowedInChildElement: pos {:?}", _0)]
UnknwonSizeNotAllowedInChildElement(ebml::ElementPosition),
#[error(display = "ReadContent")]
ReadContent(#[error(cause)] ReadContentError),
#[error(display = "UnknownEbmlId: {:?}", _0)]
UnknownEbmlId(ebml::EbmlId),
#[error(display = "Io")]
Io(#[error(cause)] std::io::Error),
}
impl From<UnrepresentableLengthError> for DecodeError {
fn from(o: UnrepresentableLengthError) -> Self {
DecodeError::ReadVint(o)
}
}
impl From<ReadContentError> for DecodeError {
fn | (o: ReadContentError) -> Self {
DecodeError::ReadContent(o)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum State {
Tag,
Size,
Content,
}
pub struct Decoder<'a, D: SchemaDict<'a>> {
schema: &'a D,
state: State,
buffer: Vec<u8>,
cursor: usize,
total: usize,
stack: Vec<ebml::ElementPosition>,
queue: Vec<ebml::ElementDetail>,
}
impl<'a, D: SchemaDict<'a>> Decoder<'a, D> {
pub fn new(schema: &'a D) -> Self {
Self {
schema,
state: State::Tag,
buffer: vec![],
cursor: 0,
total: 0,
stack: vec![],
queue: vec![],
}
}
#[logfn(ok = "TRACE", err = "ERROR")]
pub fn decode(&mut self, chunk: Vec<u8>) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
self.read_chunk(chunk)?;
let mut result = vec![];
std::mem::swap(&mut self.queue, &mut result);
Ok(result)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_chunk(&mut self, mut chunk: Vec<u8>) -> Result<(), DecodeError> {
// 読みかけの(読めなかった) buffer と 新しい chunk を合わせて読み直す
self.buffer.append(&mut chunk);
while self.cursor < self.buffer.len() {
match self.state {
State::Tag => {
if !self.read_tag()? {
break;
}
}
State::Size => {
if !self.read_size()? {
break;
}
}
State::Content => {
if !self.read_content()? {
break;
}
}
}
}
Ok(())
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_tag(&mut self) -> Result<bool, DecodeError> {
// tag is out of buffer
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml id vint without first byte
let opt_tag = read_vint(&self.buffer, self.cursor)?;
// cannot read tag yet
if opt_tag.is_none() {
return Ok(false);
}
let tag_size = opt_tag.unwrap().length;
let ebml_id = ebml::EbmlId(opt_tag.unwrap().value);
let tag_start = self.total;
let size_start = self.total + (tag_size as usize);
let content_start = 0;
let content_size = 0;
let schema = self
.schema
.get(ebml_id)
.ok_or_else(|| DecodeError::UnknownEbmlId(ebml_id))?;
let pos = ebml::ElementPosition {
level: schema.level(),
r#type: schema.r#type(),
ebml_id,
tag_start,
size_start,
content_start,
content_size,
};
self.stack.push(pos);
// move cursor
self.cursor += tag_size as usize;
self.total += tag_size as usize;
// change decoder state
self.state = State::Size;
Ok(true)
}
/// return false when waiting for more data
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_size(&mut self) -> Result<bool, DecodeError> {
if self.cursor >= self.buffer.len() {
return Ok(false);
}
// read ebml datasize vint without first byte
let opt_size = read_vint(&self.buffer, self.cursor)?;
if opt_size.is_none() {
return Ok(false);
}
let size = opt_size.unwrap();
// decide current tag data size
let ebml::ElementPosition {
ref mut tag_start,
ref mut content_start,
ref mut content_size,
..
} = self.stack.last_mut().unwrap();
*content_start = *tag_start + (size.length as usize);
*content_size = size.value;
// move cursor and change state
self.cursor += size.length as usize;
self.total += size.length as usize;
self.state = State::Content;
Ok(true)
}
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_content(&mut self) -> Result<bool, DecodeError> {
let current_pos = self.stack.last().unwrap();
// master element は子要素を持つので生データはない
if current_pos.r#type == 'm' {
let elm = (
ebml::MasterStartElement {
ebml_id: current_pos.ebml_id,
unknown_size: current_pos.content_size == -1,
},
*current_pos,
)
.into();
self.queue.push(elm);
self.state = State::Tag;
// この Mastert Element は空要素か
if current_pos.content_size == 0 {
// 即座に終了タグを追加
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: current_pos.ebml_id,
},
*current_pos,
)
.into(),
);
// スタックからこのタグを捨てる
self.stack.pop();
}
return Ok(true);
}
// endless master element
// waiting for more data
if current_pos.content_size < 0 {
return Err(DecodeError::UnknwonSizeNotAllowedInChildElement(
*current_pos,
));
}
use std::convert::TryFrom as _;
let content_size = usize::try_from(current_pos.content_size).unwrap();
if self.buffer.len() < self.cursor + content_size {
return Ok(false);
}
// タグの中身の生データ
let content = self.buffer[self.cursor..self.cursor + content_size].to_vec();
// 読み終わったバッファを捨てて読み込んでいる部分のバッファのみ残す
self.buffer = self.buffer.split_off(self.cursor + content_size);
let child_elm = read_child_element(
current_pos.ebml_id,
current_pos.r#type,
std::io::Cursor::new(content),
content_size,
)?;
self.queue.push((child_elm, *current_pos).into());
// ポインタを進める
self.total += content_size;
// タグ待ちモードに変更
self.state = State::Tag;
self.cursor = 0;
// remove the object from the stack
self.stack.pop();
while !self.stack.is_empty() {
let parent_pos = self.stack.last().unwrap();
// 親が不定長サイズなので閉じタグは期待できない
if parent_pos.content_size < 0 {
self.stack.pop(); // 親タグを捨てる
return Ok(true);
}
// 閉じタグの来るべき場所まで来たかどうか
if self.total < parent_pos.content_start + content_size {
break;
}
// 閉じタグを挿入すべきタイミングが来た
if parent_pos.r#type != 'm' {
// throw new Error("parent element is not master element");
unreachable!();
}
self.queue.push(
(
ebml::MasterEndElement {
ebml_id: parent_pos.ebml_id,
},
*parent_pos,
)
.into(),
);
// スタックからこのタグを捨てる
self.stack.pop();
}
Ok(true)
}
}
#[derive(Debug, Error)]
pub enum ReadContentError {
#[error(display = "Date")]
Date(#[error(cause)] std::io::Error),
#[error(display = "Utf8")]
Utf8(#[error(cause)] std::io::Error),
#[error(display = "UnsignedInteger")]
UnsignedInteger(#[error(cause)] std::io::Error),
#[error(display = "Integer")]
Integer(#[error(cause)] std::io::Error),
#[error(display = "Float")]
Float(#[error(cause)] std::io::Error),
#[error(display = "Binary")]
Binary(#[error(cause)] std::io::Error),
#[error(display = "String")]
String(#[error(cause)] std::io::Error),
#[error(display = "Master")]
Master(#[error(cause)] std::io::Error),
#[error(display = "Unknown")]
Unknown(#[error(cause)] std::io::Error),
}
#[logfn_inputs(TRACE)]
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_child_element<C: std::io::Read + std::fmt::Debug>(
ebml_id: ebml::EbmlId,
r#type: char,
mut content: C,
content_size: usize,
) -> Result<ebml::ChildElement, ReadContentError> {
use byteorder::{BigEndian, ReadBytesExt as _};
use ReadContentError::{String as StringE, *};
match r#type {
// Unsigned Integer - Big-endian, any size from 1 to 8 octets
'u' => {
let value = content
.read_uint::<BigEndian>(content_size)
.map_err(UnsignedInteger)?;
Ok(ebml::UnsignedIntegerElement { ebml_id, value }.into())
}
// Signed Integer - Big-endian, any size from 1 to 8 octets
'i' => {
let value = content
.read_int::<BigEndian>(content_size)
.map_err(Integer)?;
Ok(ebml::IntegerElement { ebml_id, value }.into())
}
// Float - Big-endian, defined for 4 and 8 octets (32, 64 bits)
'f' => {
let value = if content_size == 4 {
f64::from(content.read_f32::<BigEndian>().map_err(Float)?)
} else if content_size == 8 {
content.read_f64::<BigEndian>().map_err(Float)?
} else {
Err(Float(std::io::Error::new(
std::io::ErrorKind::Other,
format!("invalid float content_size: {}", content_size),
)))?
};
Ok(ebml::FloatElement { ebml_id, value }.into())
}
// Printable ASCII (0x20 to 0x7E), zero-padded when needed
's' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(StringE)?;
Ok(ebml::StringElement { ebml_id, value }.into())
}
// Unicode string, zero padded when needed (RFC 2279)
'8' => {
let mut value = std::string::String::new();
content.read_to_string(&mut value).map_err(Utf8)?;
Ok(ebml::Utf8Element { ebml_id, value }.into())
}
// Binary - not interpreted by the parser
'b' => {
let mut value = vec![0; content_size];
content.read_exact(&mut value).map_err(Binary)?;
Ok(ebml::BinaryElement { ebml_id, value }.into())
}
// nano second; Date.UTC(2001,1,1,0,0,0,0) === 980985600000
// new Date("2001-01-01T00:00:00.000Z").getTime() = 978307200000
// Date - signed 8 octets integer in nanoseconds with 0 indicating
// the precise beginning of the millennium (at 2001-01-01T00:00:00,000000000 UTC)
'd' => {
let nanos = content.read_i64::<BigEndian>().map_err(Date)?;
let unix_time_nanos: i64 = nanos - 978_307_200 * 1000 * 1000 * 1000;
let unix_time_secs: i64 = unix_time_nanos / 1000 / 1000 / 1000 - 1;
let nsecs: u32 =
u32::try_from((unix_time_nanos & (1000 * 1000 * 1000)) + (1000 * 1000 * 1000))
.unwrap();
let datetime = NaiveDateTime::from_timestamp(unix_time_secs, nsecs);
let value = DateTime::from_utc(datetime, Utc);
Ok(ebml::DateElement { ebml_id, value }.into())
}
// Master-Element - contains other EBML sub-elements of the next lower level
'm' => Err(Master(std::io::Error::new(
std::io::ErrorKind::Other,
"cannot read master element as child element".to_string(),
)))?,
_ => Err(Unknown(std::io::Error::new(
std::io::ErrorKind::Other,
format!("unknown type: {}", r#type),
)))?,
}
}
| from | identifier_name |
message.py | # encoding: utf-8
"""MIME-encoded electronic mail message classes."""
import logging
import os
import time
import warnings
from datetime import datetime
from turbomail import release
from turbomail.compat import Header, MIMEBase, MIMEImage, MIMEMultipart, MIMEText, encode_base64, formatdate, make_msgid
from turbomail.util import Address, AddressList
from turbomail.control import interface
__all__ = ['Message']
log = logging.getLogger("turbomail.message")
class NoDefault(object):
pass
class BaseMessage(object):
def __init__(self, smtp_from=None, to=None, kw=None):
self.merge_if_set(kw, smtp_from, 'smtp_from')
self._smtp_from = AddressList(self.pop_deprecated(kw, 'smtp_from', 'smtpfrom'))
self._to = AddressList(self.pop_deprecated(kw, 'to', 'recipient', value=to))
self.nr_retries = self.kwpop(kw, 'nr_retries', default=3)
smtp_from = AddressList.protected('_smtp_from')
to = AddressList.protected('_to')
def merge_if_set(self, kw, value, name):
if value not in [None, '']:
kw[name] = value
def kwpop(self, kw, name, configkey=None, default=None, old_configkey=None):
if name in kw:
value = kw.pop(name)
else:
if configkey == None:
configkey = 'mail.message.%s' % name
value = interface.config.get(configkey, NoDefault)
if value == NoDefault and old_configkey != None:
value = interface.config.get(old_configkey, NoDefault)
if value != NoDefault:
msg = 'Falling back to deprecated configuration option "%s", please use "%s" instead'
warnings.warn(msg % (old_configkey, configkey),
category=DeprecationWarning)
if value == NoDefault:
value = default
return value
def pop_deprecated(self, kw, new_name, deprecated_name, value=None):
deprecated_value = self.kwpop(kw, deprecated_name)
if deprecated_value != None:
self._warn_about_deprecated_property(deprecated_name, new_name)
value = deprecated_value
elif value == None:
value = self.kwpop(kw, new_name)
return value
def send(self):
return interface.send(self)
# --------------------------------------------------------------------------
# Deprecated properties
def _warn_about_deprecated_property(self, deprecated_name, new_name):
msg = 'Property "%s" is deprecated, please use "%s" instead'
warnings.warn(msg % (deprecated_name, new_name), category=DeprecationWarning)
def get_smtpfrom(self):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
return self.smtp_from
def set_smtpfrom(self, smtpfrom):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
self.smtp_from = smtpfrom
smtpfrom = property(fget=get_smtpfrom, fset=set_smtpfrom)
def get_recipient(self):
self._warn_about_deprecated_property('recipient', 'to')
return self.to
def set_recipient(self, recipient):
self._warn_about_deprecated_property('recipient', 'to')
self.to = recipient
recipient = property(fget=get_recipient, fset=set_recipient)
class Message(BaseMessage):
"""Simple e-mail message class."""
def __init__(self, author=None, to=None, subject=None, **kw):
"""Instantiate a new Message object.
No arguments are required, as everything can be set using class
properties. Alternatively, I{everything} can be set using the
constructor, using named arguments. The first three positional
arguments can be used to quickly prepare a simple message.
"""
super(Message, self).__init__(to=to, kw=kw)
kwpop = lambda *args, **kwargs: self.kwpop(kw, *args, **kwargs)
pop_deprecated = lambda name, old_name: self.pop_deprecated(kw, name, old_name)
self.merge_if_set(kw, author, 'author')
self._author = AddressList(kwpop('author'))
self._cc = AddressList(kwpop("cc"))
self._bcc = AddressList(kwpop("bcc"))
self._sender = AddressList(kwpop("sender"))
self._reply_to = AddressList(pop_deprecated('reply_to', 'replyto'))
self._disposition = AddressList(kwpop("disposition"))
self.subject = subject
self.date = kwpop("date")
self.encoding = kwpop("encoding", default='us-ascii',
old_configkey='mail.encoding')
self.organization = kwpop("organization")
self.priority = kwpop("priority")
self.plain = kwpop("plain", default=None)
self.rich = kwpop("rich", default=None)
self.attachments = kwpop("attachments", default=[])
self.embedded = kwpop("embedded", default=[])
self.headers = kwpop("headers", default=[])
self._id = kw.get("id", None)
self._processed = False
self._dirty = False
if len(kw) > 0:
parameter_name = kw.keys()[0]
error_msg = "__init__() got an unexpected keyword argument '%s'"
raise TypeError(error_msg % parameter_name)
author = AddressList.protected('_author')
bcc = AddressList.protected('_bcc')
cc = AddressList.protected('_cc')
disposition = AddressList.protected('_disposition')
reply_to = AddressList.protected('_reply_to')
sender = AddressList.protected('_sender')
def __setattr__(self, name, value):
"""Set the dirty flag as properties are updated."""
super(Message, self).__setattr__(name, value)
if name not in ('bcc', '_dirty', '_processed'):
self.__dict__['_dirty'] = True
def __str__(self):
return self.mime.as_string()
def _get_authors(self):
# Just for better readability. I put this method here because a wrapped
# message must only have one author (smtp_from) so this method is only
# useful in a TurboMail Message.
return self.author
def _set_authors(self, value):
self.author = value
authors = property(_get_authors, _set_authors)
def id(self):
if not self._id or (self._processed and self._dirty):
self.__dict__['_id'] = make_msgid()
self._processed = False
return self._id
id = property(id)
def envelope_sender(self):
"""Returns the address of the envelope sender address (SMTP from, if not
set the sender, if this one isn't set too, the author)."""
envelope_sender = None
# TODO: Make this check better as soon as SMTP from and sender are
# Addresses, not AddressLists anymore.
if self.smtp_from != None and len(self.smtp_from) > 0:
envelope_sender = self.smtp_from
elif self.sender != None and len(self.sender) > 0:
envelope_sender = self.sender
else:
envelope_sender = self.author
return Address(envelope_sender)
envelope_sender = property(envelope_sender)
def recipients(self):
return AddressList(self.to + self.cc + self.bcc)
recipients = property(recipients)
def mime_document(self, plain, rich=None):
if not rich:
message = plain
else:
message = MIMEMultipart('alternative')
message.attach(plain)
if not self.embedded:
message.attach(rich)
else:
embedded = MIMEMultipart('related')
embedded.attach(rich)
for attachment in self.embedded: embedded.attach(attachment)
message.attach(embedded)
if self.attachments:
attachments = MIMEMultipart()
attachments.attach(message)
for attachment in self.attachments: attachments.attach(attachment)
message = attachments
return message
def _build_date_header_string(self, date_value):
"""Gets the date_value (may be None, basestring, float or
datetime.datetime instance) and returns a valid date string as per
RFC 2822."""
if isinstance(date_value, datetime):
date_value = time.mktime(date_value.timetuple())
if not isinstance(date_value, basestring):
date_value = formatdate(date_value, localtime=True)
return date_value
def _build_header_list(self, author, sender):
date_value = self._build_date_header_string(self.date)
headers = [
('Sender', sender),
('From', author),
('Reply-To', self.reply_to),
('Subject', self.subject),
('Date', date_value),
('To', self.to),
('Cc', self.cc),
('Disposition-Notification-To', self.disposition),
('Organization', self.organization),
('X-Priority', self.priority),
]
if interface.config.get("mail.brand", True):
headers.extend([
('X-Mailer', "%s %s" % (release.name, release.version)),
])
if isinstance(self.headers, dict):
for key in self.headers:
headers.append((key, self.headers[key]))
else:
headers.extend(self.headers)
return headers
def _add_headers_to_message(self, message, headers):
for header in headers:
if isinstance(header, (tuple, list)):
if header[1] is None or ( isinstance(header[1], list) and not header[1] ): continue
header = list(header)
if isinstance(header[1], unicode):
header[1] = Header(header[1], self.encoding)
elif isinstance(header[1], AddressList):
header[1] = header[1].encode(self.encoding)
header[1] = str(header[1])
message.add_header(*header)
elif isinstance(header, dict):
message.add_header(**header)
def mime(self):
"""Produce the final MIME message."""
author = self.author
sender = self.sender
if not author and sender:
msg = 'Please specify the author using the "author" property. ' + \
'Using "sender" for the From header is deprecated!'
warnings.warn(msg, category=DeprecationWarning)
author = sender
sender = []
if not author:
raise ValueError('You must specify an author.')
assert self.subject, "You must specify a subject."
assert len(self.recipients) > 0, "You must specify at least one recipient."
assert self.plain, "You must provide plain text content."
if len(author) > 1 and len(sender) == 0:
raise ValueError('If there are multiple authors of message, you must specify a sender!')
if len(sender) > 1:
raise ValueError('You must not specify more than one sender!')
if not self._dirty and self._processed and not interface.config.get("mail.debug", False):
return self._mime
self._processed = False
plain = MIMEText(self._callable(self.plain).encode(self.encoding), 'plain', self.encoding)
rich = None
if self.rich:
rich = MIMEText(self._callable(self.rich).encode(self.encoding), 'html', self.encoding)
message = self.mime_document(plain, rich)
headers = self._build_header_list(author, sender)
self._add_headers_to_message(message, headers)
| self._mime = message
self._processed = True
self._dirty = False
return message
mime = property(mime)
def attach(self, file, name=None):
"""Attach an on-disk file to this message."""
part = MIMEBase('application', "octet-stream")
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
else:
assert name is not None, "If attaching a file-like object, you must pass a custom filename, as one can not be inferred."
fp = file
part.set_payload(fp.read())
encode_base64(part)
part.add_header('Content-Disposition', 'attachment', filename=os.path.basename([name, file][name is None]))
self.attachments.append(part)
def embed(self, file, name=None):
"""Attach an on-disk image file and prepare for HTML embedding.
This method should only be used to embed images.
@param file: The path to the file you wish to attach, or an
instance of a file-like object.
@param name: You can optionally override the filename of the
attached file. This name will appear in the
recipient's mail viewer. B{Optional if passing
an on-disk path. Required if passing a file-like
object.}
@type name: string
"""
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
name = os.path.basename(file)
else:
assert name is not None, "If embedding a file-like object, you must pass a custom filename."
fp = file
part = MIMEImage(fp.read(), name=name)
fp.close()
del part['Content-Disposition']
part.add_header('Content-Disposition', 'inline', filename=name)
part.add_header('Content-ID', '<%s>' % name)
self.embedded.append(part)
def _callable(self, var):
if callable(var):
return var()
return var
# --------------------------------------------------------------------------
# Deprecated properties
def get_replyto(self):
self._warn_about_deprecated_property('replyto', 'reply_to')
return self.reply_to
def set_replyto(self, replyto):
self._warn_about_deprecated_property('replyto', 'reply_to')
self.reply_to = replyto
replyto = property(fget=get_replyto, fset=set_replyto) | random_line_split | |
message.py | # encoding: utf-8
"""MIME-encoded electronic mail message classes."""
import logging
import os
import time
import warnings
from datetime import datetime
from turbomail import release
from turbomail.compat import Header, MIMEBase, MIMEImage, MIMEMultipart, MIMEText, encode_base64, formatdate, make_msgid
from turbomail.util import Address, AddressList
from turbomail.control import interface
__all__ = ['Message']
log = logging.getLogger("turbomail.message")
class NoDefault(object):
pass
class BaseMessage(object):
def __init__(self, smtp_from=None, to=None, kw=None):
self.merge_if_set(kw, smtp_from, 'smtp_from')
self._smtp_from = AddressList(self.pop_deprecated(kw, 'smtp_from', 'smtpfrom'))
self._to = AddressList(self.pop_deprecated(kw, 'to', 'recipient', value=to))
self.nr_retries = self.kwpop(kw, 'nr_retries', default=3)
smtp_from = AddressList.protected('_smtp_from')
to = AddressList.protected('_to')
def merge_if_set(self, kw, value, name):
if value not in [None, '']:
kw[name] = value
def kwpop(self, kw, name, configkey=None, default=None, old_configkey=None):
if name in kw:
value = kw.pop(name)
else:
if configkey == None:
configkey = 'mail.message.%s' % name
value = interface.config.get(configkey, NoDefault)
if value == NoDefault and old_configkey != None:
value = interface.config.get(old_configkey, NoDefault)
if value != NoDefault:
msg = 'Falling back to deprecated configuration option "%s", please use "%s" instead'
warnings.warn(msg % (old_configkey, configkey),
category=DeprecationWarning)
if value == NoDefault:
value = default
return value
def pop_deprecated(self, kw, new_name, deprecated_name, value=None):
deprecated_value = self.kwpop(kw, deprecated_name)
if deprecated_value != None:
self._warn_about_deprecated_property(deprecated_name, new_name)
value = deprecated_value
elif value == None:
value = self.kwpop(kw, new_name)
return value
def send(self):
return interface.send(self)
# --------------------------------------------------------------------------
# Deprecated properties
def _warn_about_deprecated_property(self, deprecated_name, new_name):
msg = 'Property "%s" is deprecated, please use "%s" instead'
warnings.warn(msg % (deprecated_name, new_name), category=DeprecationWarning)
def get_smtpfrom(self):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
return self.smtp_from
def set_smtpfrom(self, smtpfrom):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
self.smtp_from = smtpfrom
smtpfrom = property(fget=get_smtpfrom, fset=set_smtpfrom)
def get_recipient(self):
self._warn_about_deprecated_property('recipient', 'to')
return self.to
def set_recipient(self, recipient):
self._warn_about_deprecated_property('recipient', 'to')
self.to = recipient
recipient = property(fget=get_recipient, fset=set_recipient)
class Message(BaseMessage):
"""Simple e-mail message class."""
def __init__(self, author=None, to=None, subject=None, **kw):
|
author = AddressList.protected('_author')
bcc = AddressList.protected('_bcc')
cc = AddressList.protected('_cc')
disposition = AddressList.protected('_disposition')
reply_to = AddressList.protected('_reply_to')
sender = AddressList.protected('_sender')
def __setattr__(self, name, value):
"""Set the dirty flag as properties are updated."""
super(Message, self).__setattr__(name, value)
if name not in ('bcc', '_dirty', '_processed'):
self.__dict__['_dirty'] = True
def __str__(self):
return self.mime.as_string()
def _get_authors(self):
# Just for better readability. I put this method here because a wrapped
# message must only have one author (smtp_from) so this method is only
# useful in a TurboMail Message.
return self.author
def _set_authors(self, value):
self.author = value
authors = property(_get_authors, _set_authors)
def id(self):
if not self._id or (self._processed and self._dirty):
self.__dict__['_id'] = make_msgid()
self._processed = False
return self._id
id = property(id)
def envelope_sender(self):
"""Returns the address of the envelope sender address (SMTP from, if not
set the sender, if this one isn't set too, the author)."""
envelope_sender = None
# TODO: Make this check better as soon as SMTP from and sender are
# Addresses, not AddressLists anymore.
if self.smtp_from != None and len(self.smtp_from) > 0:
envelope_sender = self.smtp_from
elif self.sender != None and len(self.sender) > 0:
envelope_sender = self.sender
else:
envelope_sender = self.author
return Address(envelope_sender)
envelope_sender = property(envelope_sender)
def recipients(self):
return AddressList(self.to + self.cc + self.bcc)
recipients = property(recipients)
def mime_document(self, plain, rich=None):
if not rich:
message = plain
else:
message = MIMEMultipart('alternative')
message.attach(plain)
if not self.embedded:
message.attach(rich)
else:
embedded = MIMEMultipart('related')
embedded.attach(rich)
for attachment in self.embedded: embedded.attach(attachment)
message.attach(embedded)
if self.attachments:
attachments = MIMEMultipart()
attachments.attach(message)
for attachment in self.attachments: attachments.attach(attachment)
message = attachments
return message
def _build_date_header_string(self, date_value):
"""Gets the date_value (may be None, basestring, float or
datetime.datetime instance) and returns a valid date string as per
RFC 2822."""
if isinstance(date_value, datetime):
date_value = time.mktime(date_value.timetuple())
if not isinstance(date_value, basestring):
date_value = formatdate(date_value, localtime=True)
return date_value
def _build_header_list(self, author, sender):
date_value = self._build_date_header_string(self.date)
headers = [
('Sender', sender),
('From', author),
('Reply-To', self.reply_to),
('Subject', self.subject),
('Date', date_value),
('To', self.to),
('Cc', self.cc),
('Disposition-Notification-To', self.disposition),
('Organization', self.organization),
('X-Priority', self.priority),
]
if interface.config.get("mail.brand", True):
headers.extend([
('X-Mailer', "%s %s" % (release.name, release.version)),
])
if isinstance(self.headers, dict):
for key in self.headers:
headers.append((key, self.headers[key]))
else:
headers.extend(self.headers)
return headers
def _add_headers_to_message(self, message, headers):
for header in headers:
if isinstance(header, (tuple, list)):
if header[1] is None or ( isinstance(header[1], list) and not header[1] ): continue
header = list(header)
if isinstance(header[1], unicode):
header[1] = Header(header[1], self.encoding)
elif isinstance(header[1], AddressList):
header[1] = header[1].encode(self.encoding)
header[1] = str(header[1])
message.add_header(*header)
elif isinstance(header, dict):
message.add_header(**header)
def mime(self):
"""Produce the final MIME message."""
author = self.author
sender = self.sender
if not author and sender:
msg = 'Please specify the author using the "author" property. ' + \
'Using "sender" for the From header is deprecated!'
warnings.warn(msg, category=DeprecationWarning)
author = sender
sender = []
if not author:
raise ValueError('You must specify an author.')
assert self.subject, "You must specify a subject."
assert len(self.recipients) > 0, "You must specify at least one recipient."
assert self.plain, "You must provide plain text content."
if len(author) > 1 and len(sender) == 0:
raise ValueError('If there are multiple authors of message, you must specify a sender!')
if len(sender) > 1:
raise ValueError('You must not specify more than one sender!')
if not self._dirty and self._processed and not interface.config.get("mail.debug", False):
return self._mime
self._processed = False
plain = MIMEText(self._callable(self.plain).encode(self.encoding), 'plain', self.encoding)
rich = None
if self.rich:
rich = MIMEText(self._callable(self.rich).encode(self.encoding), 'html', self.encoding)
message = self.mime_document(plain, rich)
headers = self._build_header_list(author, sender)
self._add_headers_to_message(message, headers)
self._mime = message
self._processed = True
self._dirty = False
return message
mime = property(mime)
def attach(self, file, name=None):
"""Attach an on-disk file to this message."""
part = MIMEBase('application', "octet-stream")
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
else:
assert name is not None, "If attaching a file-like object, you must pass a custom filename, as one can not be inferred."
fp = file
part.set_payload(fp.read())
encode_base64(part)
part.add_header('Content-Disposition', 'attachment', filename=os.path.basename([name, file][name is None]))
self.attachments.append(part)
def embed(self, file, name=None):
"""Attach an on-disk image file and prepare for HTML embedding.
This method should only be used to embed images.
@param file: The path to the file you wish to attach, or an
instance of a file-like object.
@param name: You can optionally override the filename of the
attached file. This name will appear in the
recipient's mail viewer. B{Optional if passing
an on-disk path. Required if passing a file-like
object.}
@type name: string
"""
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
name = os.path.basename(file)
else:
assert name is not None, "If embedding a file-like object, you must pass a custom filename."
fp = file
part = MIMEImage(fp.read(), name=name)
fp.close()
del part['Content-Disposition']
part.add_header('Content-Disposition', 'inline', filename=name)
part.add_header('Content-ID', '<%s>' % name)
self.embedded.append(part)
def _callable(self, var):
if callable(var):
return var()
return var
# --------------------------------------------------------------------------
# Deprecated properties
def get_replyto(self):
self._warn_about_deprecated_property('replyto', 'reply_to')
return self.reply_to
def set_replyto(self, replyto):
self._warn_about_deprecated_property('replyto', 'reply_to')
self.reply_to = replyto
replyto = property(fget=get_replyto, fset=set_replyto)
| """Instantiate a new Message object.
No arguments are required, as everything can be set using class
properties. Alternatively, I{everything} can be set using the
constructor, using named arguments. The first three positional
arguments can be used to quickly prepare a simple message.
"""
super(Message, self).__init__(to=to, kw=kw)
kwpop = lambda *args, **kwargs: self.kwpop(kw, *args, **kwargs)
pop_deprecated = lambda name, old_name: self.pop_deprecated(kw, name, old_name)
self.merge_if_set(kw, author, 'author')
self._author = AddressList(kwpop('author'))
self._cc = AddressList(kwpop("cc"))
self._bcc = AddressList(kwpop("bcc"))
self._sender = AddressList(kwpop("sender"))
self._reply_to = AddressList(pop_deprecated('reply_to', 'replyto'))
self._disposition = AddressList(kwpop("disposition"))
self.subject = subject
self.date = kwpop("date")
self.encoding = kwpop("encoding", default='us-ascii',
old_configkey='mail.encoding')
self.organization = kwpop("organization")
self.priority = kwpop("priority")
self.plain = kwpop("plain", default=None)
self.rich = kwpop("rich", default=None)
self.attachments = kwpop("attachments", default=[])
self.embedded = kwpop("embedded", default=[])
self.headers = kwpop("headers", default=[])
self._id = kw.get("id", None)
self._processed = False
self._dirty = False
if len(kw) > 0:
parameter_name = kw.keys()[0]
error_msg = "__init__() got an unexpected keyword argument '%s'"
raise TypeError(error_msg % parameter_name) | identifier_body |
message.py | # encoding: utf-8
"""MIME-encoded electronic mail message classes."""
import logging
import os
import time
import warnings
from datetime import datetime
from turbomail import release
from turbomail.compat import Header, MIMEBase, MIMEImage, MIMEMultipart, MIMEText, encode_base64, formatdate, make_msgid
from turbomail.util import Address, AddressList
from turbomail.control import interface
__all__ = ['Message']
log = logging.getLogger("turbomail.message")
class NoDefault(object):
pass
class BaseMessage(object):
def __init__(self, smtp_from=None, to=None, kw=None):
self.merge_if_set(kw, smtp_from, 'smtp_from')
self._smtp_from = AddressList(self.pop_deprecated(kw, 'smtp_from', 'smtpfrom'))
self._to = AddressList(self.pop_deprecated(kw, 'to', 'recipient', value=to))
self.nr_retries = self.kwpop(kw, 'nr_retries', default=3)
smtp_from = AddressList.protected('_smtp_from')
to = AddressList.protected('_to')
def merge_if_set(self, kw, value, name):
if value not in [None, '']:
kw[name] = value
def kwpop(self, kw, name, configkey=None, default=None, old_configkey=None):
if name in kw:
value = kw.pop(name)
else:
if configkey == None:
configkey = 'mail.message.%s' % name
value = interface.config.get(configkey, NoDefault)
if value == NoDefault and old_configkey != None:
value = interface.config.get(old_configkey, NoDefault)
if value != NoDefault:
msg = 'Falling back to deprecated configuration option "%s", please use "%s" instead'
warnings.warn(msg % (old_configkey, configkey),
category=DeprecationWarning)
if value == NoDefault:
value = default
return value
def | (self, kw, new_name, deprecated_name, value=None):
deprecated_value = self.kwpop(kw, deprecated_name)
if deprecated_value != None:
self._warn_about_deprecated_property(deprecated_name, new_name)
value = deprecated_value
elif value == None:
value = self.kwpop(kw, new_name)
return value
def send(self):
return interface.send(self)
# --------------------------------------------------------------------------
# Deprecated properties
def _warn_about_deprecated_property(self, deprecated_name, new_name):
msg = 'Property "%s" is deprecated, please use "%s" instead'
warnings.warn(msg % (deprecated_name, new_name), category=DeprecationWarning)
def get_smtpfrom(self):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
return self.smtp_from
def set_smtpfrom(self, smtpfrom):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
self.smtp_from = smtpfrom
smtpfrom = property(fget=get_smtpfrom, fset=set_smtpfrom)
def get_recipient(self):
self._warn_about_deprecated_property('recipient', 'to')
return self.to
def set_recipient(self, recipient):
self._warn_about_deprecated_property('recipient', 'to')
self.to = recipient
recipient = property(fget=get_recipient, fset=set_recipient)
class Message(BaseMessage):
"""Simple e-mail message class."""
def __init__(self, author=None, to=None, subject=None, **kw):
"""Instantiate a new Message object.
No arguments are required, as everything can be set using class
properties. Alternatively, I{everything} can be set using the
constructor, using named arguments. The first three positional
arguments can be used to quickly prepare a simple message.
"""
super(Message, self).__init__(to=to, kw=kw)
kwpop = lambda *args, **kwargs: self.kwpop(kw, *args, **kwargs)
pop_deprecated = lambda name, old_name: self.pop_deprecated(kw, name, old_name)
self.merge_if_set(kw, author, 'author')
self._author = AddressList(kwpop('author'))
self._cc = AddressList(kwpop("cc"))
self._bcc = AddressList(kwpop("bcc"))
self._sender = AddressList(kwpop("sender"))
self._reply_to = AddressList(pop_deprecated('reply_to', 'replyto'))
self._disposition = AddressList(kwpop("disposition"))
self.subject = subject
self.date = kwpop("date")
self.encoding = kwpop("encoding", default='us-ascii',
old_configkey='mail.encoding')
self.organization = kwpop("organization")
self.priority = kwpop("priority")
self.plain = kwpop("plain", default=None)
self.rich = kwpop("rich", default=None)
self.attachments = kwpop("attachments", default=[])
self.embedded = kwpop("embedded", default=[])
self.headers = kwpop("headers", default=[])
self._id = kw.get("id", None)
self._processed = False
self._dirty = False
if len(kw) > 0:
parameter_name = kw.keys()[0]
error_msg = "__init__() got an unexpected keyword argument '%s'"
raise TypeError(error_msg % parameter_name)
author = AddressList.protected('_author')
bcc = AddressList.protected('_bcc')
cc = AddressList.protected('_cc')
disposition = AddressList.protected('_disposition')
reply_to = AddressList.protected('_reply_to')
sender = AddressList.protected('_sender')
def __setattr__(self, name, value):
"""Set the dirty flag as properties are updated."""
super(Message, self).__setattr__(name, value)
if name not in ('bcc', '_dirty', '_processed'):
self.__dict__['_dirty'] = True
def __str__(self):
return self.mime.as_string()
def _get_authors(self):
# Just for better readability. I put this method here because a wrapped
# message must only have one author (smtp_from) so this method is only
# useful in a TurboMail Message.
return self.author
def _set_authors(self, value):
self.author = value
authors = property(_get_authors, _set_authors)
def id(self):
if not self._id or (self._processed and self._dirty):
self.__dict__['_id'] = make_msgid()
self._processed = False
return self._id
id = property(id)
def envelope_sender(self):
"""Returns the address of the envelope sender address (SMTP from, if not
set the sender, if this one isn't set too, the author)."""
envelope_sender = None
# TODO: Make this check better as soon as SMTP from and sender are
# Addresses, not AddressLists anymore.
if self.smtp_from != None and len(self.smtp_from) > 0:
envelope_sender = self.smtp_from
elif self.sender != None and len(self.sender) > 0:
envelope_sender = self.sender
else:
envelope_sender = self.author
return Address(envelope_sender)
envelope_sender = property(envelope_sender)
def recipients(self):
return AddressList(self.to + self.cc + self.bcc)
recipients = property(recipients)
def mime_document(self, plain, rich=None):
if not rich:
message = plain
else:
message = MIMEMultipart('alternative')
message.attach(plain)
if not self.embedded:
message.attach(rich)
else:
embedded = MIMEMultipart('related')
embedded.attach(rich)
for attachment in self.embedded: embedded.attach(attachment)
message.attach(embedded)
if self.attachments:
attachments = MIMEMultipart()
attachments.attach(message)
for attachment in self.attachments: attachments.attach(attachment)
message = attachments
return message
def _build_date_header_string(self, date_value):
"""Gets the date_value (may be None, basestring, float or
datetime.datetime instance) and returns a valid date string as per
RFC 2822."""
if isinstance(date_value, datetime):
date_value = time.mktime(date_value.timetuple())
if not isinstance(date_value, basestring):
date_value = formatdate(date_value, localtime=True)
return date_value
def _build_header_list(self, author, sender):
date_value = self._build_date_header_string(self.date)
headers = [
('Sender', sender),
('From', author),
('Reply-To', self.reply_to),
('Subject', self.subject),
('Date', date_value),
('To', self.to),
('Cc', self.cc),
('Disposition-Notification-To', self.disposition),
('Organization', self.organization),
('X-Priority', self.priority),
]
if interface.config.get("mail.brand", True):
headers.extend([
('X-Mailer', "%s %s" % (release.name, release.version)),
])
if isinstance(self.headers, dict):
for key in self.headers:
headers.append((key, self.headers[key]))
else:
headers.extend(self.headers)
return headers
def _add_headers_to_message(self, message, headers):
for header in headers:
if isinstance(header, (tuple, list)):
if header[1] is None or ( isinstance(header[1], list) and not header[1] ): continue
header = list(header)
if isinstance(header[1], unicode):
header[1] = Header(header[1], self.encoding)
elif isinstance(header[1], AddressList):
header[1] = header[1].encode(self.encoding)
header[1] = str(header[1])
message.add_header(*header)
elif isinstance(header, dict):
message.add_header(**header)
def mime(self):
"""Produce the final MIME message."""
author = self.author
sender = self.sender
if not author and sender:
msg = 'Please specify the author using the "author" property. ' + \
'Using "sender" for the From header is deprecated!'
warnings.warn(msg, category=DeprecationWarning)
author = sender
sender = []
if not author:
raise ValueError('You must specify an author.')
assert self.subject, "You must specify a subject."
assert len(self.recipients) > 0, "You must specify at least one recipient."
assert self.plain, "You must provide plain text content."
if len(author) > 1 and len(sender) == 0:
raise ValueError('If there are multiple authors of message, you must specify a sender!')
if len(sender) > 1:
raise ValueError('You must not specify more than one sender!')
if not self._dirty and self._processed and not interface.config.get("mail.debug", False):
return self._mime
self._processed = False
plain = MIMEText(self._callable(self.plain).encode(self.encoding), 'plain', self.encoding)
rich = None
if self.rich:
rich = MIMEText(self._callable(self.rich).encode(self.encoding), 'html', self.encoding)
message = self.mime_document(plain, rich)
headers = self._build_header_list(author, sender)
self._add_headers_to_message(message, headers)
self._mime = message
self._processed = True
self._dirty = False
return message
mime = property(mime)
def attach(self, file, name=None):
"""Attach an on-disk file to this message."""
part = MIMEBase('application', "octet-stream")
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
else:
assert name is not None, "If attaching a file-like object, you must pass a custom filename, as one can not be inferred."
fp = file
part.set_payload(fp.read())
encode_base64(part)
part.add_header('Content-Disposition', 'attachment', filename=os.path.basename([name, file][name is None]))
self.attachments.append(part)
def embed(self, file, name=None):
"""Attach an on-disk image file and prepare for HTML embedding.
This method should only be used to embed images.
@param file: The path to the file you wish to attach, or an
instance of a file-like object.
@param name: You can optionally override the filename of the
attached file. This name will appear in the
recipient's mail viewer. B{Optional if passing
an on-disk path. Required if passing a file-like
object.}
@type name: string
"""
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
name = os.path.basename(file)
else:
assert name is not None, "If embedding a file-like object, you must pass a custom filename."
fp = file
part = MIMEImage(fp.read(), name=name)
fp.close()
del part['Content-Disposition']
part.add_header('Content-Disposition', 'inline', filename=name)
part.add_header('Content-ID', '<%s>' % name)
self.embedded.append(part)
def _callable(self, var):
if callable(var):
return var()
return var
# --------------------------------------------------------------------------
# Deprecated properties
def get_replyto(self):
self._warn_about_deprecated_property('replyto', 'reply_to')
return self.reply_to
def set_replyto(self, replyto):
self._warn_about_deprecated_property('replyto', 'reply_to')
self.reply_to = replyto
replyto = property(fget=get_replyto, fset=set_replyto)
| pop_deprecated | identifier_name |
message.py | # encoding: utf-8
"""MIME-encoded electronic mail message classes."""
import logging
import os
import time
import warnings
from datetime import datetime
from turbomail import release
from turbomail.compat import Header, MIMEBase, MIMEImage, MIMEMultipart, MIMEText, encode_base64, formatdate, make_msgid
from turbomail.util import Address, AddressList
from turbomail.control import interface
__all__ = ['Message']
log = logging.getLogger("turbomail.message")
class NoDefault(object):
pass
class BaseMessage(object):
def __init__(self, smtp_from=None, to=None, kw=None):
self.merge_if_set(kw, smtp_from, 'smtp_from')
self._smtp_from = AddressList(self.pop_deprecated(kw, 'smtp_from', 'smtpfrom'))
self._to = AddressList(self.pop_deprecated(kw, 'to', 'recipient', value=to))
self.nr_retries = self.kwpop(kw, 'nr_retries', default=3)
smtp_from = AddressList.protected('_smtp_from')
to = AddressList.protected('_to')
def merge_if_set(self, kw, value, name):
if value not in [None, '']:
kw[name] = value
def kwpop(self, kw, name, configkey=None, default=None, old_configkey=None):
if name in kw:
value = kw.pop(name)
else:
if configkey == None:
configkey = 'mail.message.%s' % name
value = interface.config.get(configkey, NoDefault)
if value == NoDefault and old_configkey != None:
value = interface.config.get(old_configkey, NoDefault)
if value != NoDefault:
msg = 'Falling back to deprecated configuration option "%s", please use "%s" instead'
warnings.warn(msg % (old_configkey, configkey),
category=DeprecationWarning)
if value == NoDefault:
value = default
return value
def pop_deprecated(self, kw, new_name, deprecated_name, value=None):
deprecated_value = self.kwpop(kw, deprecated_name)
if deprecated_value != None:
self._warn_about_deprecated_property(deprecated_name, new_name)
value = deprecated_value
elif value == None:
value = self.kwpop(kw, new_name)
return value
def send(self):
return interface.send(self)
# --------------------------------------------------------------------------
# Deprecated properties
def _warn_about_deprecated_property(self, deprecated_name, new_name):
msg = 'Property "%s" is deprecated, please use "%s" instead'
warnings.warn(msg % (deprecated_name, new_name), category=DeprecationWarning)
def get_smtpfrom(self):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
return self.smtp_from
def set_smtpfrom(self, smtpfrom):
self._warn_about_deprecated_property('smtpfrom', 'smtp_from')
self.smtp_from = smtpfrom
smtpfrom = property(fget=get_smtpfrom, fset=set_smtpfrom)
def get_recipient(self):
self._warn_about_deprecated_property('recipient', 'to')
return self.to
def set_recipient(self, recipient):
self._warn_about_deprecated_property('recipient', 'to')
self.to = recipient
recipient = property(fget=get_recipient, fset=set_recipient)
class Message(BaseMessage):
"""Simple e-mail message class."""
def __init__(self, author=None, to=None, subject=None, **kw):
"""Instantiate a new Message object.
No arguments are required, as everything can be set using class
properties. Alternatively, I{everything} can be set using the
constructor, using named arguments. The first three positional
arguments can be used to quickly prepare a simple message.
"""
super(Message, self).__init__(to=to, kw=kw)
kwpop = lambda *args, **kwargs: self.kwpop(kw, *args, **kwargs)
pop_deprecated = lambda name, old_name: self.pop_deprecated(kw, name, old_name)
self.merge_if_set(kw, author, 'author')
self._author = AddressList(kwpop('author'))
self._cc = AddressList(kwpop("cc"))
self._bcc = AddressList(kwpop("bcc"))
self._sender = AddressList(kwpop("sender"))
self._reply_to = AddressList(pop_deprecated('reply_to', 'replyto'))
self._disposition = AddressList(kwpop("disposition"))
self.subject = subject
self.date = kwpop("date")
self.encoding = kwpop("encoding", default='us-ascii',
old_configkey='mail.encoding')
self.organization = kwpop("organization")
self.priority = kwpop("priority")
self.plain = kwpop("plain", default=None)
self.rich = kwpop("rich", default=None)
self.attachments = kwpop("attachments", default=[])
self.embedded = kwpop("embedded", default=[])
self.headers = kwpop("headers", default=[])
self._id = kw.get("id", None)
self._processed = False
self._dirty = False
if len(kw) > 0:
parameter_name = kw.keys()[0]
error_msg = "__init__() got an unexpected keyword argument '%s'"
raise TypeError(error_msg % parameter_name)
author = AddressList.protected('_author')
bcc = AddressList.protected('_bcc')
cc = AddressList.protected('_cc')
disposition = AddressList.protected('_disposition')
reply_to = AddressList.protected('_reply_to')
sender = AddressList.protected('_sender')
def __setattr__(self, name, value):
"""Set the dirty flag as properties are updated."""
super(Message, self).__setattr__(name, value)
if name not in ('bcc', '_dirty', '_processed'):
self.__dict__['_dirty'] = True
def __str__(self):
return self.mime.as_string()
def _get_authors(self):
# Just for better readability. I put this method here because a wrapped
# message must only have one author (smtp_from) so this method is only
# useful in a TurboMail Message.
return self.author
def _set_authors(self, value):
self.author = value
authors = property(_get_authors, _set_authors)
def id(self):
if not self._id or (self._processed and self._dirty):
self.__dict__['_id'] = make_msgid()
self._processed = False
return self._id
id = property(id)
def envelope_sender(self):
"""Returns the address of the envelope sender address (SMTP from, if not
set the sender, if this one isn't set too, the author)."""
envelope_sender = None
# TODO: Make this check better as soon as SMTP from and sender are
# Addresses, not AddressLists anymore.
if self.smtp_from != None and len(self.smtp_from) > 0:
envelope_sender = self.smtp_from
elif self.sender != None and len(self.sender) > 0:
envelope_sender = self.sender
else:
envelope_sender = self.author
return Address(envelope_sender)
envelope_sender = property(envelope_sender)
def recipients(self):
return AddressList(self.to + self.cc + self.bcc)
recipients = property(recipients)
def mime_document(self, plain, rich=None):
if not rich:
message = plain
else:
message = MIMEMultipart('alternative')
message.attach(plain)
if not self.embedded:
message.attach(rich)
else:
embedded = MIMEMultipart('related')
embedded.attach(rich)
for attachment in self.embedded: embedded.attach(attachment)
message.attach(embedded)
if self.attachments:
attachments = MIMEMultipart()
attachments.attach(message)
for attachment in self.attachments: attachments.attach(attachment)
message = attachments
return message
def _build_date_header_string(self, date_value):
"""Gets the date_value (may be None, basestring, float or
datetime.datetime instance) and returns a valid date string as per
RFC 2822."""
if isinstance(date_value, datetime):
date_value = time.mktime(date_value.timetuple())
if not isinstance(date_value, basestring):
date_value = formatdate(date_value, localtime=True)
return date_value
def _build_header_list(self, author, sender):
date_value = self._build_date_header_string(self.date)
headers = [
('Sender', sender),
('From', author),
('Reply-To', self.reply_to),
('Subject', self.subject),
('Date', date_value),
('To', self.to),
('Cc', self.cc),
('Disposition-Notification-To', self.disposition),
('Organization', self.organization),
('X-Priority', self.priority),
]
if interface.config.get("mail.brand", True):
headers.extend([
('X-Mailer', "%s %s" % (release.name, release.version)),
])
if isinstance(self.headers, dict):
for key in self.headers:
headers.append((key, self.headers[key]))
else:
headers.extend(self.headers)
return headers
def _add_headers_to_message(self, message, headers):
for header in headers:
if isinstance(header, (tuple, list)):
if header[1] is None or ( isinstance(header[1], list) and not header[1] ): continue
header = list(header)
if isinstance(header[1], unicode):
header[1] = Header(header[1], self.encoding)
elif isinstance(header[1], AddressList):
header[1] = header[1].encode(self.encoding)
header[1] = str(header[1])
message.add_header(*header)
elif isinstance(header, dict):
|
def mime(self):
"""Produce the final MIME message."""
author = self.author
sender = self.sender
if not author and sender:
msg = 'Please specify the author using the "author" property. ' + \
'Using "sender" for the From header is deprecated!'
warnings.warn(msg, category=DeprecationWarning)
author = sender
sender = []
if not author:
raise ValueError('You must specify an author.')
assert self.subject, "You must specify a subject."
assert len(self.recipients) > 0, "You must specify at least one recipient."
assert self.plain, "You must provide plain text content."
if len(author) > 1 and len(sender) == 0:
raise ValueError('If there are multiple authors of message, you must specify a sender!')
if len(sender) > 1:
raise ValueError('You must not specify more than one sender!')
if not self._dirty and self._processed and not interface.config.get("mail.debug", False):
return self._mime
self._processed = False
plain = MIMEText(self._callable(self.plain).encode(self.encoding), 'plain', self.encoding)
rich = None
if self.rich:
rich = MIMEText(self._callable(self.rich).encode(self.encoding), 'html', self.encoding)
message = self.mime_document(plain, rich)
headers = self._build_header_list(author, sender)
self._add_headers_to_message(message, headers)
self._mime = message
self._processed = True
self._dirty = False
return message
mime = property(mime)
def attach(self, file, name=None):
"""Attach an on-disk file to this message."""
part = MIMEBase('application', "octet-stream")
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
else:
assert name is not None, "If attaching a file-like object, you must pass a custom filename, as one can not be inferred."
fp = file
part.set_payload(fp.read())
encode_base64(part)
part.add_header('Content-Disposition', 'attachment', filename=os.path.basename([name, file][name is None]))
self.attachments.append(part)
def embed(self, file, name=None):
"""Attach an on-disk image file and prepare for HTML embedding.
This method should only be used to embed images.
@param file: The path to the file you wish to attach, or an
instance of a file-like object.
@param name: You can optionally override the filename of the
attached file. This name will appear in the
recipient's mail viewer. B{Optional if passing
an on-disk path. Required if passing a file-like
object.}
@type name: string
"""
if isinstance(file, (str, unicode)):
fp = open(file, "rb")
name = os.path.basename(file)
else:
assert name is not None, "If embedding a file-like object, you must pass a custom filename."
fp = file
part = MIMEImage(fp.read(), name=name)
fp.close()
del part['Content-Disposition']
part.add_header('Content-Disposition', 'inline', filename=name)
part.add_header('Content-ID', '<%s>' % name)
self.embedded.append(part)
def _callable(self, var):
if callable(var):
return var()
return var
# --------------------------------------------------------------------------
# Deprecated properties
def get_replyto(self):
self._warn_about_deprecated_property('replyto', 'reply_to')
return self.reply_to
def set_replyto(self, replyto):
self._warn_about_deprecated_property('replyto', 'reply_to')
self.reply_to = replyto
replyto = property(fget=get_replyto, fset=set_replyto)
| message.add_header(**header) | conditional_block |
kubeseal.go | package kubeseal
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
ssv1alpha1 "github.com/bitnami-labs/sealed-secrets/pkg/apis/sealedsecrets/v1alpha1"
"github.com/bitnami-labs/sealed-secrets/pkg/crypto"
"github.com/bitnami-labs/sealed-secrets/pkg/multidocyaml"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
)
type ClientConfig interface {
ClientConfig() (*rest.Config, error)
Namespace() (string, bool, error)
}
func ParseKey(r io.Reader) (*rsa.PublicKey, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
certs, err := cert.ParseCertsPEM(data)
if err != nil {
return nil, err
}
// ParseCertsPem returns error if len(certs) == 0, but best to be sure...
if len(certs) == 0 {
return nil, errors.New("failed to read any certificates")
}
cert, ok := certs[0].PublicKey.(*rsa.PublicKey)
if !ok {
return nil, fmt.Errorf("expected RSA public key but found %v", certs[0].PublicKey)
}
if time.Now().After(certs[0].NotAfter) {
return nil, fmt.Errorf("failed to encrypt using an expired certificate on %v", certs[0].NotAfter.Format("January 2, 2006"))
}
return cert, nil
}
func readSecret(codec runtime.Decoder, r io.Reader) (*v1.Secret, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
if err := multidocyaml.EnsureNotMultiDoc(data); err != nil {
return nil, err
}
var ret v1.Secret
if err = runtime.DecodeInto(codec, data, &ret); err != nil {
return nil, err
}
return &ret, nil
}
func prettyEncoder(codecs runtimeserializer.CodecFactory, mediaType string, gv runtime.GroupVersioner) (runtime.Encoder, error) {
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("binary can't serialize %s", mediaType)
}
prettyEncoder := info.PrettySerializer
if prettyEncoder == nil {
prettyEncoder = info.Serializer
}
enc := codecs.EncoderForVersion(prettyEncoder, gv)
return enc, nil
}
func isFilename(name string) (bool, error) {
u, err := url.Parse(name)
if err != nil {
return false, err
}
// windows drive letters
if s := strings.ToLower(u.Scheme); len(s) == 1 && s[0] >= 'a' && s[0] <= 'z' {
return true, nil
}
return u.Scheme == "", nil
}
// getServicePortName obtains the SealedSecrets service port name.
func getServicePortName(ctx context.Context, client corev1.CoreV1Interface, namespace, serviceName string) (string, error) {
service, err := client.Services(namespace).Get(ctx, serviceName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("cannot get sealed secret service: %v.\nPlease, use the flag --controller-name and --controller-namespace to set up the name and namespace of the sealed secrets controller", err)
}
return service.Spec.Ports[0].Name, nil
}
// openCertLocal opens a cert URI or local filename, by fetching it locally from the client
// (as opposed as openCertCluster which fetches it via HTTP but through the k8s API proxy).
func openCertLocal(filenameOrURI string) (io.ReadCloser, error) {
// detect if a certificate is a local file or an URI.
if ok, err := isFilename(filenameOrURI); err != nil {
return nil, err
} else if ok {
// #nosec G304 -- should open user provided file
return os.Open(filenameOrURI)
}
return openCertURI(filenameOrURI)
}
func openCertURI(uri string) (io.ReadCloser, error) {
// support file:// scheme. Note: we're opening the file using os.Open rather
// than using the file:// scheme below because there is no point in complicating our lives
// and escape the filename properly.
t := &http.Transport{}
// #nosec: G111 -- we want to allow all files to be opened
t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
c := &http.Client{Transport: t}
resp, err := c.Get(uri)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("cannot fetch %q: %s", uri, resp.Status)
}
return resp.Body, nil
}
// openCertCluster fetches a certificate by performing an HTTP request to the controller
// through the k8s API proxy.
func openCertCluster(ctx context.Context, c corev1.CoreV1Interface, namespace, name string) (io.ReadCloser, error) {
portName, err := getServicePortName(ctx, c, namespace, name)
if err != nil {
return nil, err
}
cert, err := c.Services(namespace).ProxyGet("http", name, portName, "/v1/cert.pem", nil).Stream(ctx)
if err != nil {
return nil, fmt.Errorf("cannot fetch certificate: %v", err)
}
return cert, nil
}
func OpenCert(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, certURL string) (io.ReadCloser, error) {
if certURL != "" {
return openCertLocal(certURL)
}
conf, err := clientConfig.ClientConfig()
if err != nil |
conf.AcceptContentTypes = "application/x-pem-file, */*"
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return nil, err
}
return openCertCluster(ctx, restClient, controllerNs, controllerName)
}
// Seal reads a k8s Secret resource parsed from an input reader by a given codec, encrypts all its secrets
// with a given public key, using the name and namespace found in the input secret, unless explicitly overridden
// by the overrideName and overrideNamespace arguments.
func Seal(clientConfig ClientConfig, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool, overrideName, overrideNamespace string) error {
secret, err := readSecret(codecs.UniversalDecoder(), in)
if err != nil {
return err
}
if len(secret.Data) == 0 && len(secret.StringData) == 0 && !allowEmptyData {
return fmt.Errorf("secret.data is empty in input Secret, assuming this is an error and aborting. To work with empty data, --allow-empty-data can be used")
}
if overrideName != "" {
secret.Name = overrideName
}
if secret.GetName() == "" {
return fmt.Errorf("missing metadata.name in input Secret")
}
if overrideNamespace != "" {
secret.Namespace = overrideNamespace
}
if scope != ssv1alpha1.DefaultScope {
secret.Annotations = ssv1alpha1.UpdateScopeAnnotations(secret.Annotations, scope)
}
if ssv1alpha1.SecretScope(secret) != ssv1alpha1.ClusterWideScope && secret.GetNamespace() == "" {
ns, _, err := clientConfig.Namespace()
if clientcmd.IsEmptyConfig(err) {
return fmt.Errorf("input secret has no namespace and cannot infer the namespace automatically when no kube config is available")
} else if err != nil {
return err
}
secret.SetNamespace(ns)
}
// Strip read-only server-side ObjectMeta (if present)
secret.SetSelfLink("")
secret.SetUID("")
secret.SetResourceVersion("")
secret.Generation = 0
secret.SetCreationTimestamp(metav1.Time{})
secret.SetDeletionTimestamp(nil)
secret.DeletionGracePeriodSeconds = nil
ssecret, err := ssv1alpha1.NewSealedSecret(codecs, pubKey, secret)
if err != nil {
return err
}
if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
return err
}
return nil
}
func ValidateSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, in io.Reader) error {
conf, err := clientConfig.ClientConfig()
if err != nil {
return err
}
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return err
}
portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
if err != nil {
return err
}
content, err := io.ReadAll(in)
if err != nil {
return err
}
req := restClient.RESTClient().Post().
Namespace(controllerNs).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort("http", controllerName, portName)).
Suffix("/v1/verify")
req.Body(content)
res := req.Do(ctx)
if err := res.Error(); err != nil {
if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
return fmt.Errorf("unable to decrypt sealed secret")
}
return fmt.Errorf("cannot validate sealed secret: %v", err)
}
return nil
}
func ReEncryptSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory) error {
conf, err := clientConfig.ClientConfig()
if err != nil {
return err
}
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return err
}
portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
if err != nil {
return err
}
content, err := io.ReadAll(in)
if err != nil {
return err
}
req := restClient.RESTClient().Post().
Namespace(controllerNs).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort("http", controllerName, portName)).
Suffix("/v1/rotate")
req.Body(content)
res := req.Do(ctx)
if err := res.Error(); err != nil {
if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
return fmt.Errorf("unable to rotate secret")
}
return fmt.Errorf("cannot re-encrypt secret: %v", err)
}
body, err := res.Raw()
if err != nil {
return err
}
ssecret := &ssv1alpha1.SealedSecret{}
if err = json.Unmarshal(body, ssecret); err != nil {
return err
}
ssecret.SetCreationTimestamp(metav1.Time{})
ssecret.SetDeletionTimestamp(nil)
ssecret.Generation = 0
if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
return err
}
return nil
}
func resourceOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, gv runtime.GroupVersioner, obj runtime.Object) error {
var contentType string
switch strings.ToLower(outputFormat) {
case "json", "":
contentType = runtime.ContentTypeJSON
case "yaml":
contentType = runtime.ContentTypeYAML
default:
return fmt.Errorf("unsupported output format: %s", outputFormat)
}
prettyEnc, err := prettyEncoder(codecs, contentType, gv)
if err != nil {
return err
}
buf, err := runtime.Encode(prettyEnc, obj)
if err != nil {
return err
}
_, _ = out.Write(buf)
fmt.Fprint(out, "\n")
return nil
}
func sealedSecretOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, ssecret *ssv1alpha1.SealedSecret) error {
return resourceOutput(out, outputFormat, codecs, ssv1alpha1.SchemeGroupVersion, ssecret)
}
func decodeSealedSecret(codecs runtimeserializer.CodecFactory, b []byte) (*ssv1alpha1.SealedSecret, error) {
var ss ssv1alpha1.SealedSecret
if err := runtime.DecodeInto(codecs.UniversalDecoder(), b, &ss); err != nil {
return nil, err
}
return &ss, nil
}
func SealMergingInto(clientConfig ClientConfig, outputFormat string, in io.Reader, filename string, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool) error {
// #nosec G304 -- should open user provided file
f, err := os.OpenFile(filename, os.O_RDWR, 0)
if err != nil {
return err
}
// #nosec G307 -- we are explicitly managing a potential error from f.Close() at the end of the function
defer f.Close()
b, err := io.ReadAll(f)
if err != nil {
return err
}
orig, err := decodeSealedSecret(codecs, b)
if err != nil {
return err
}
var buf bytes.Buffer
if err := Seal(clientConfig, outputFormat, in, &buf, codecs, pubKey, scope, allowEmptyData, orig.Name, orig.Namespace); err != nil {
return err
}
update, err := decodeSealedSecret(codecs, buf.Bytes())
if err != nil {
return err
}
// merge encrypted data and metadata
for k, v := range update.Spec.EncryptedData {
orig.Spec.EncryptedData[k] = v
}
for k, v := range update.Spec.Template.Annotations {
orig.Spec.Template.Annotations[k] = v
}
for k, v := range update.Spec.Template.Labels {
orig.Spec.Template.Labels[k] = v
}
for k, v := range update.Spec.Template.Data {
orig.Spec.Template.Data[k] = v
}
// updated sealed secret file in-place avoiding clobbering the file upon rendering errors.
var out bytes.Buffer
if err := sealedSecretOutput(&out, outputFormat, codecs, orig); err != nil {
return err
}
if err := f.Truncate(0); err != nil {
return err
}
if _, err := f.Seek(0, 0); err != nil {
return err
}
if _, err := io.Copy(f, &out); err != nil {
return err
}
// we explicitly call f.Close() to return a potential error when closing the file that wouldn't be returned in the deferred f.Close()
if err := f.Close(); err != nil {
return err
}
return nil
}
// EncryptSecretItem hybrid-encrypts a single secret value with pubKey under
// the encryption label derived from (ns, secretName, scope) and writes the
// ciphertext to w as base64.
func EncryptSecretItem(w io.Writer, secretName, ns string, data []byte, scope ssv1alpha1.SealingScope, pubKey *rsa.PublicKey) error {
	// TODO(mkm): refactor cluster-wide/namespace-wide to an actual enum so we can have a simple flag
	// to refer to the scope mode that is not a tuple of booleans.
	label := ssv1alpha1.EncryptionLabel(ns, secretName, scope)
	ciphertext, err := crypto.HybridEncrypt(rand.Reader, pubKey, data, label)
	if err != nil {
		return err
	}
	fmt.Fprint(w, base64.StdEncoding.EncodeToString(ciphertext))
	return nil
}
// parseFromFile parses a value of the kubectl --from-file flag, which can optionally include an item name
// preceding the first equals sign.
func ParseFromFile(s string) (string, string) {
c := strings.SplitN(s, "=", 2)
if len(c) == 1 {
return "", c[0]
}
return c[0], c[1]
}
func readPrivKeysFromFile(filename string) ([]*rsa.PrivateKey, error) {
// #nosec G304 -- should open user provided file
b, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
res, err := parsePrivKey(b)
if err == nil {
return []*rsa.PrivateKey{res}, nil
}
var secrets []*v1.Secret
// try to parse it as json/yaml encoded v1.List of secrets
var lst v1.List
if err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), b, &lst); err == nil {
for _, r := range lst.Items {
s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(r.Raw))
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
} else {
// try to parse it as json/yaml encoded secret
s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(b))
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
var keys []*rsa.PrivateKey
for _, s := range secrets {
tlsKey, ok := s.Data["tls.key"]
if !ok {
return nil, fmt.Errorf("secret must contain a 'tls.data' key")
}
pk, err := parsePrivKey(tlsKey)
if err != nil {
return nil, err
}
keys = append(keys, pk)
}
return keys, nil
}
func readPrivKey(filename string) (*rsa.PrivateKey, error) {
pks, err := readPrivKeysFromFile(filename)
if err != nil {
return nil, err
}
return pks[0], nil
}
func parsePrivKey(b []byte) (*rsa.PrivateKey, error) {
key, err := keyutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, err
}
switch rsaKey := key.(type) {
case *rsa.PrivateKey:
return rsaKey, nil
default:
return nil, fmt.Errorf("unexpected private key type %T", key)
}
}
func readPrivKeys(filenames []string) (map[string]*rsa.PrivateKey, error) {
res := map[string]*rsa.PrivateKey{}
for _, filename := range filenames {
pks, err := readPrivKeysFromFile(filename)
if err != nil {
return nil, err
}
for _, pk := range pks {
fingerprint, err := crypto.PublicKeyFingerprint(&pk.PublicKey)
if err != nil {
return nil, err
}
res[fingerprint] = pk
}
}
return res, nil
}
func UnsealSealedSecret(w io.Writer, in io.Reader, privKeysFilenames []string, outputFormat string, codecs runtimeserializer.CodecFactory) error {
privKeys, err := readPrivKeys(privKeysFilenames)
if err != nil {
return err
}
b, err := io.ReadAll(in)
if err != nil {
return err
}
ss, err := decodeSealedSecret(codecs, b)
if err != nil {
return err
}
sec, err := ss.Unseal(codecs, privKeys)
if err != nil {
return err
}
return resourceOutput(w, outputFormat, codecs, v1.SchemeGroupVersion, sec)
}
| {
return nil, err
} | conditional_block |
kubeseal.go | package kubeseal
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
ssv1alpha1 "github.com/bitnami-labs/sealed-secrets/pkg/apis/sealedsecrets/v1alpha1"
"github.com/bitnami-labs/sealed-secrets/pkg/crypto"
"github.com/bitnami-labs/sealed-secrets/pkg/multidocyaml"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
)
type ClientConfig interface {
ClientConfig() (*rest.Config, error)
Namespace() (string, bool, error)
}
// ParseKey reads PEM-encoded certificates from r and returns the RSA public
// key of the first certificate.
//
// It errors when no certificate can be parsed, when the first certificate's
// public key is not RSA, or when that certificate has already expired.
func ParseKey(r io.Reader) (*rsa.PublicKey, error) {
	data, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	certs, err := cert.ParseCertsPEM(data)
	if err != nil {
		return nil, err
	}
	// ParseCertsPem returns error if len(certs) == 0, but best to be sure...
	if len(certs) == 0 {
		return nil, errors.New("failed to read any certificates")
	}
	cert, ok := certs[0].PublicKey.(*rsa.PublicKey)
	if !ok {
		return nil, fmt.Errorf("expected RSA public key but found %v", certs[0].PublicKey)
	}
	// Refuse to seal with an expired certificate: the controller would not
	// be able to decrypt what we encrypt against it.
	if time.Now().After(certs[0].NotAfter) {
		return nil, fmt.Errorf("failed to encrypt using an expired certificate on %v", certs[0].NotAfter.Format("January 2, 2006"))
	}
	return cert, nil
}
// readSecret decodes a single k8s Secret from r using codec, rejecting
// multi-document YAML input via multidocyaml.EnsureNotMultiDoc.
func readSecret(codec runtime.Decoder, r io.Reader) (*v1.Secret, error) {
	data, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	if err := multidocyaml.EnsureNotMultiDoc(data); err != nil {
		return nil, err
	}
	var ret v1.Secret
	if err = runtime.DecodeInto(codec, data, &ret); err != nil {
		return nil, err
	}
	return &ret, nil
}
// prettyEncoder returns an encoder for mediaType targeting group-version gv,
// preferring the serializer's pretty variant when one is available.
func prettyEncoder(codecs runtimeserializer.CodecFactory, mediaType string, gv runtime.GroupVersioner) (runtime.Encoder, error) {
	info, found := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
	if !found {
		return nil, fmt.Errorf("binary can't serialize %s", mediaType)
	}
	serializer := info.PrettySerializer
	if serializer == nil {
		// No pretty variant registered for this media type; fall back to
		// the plain serializer.
		serializer = info.Serializer
	}
	return codecs.EncoderForVersion(serializer, gv), nil
}
// isFilename reports whether name should be treated as a local file path
// rather than a URI. A single-letter URL scheme is assumed to be a Windows
// drive letter (e.g. "c:\certs\cert.pem") and therefore counts as a filename.
func isFilename(name string) (bool, error) {
	parsed, err := url.Parse(name)
	if err != nil {
		return false, err
	}
	scheme := strings.ToLower(parsed.Scheme)
	// Windows drive letters parse as a one-letter scheme.
	if len(scheme) == 1 && scheme[0] >= 'a' && scheme[0] <= 'z' {
		return true, nil
	}
	// No scheme at all means a plain relative/absolute path.
	return scheme == "", nil
}
// getServicePortName obtains the name of the first port exposed by the
// SealedSecrets controller service, used to address API-proxy requests.
//
// Returns an error when the service cannot be fetched or exposes no ports.
func getServicePortName(ctx context.Context, client corev1.CoreV1Interface, namespace, serviceName string) (string, error) {
	service, err := client.Services(namespace).Get(ctx, serviceName, metav1.GetOptions{})
	if err != nil {
		return "", fmt.Errorf("cannot get sealed secret service: %v.\nPlease, use the flag --controller-name and --controller-namespace to set up the name and namespace of the sealed secrets controller", err)
	}
	// BUG FIX: indexing Ports[0] on a service with an empty port list
	// panicked; report a descriptive error instead.
	if len(service.Spec.Ports) == 0 {
		return "", fmt.Errorf("sealed secret service %q in namespace %q exposes no ports", serviceName, namespace)
	}
	return service.Spec.Ports[0].Name, nil
}
// openCertLocal opens a cert URI or local filename, by fetching it locally from the client
// (as opposed as openCertCluster which fetches it via HTTP but through the k8s API proxy).
func openCertLocal(filenameOrURI string) (io.ReadCloser, error) {
	isFile, err := isFilename(filenameOrURI)
	if err != nil {
		return nil, err
	}
	if !isFile {
		// Not a plain path: treat it as a URI and fetch it.
		return openCertURI(filenameOrURI)
	}
	// #nosec G304 -- should open user provided file
	return os.Open(filenameOrURI)
}
func openCertURI(uri string) (io.ReadCloser, error) {
// support file:// scheme. Note: we're opening the file using os.Open rather
// than using the file:// scheme below because there is no point in complicating our lives
// and escape the filename properly.
t := &http.Transport{}
// #nosec: G111 -- we want to allow all files to be opened
t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
c := &http.Client{Transport: t}
resp, err := c.Get(uri)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("cannot fetch %q: %s", uri, resp.Status)
}
return resp.Body, nil
}
// openCertCluster fetches a certificate by performing an HTTP request to the controller
// through the k8s API proxy.
func openCertCluster(ctx context.Context, c corev1.CoreV1Interface, namespace, name string) (io.ReadCloser, error) {
	portName, err := getServicePortName(ctx, c, namespace, name)
	if err != nil {
		return nil, err
	}
	// Stream the controller's /v1/cert.pem endpoint via the service proxy.
	stream, err := c.Services(namespace).ProxyGet("http", name, portName, "/v1/cert.pem", nil).Stream(ctx)
	if err != nil {
		return nil, fmt.Errorf("cannot fetch certificate: %v", err)
	}
	return stream, nil
}
// OpenCert returns a reader for the sealing certificate. A non-empty certURL
// is opened locally (file or URI); otherwise the certificate is fetched from
// the in-cluster controller through the k8s API proxy.
func OpenCert(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, certURL string) (io.ReadCloser, error) {
	if certURL != "" {
		return openCertLocal(certURL)
	}
	conf, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, err
	}
	// Prefer a raw PEM response from the API server over a JSON-wrapped one.
	conf.AcceptContentTypes = "application/x-pem-file, */*"
	restClient, err := corev1.NewForConfig(conf)
	if err != nil {
		return nil, err
	}
	return openCertCluster(ctx, restClient, controllerNs, controllerName)
}
// Seal reads a k8s Secret resource parsed from an input reader by a given codec, encrypts all its secrets
// with a given public key, using the name and namespace found in the input secret, unless explicitly overridden
// by the overrideName and overrideNamespace arguments.
//
// The resulting SealedSecret is rendered to out in outputFormat (json/yaml).
func Seal(clientConfig ClientConfig, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool, overrideName, overrideNamespace string) error {
	secret, err := readSecret(codecs.UniversalDecoder(), in)
	if err != nil {
		return err
	}
	// An empty secret is usually a user mistake; require an explicit opt-in.
	if len(secret.Data) == 0 && len(secret.StringData) == 0 && !allowEmptyData {
		return fmt.Errorf("secret.data is empty in input Secret, assuming this is an error and aborting. To work with empty data, --allow-empty-data can be used")
	}
	if overrideName != "" {
		secret.Name = overrideName
	}
	if secret.GetName() == "" {
		return fmt.Errorf("missing metadata.name in input Secret")
	}
	if overrideNamespace != "" {
		secret.Namespace = overrideNamespace
	}
	if scope != ssv1alpha1.DefaultScope {
		secret.Annotations = ssv1alpha1.UpdateScopeAnnotations(secret.Annotations, scope)
	}
	// Non-cluster-wide secrets need a namespace: infer it from the kube
	// config when the input omits it.
	if ssv1alpha1.SecretScope(secret) != ssv1alpha1.ClusterWideScope && secret.GetNamespace() == "" {
		ns, _, err := clientConfig.Namespace()
		if clientcmd.IsEmptyConfig(err) {
			return fmt.Errorf("input secret has no namespace and cannot infer the namespace automatically when no kube config is available")
		} else if err != nil {
			return err
		}
		secret.SetNamespace(ns)
	}
	// Strip read-only server-side ObjectMeta (if present)
	secret.SetSelfLink("")
	secret.SetUID("")
	secret.SetResourceVersion("")
	secret.Generation = 0
	secret.SetCreationTimestamp(metav1.Time{})
	secret.SetDeletionTimestamp(nil)
	secret.DeletionGracePeriodSeconds = nil
	ssecret, err := ssv1alpha1.NewSealedSecret(codecs, pubKey, secret)
	if err != nil {
		return err
	}
	if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
		return err
	}
	return nil
}
// ValidateSealedSecret POSTs the sealed secret read from in to the
// controller's /v1/verify endpoint (reached through the k8s API service
// proxy) and returns nil when the controller reports it valid.
func ValidateSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, in io.Reader) error {
	conf, err := clientConfig.ClientConfig()
	if err != nil {
		return err
	}
	restClient, err := corev1.NewForConfig(conf)
	if err != nil {
		return err
	}
	portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
	if err != nil {
		return err
	}
	content, err := io.ReadAll(in)
	if err != nil {
		return err
	}
	// Build a request to the controller service through the API server's
	// service proxy subresource.
	req := restClient.RESTClient().Post().
		Namespace(controllerNs).
		Resource("services").
		SubResource("proxy").
		Name(net.JoinSchemeNamePort("http", controllerName, portName)).
		Suffix("/v1/verify")
	req.Body(content)
	res := req.Do(ctx)
	if err := res.Error(); err != nil {
		// A 409 Conflict from the controller is mapped to a decryption
		// failure; anything else is reported verbatim.
		if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
			return fmt.Errorf("unable to decrypt sealed secret")
		}
		return fmt.Errorf("cannot validate sealed secret: %v", err)
	}
	return nil
}
// ReEncryptSealedSecret POSTs the sealed secret read from in to the
// controller's /v1/rotate endpoint (through the k8s API service proxy),
// strips server-managed metadata from the re-encrypted result, and renders
// it to out in outputFormat.
func ReEncryptSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory) error {
	conf, err := clientConfig.ClientConfig()
	if err != nil {
		return err
	}
	restClient, err := corev1.NewForConfig(conf)
	if err != nil {
		return err
	}
	portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
	if err != nil {
		return err
	}
	content, err := io.ReadAll(in)
	if err != nil {
		return err
	}
	// Build a request to the controller service through the API server's
	// service proxy subresource.
	req := restClient.RESTClient().Post().
		Namespace(controllerNs).
		Resource("services").
		SubResource("proxy").
		Name(net.JoinSchemeNamePort("http", controllerName, portName)).
		Suffix("/v1/rotate")
	req.Body(content)
	res := req.Do(ctx)
	if err := res.Error(); err != nil {
		// A 409 Conflict from the controller is mapped to a rotation
		// failure; anything else is reported verbatim.
		if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
			return fmt.Errorf("unable to rotate secret")
		}
		return fmt.Errorf("cannot re-encrypt secret: %v", err)
	}
	body, err := res.Raw()
	if err != nil {
		return err
	}
	ssecret := &ssv1alpha1.SealedSecret{}
	if err = json.Unmarshal(body, ssecret); err != nil {
		return err
	}
	// Clear server-populated metadata so the output is suitable for storing
	// and re-applying.
	ssecret.SetCreationTimestamp(metav1.Time{})
	ssecret.SetDeletionTimestamp(nil)
	ssecret.Generation = 0
	if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
		return err
	}
	return nil
}
// resourceOutput encodes obj for group-version gv as pretty JSON or YAML
// (per outputFormat; empty defaults to JSON) and writes it to out followed
// by a trailing newline.
func resourceOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, gv runtime.GroupVersioner, obj runtime.Object) error {
	var contentType string
	switch format := strings.ToLower(outputFormat); format {
	case "", "json":
		contentType = runtime.ContentTypeJSON
	case "yaml":
		contentType = runtime.ContentTypeYAML
	default:
		return fmt.Errorf("unsupported output format: %s", outputFormat)
	}
	enc, err := prettyEncoder(codecs, contentType, gv)
	if err != nil {
		return err
	}
	encoded, err := runtime.Encode(enc, obj)
	if err != nil {
		return err
	}
	_, _ = out.Write(encoded)
	fmt.Fprint(out, "\n")
	return nil
}
// sealedSecretOutput renders ssecret to out using the SealedSecret scheme
// group-version, delegating formatting to resourceOutput.
func sealedSecretOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, ssecret *ssv1alpha1.SealedSecret) error {
	return resourceOutput(out, outputFormat, codecs, ssv1alpha1.SchemeGroupVersion, ssecret)
}
// decodeSealedSecret deserializes a SealedSecret from json/yaml bytes using
// the universal decoder of the supplied codec factory.
func decodeSealedSecret(codecs runtimeserializer.CodecFactory, b []byte) (*ssv1alpha1.SealedSecret, error) {
	ss := &ssv1alpha1.SealedSecret{}
	err := runtime.DecodeInto(codecs.UniversalDecoder(), b, ss)
	if err != nil {
		return nil, err
	}
	return ss, nil
}
// SealMergingInto seals the Secret read from in using the name and namespace
// of the existing sealed secret stored at filename, then merges the newly
// encrypted data and template metadata into that file, rewriting it in place.
func SealMergingInto(clientConfig ClientConfig, outputFormat string, in io.Reader, filename string, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool) error {
	// Open read-write: the same handle is read first and rewritten below.
	// #nosec G304 -- should open user provided file
	f, err := os.OpenFile(filename, os.O_RDWR, 0)
	if err != nil {
		return err
	}
	// #nosec G307 -- we are explicitly managing a potential error from f.Close() at the end of the function
	defer f.Close()
	b, err := io.ReadAll(f)
	if err != nil {
		return err
	}
	orig, err := decodeSealedSecret(codecs, b)
	if err != nil {
		return err
	}
	// Seal the new input against the existing secret's name/namespace so the
	// encryption labels match the original.
	var buf bytes.Buffer
	if err := Seal(clientConfig, outputFormat, in, &buf, codecs, pubKey, scope, allowEmptyData, orig.Name, orig.Namespace); err != nil {
		return err
	}
	update, err := decodeSealedSecret(codecs, buf.Bytes())
	if err != nil {
		return err
	}
	// merge encrypted data and metadata
	for k, v := range update.Spec.EncryptedData {
		orig.Spec.EncryptedData[k] = v
	}
	for k, v := range update.Spec.Template.Annotations {
		orig.Spec.Template.Annotations[k] = v
	}
	for k, v := range update.Spec.Template.Labels {
		orig.Spec.Template.Labels[k] = v
	}
	for k, v := range update.Spec.Template.Data {
		orig.Spec.Template.Data[k] = v
	}
	// updated sealed secret file in-place avoiding clobbering the file upon rendering errors.
	var out bytes.Buffer
	if err := sealedSecretOutput(&out, outputFormat, codecs, orig); err != nil {
		return err
	}
	// Rendering succeeded: truncate, rewind, and write the merged result.
	if err := f.Truncate(0); err != nil {
		return err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return err
	}
	if _, err := io.Copy(f, &out); err != nil {
		return err
	}
	// we explicitly call f.Close() to return a potential error when closing the file that wouldn't be returned in the deferred f.Close()
	if err := f.Close(); err != nil {
		return err
	}
	return nil
}
func | (w io.Writer, secretName, ns string, data []byte, scope ssv1alpha1.SealingScope, pubKey *rsa.PublicKey) error {
// TODO(mkm): refactor cluster-wide/namespace-wide to an actual enum so we can have a simple flag
// to refer to the scope mode that is not a tuple of booleans.
label := ssv1alpha1.EncryptionLabel(ns, secretName, scope)
out, err := crypto.HybridEncrypt(rand.Reader, pubKey, data, label)
if err != nil {
return err
}
fmt.Fprint(w, base64.StdEncoding.EncodeToString(out))
return nil
}
// ParseFromFile parses a value of the kubectl --from-file flag, which can
// optionally include an item name preceding the first equals sign.
// It returns (name, path); name is empty when no "=" is present.
func ParseFromFile(s string) (string, string) {
	if i := strings.Index(s, "="); i >= 0 {
		return s[:i], s[i+1:]
	}
	return "", s
}
// readPrivKeysFromFile loads one or more RSA private keys from filename.
//
// The file may contain a bare PEM private key, a json/yaml encoded v1.List
// of Secrets, or a single json/yaml encoded Secret; Secrets must carry the
// key material under "tls.key".
func readPrivKeysFromFile(filename string) ([]*rsa.PrivateKey, error) {
	// #nosec G304 -- should open user provided file
	b, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	// Fast path: the file is a bare PEM-encoded private key.
	res, err := parsePrivKey(b)
	if err == nil {
		return []*rsa.PrivateKey{res}, nil
	}
	var secrets []*v1.Secret
	// try to parse it as json/yaml encoded v1.List of secrets
	var lst v1.List
	if err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), b, &lst); err == nil {
		for _, r := range lst.Items {
			s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(r.Raw))
			if err != nil {
				return nil, err
			}
			secrets = append(secrets, s)
		}
	} else {
		// try to parse it as json/yaml encoded secret
		s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(b))
		if err != nil {
			return nil, err
		}
		secrets = append(secrets, s)
	}
	var keys []*rsa.PrivateKey
	for _, s := range secrets {
		tlsKey, ok := s.Data["tls.key"]
		if !ok {
			// BUG FIX: the message used to say 'tls.data' while the lookup
			// above requires "tls.key"; report the key actually expected.
			return nil, fmt.Errorf("secret must contain a 'tls.key' key")
		}
		pk, err := parsePrivKey(tlsKey)
		if err != nil {
			return nil, err
		}
		keys = append(keys, pk)
	}
	return keys, nil
}
// readPrivKey loads the first RSA private key found in filename.
func readPrivKey(filename string) (*rsa.PrivateKey, error) {
	keys, err := readPrivKeysFromFile(filename)
	if err != nil {
		return nil, err
	}
	// readPrivKeysFromFile yields at least one key on success.
	return keys[0], nil
}
// parsePrivKey decodes a PEM-encoded private key and ensures it is RSA.
func parsePrivKey(b []byte) (*rsa.PrivateKey, error) {
	key, err := keyutil.ParsePrivateKeyPEM(b)
	if err != nil {
		return nil, err
	}
	rsaKey, ok := key.(*rsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("unexpected private key type %T", key)
	}
	return rsaKey, nil
}
// readPrivKeys loads RSA private keys from all the given files and indexes
// them by the fingerprint of their public key.
func readPrivKeys(filenames []string) (map[string]*rsa.PrivateKey, error) {
	byFingerprint := make(map[string]*rsa.PrivateKey)
	for _, filename := range filenames {
		keys, err := readPrivKeysFromFile(filename)
		if err != nil {
			return nil, err
		}
		for _, key := range keys {
			fingerprint, err := crypto.PublicKeyFingerprint(&key.PublicKey)
			if err != nil {
				return nil, err
			}
			byFingerprint[fingerprint] = key
		}
	}
	return byFingerprint, nil
}
// UnsealSealedSecret decrypts the sealed secret read from in using the
// private keys loaded from privKeysFilenames and writes the recovered
// Secret to w in outputFormat (json/yaml).
func UnsealSealedSecret(w io.Writer, in io.Reader, privKeysFilenames []string, outputFormat string, codecs runtimeserializer.CodecFactory) error {
	privKeys, err := readPrivKeys(privKeysFilenames)
	if err != nil {
		return err
	}
	b, err := io.ReadAll(in)
	if err != nil {
		return err
	}
	ss, err := decodeSealedSecret(codecs, b)
	if err != nil {
		return err
	}
	sec, err := ss.Unseal(codecs, privKeys)
	if err != nil {
		return err
	}
	// Output uses the core/v1 group-version since the result is a plain Secret.
	return resourceOutput(w, outputFormat, codecs, v1.SchemeGroupVersion, sec)
}
| EncryptSecretItem | identifier_name |
kubeseal.go | package kubeseal
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
ssv1alpha1 "github.com/bitnami-labs/sealed-secrets/pkg/apis/sealedsecrets/v1alpha1"
"github.com/bitnami-labs/sealed-secrets/pkg/crypto"
"github.com/bitnami-labs/sealed-secrets/pkg/multidocyaml"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
)
type ClientConfig interface {
ClientConfig() (*rest.Config, error)
Namespace() (string, bool, error)
}
func ParseKey(r io.Reader) (*rsa.PublicKey, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
certs, err := cert.ParseCertsPEM(data)
if err != nil {
return nil, err
}
// ParseCertsPem returns error if len(certs) == 0, but best to be sure...
if len(certs) == 0 {
return nil, errors.New("failed to read any certificates")
}
cert, ok := certs[0].PublicKey.(*rsa.PublicKey)
if !ok {
return nil, fmt.Errorf("expected RSA public key but found %v", certs[0].PublicKey)
}
if time.Now().After(certs[0].NotAfter) {
return nil, fmt.Errorf("failed to encrypt using an expired certificate on %v", certs[0].NotAfter.Format("January 2, 2006"))
}
return cert, nil
}
func readSecret(codec runtime.Decoder, r io.Reader) (*v1.Secret, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
if err := multidocyaml.EnsureNotMultiDoc(data); err != nil {
return nil, err
}
var ret v1.Secret
if err = runtime.DecodeInto(codec, data, &ret); err != nil {
return nil, err
}
return &ret, nil
}
func prettyEncoder(codecs runtimeserializer.CodecFactory, mediaType string, gv runtime.GroupVersioner) (runtime.Encoder, error) {
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("binary can't serialize %s", mediaType)
}
prettyEncoder := info.PrettySerializer
if prettyEncoder == nil {
prettyEncoder = info.Serializer
}
enc := codecs.EncoderForVersion(prettyEncoder, gv)
return enc, nil
}
func isFilename(name string) (bool, error) {
u, err := url.Parse(name)
if err != nil {
return false, err
}
// windows drive letters
if s := strings.ToLower(u.Scheme); len(s) == 1 && s[0] >= 'a' && s[0] <= 'z' {
return true, nil
}
return u.Scheme == "", nil
}
// getServicePortName obtains the SealedSecrets service port name.
func getServicePortName(ctx context.Context, client corev1.CoreV1Interface, namespace, serviceName string) (string, error) {
service, err := client.Services(namespace).Get(ctx, serviceName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("cannot get sealed secret service: %v.\nPlease, use the flag --controller-name and --controller-namespace to set up the name and namespace of the sealed secrets controller", err)
}
return service.Spec.Ports[0].Name, nil
}
// openCertLocal opens a cert URI or local filename, by fetching it locally from the client
// (as opposed as openCertCluster which fetches it via HTTP but through the k8s API proxy).
func openCertLocal(filenameOrURI string) (io.ReadCloser, error) {
// detect if a certificate is a local file or an URI.
if ok, err := isFilename(filenameOrURI); err != nil {
return nil, err
} else if ok {
// #nosec G304 -- should open user provided file
return os.Open(filenameOrURI)
}
return openCertURI(filenameOrURI)
}
func openCertURI(uri string) (io.ReadCloser, error) {
// support file:// scheme. Note: we're opening the file using os.Open rather
// than using the file:// scheme below because there is no point in complicating our lives
// and escape the filename properly.
t := &http.Transport{}
// #nosec: G111 -- we want to allow all files to be opened
t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
c := &http.Client{Transport: t}
resp, err := c.Get(uri)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("cannot fetch %q: %s", uri, resp.Status)
}
return resp.Body, nil
}
// openCertCluster fetches a certificate by performing an HTTP request to the controller
// through the k8s API proxy.
func openCertCluster(ctx context.Context, c corev1.CoreV1Interface, namespace, name string) (io.ReadCloser, error) {
portName, err := getServicePortName(ctx, c, namespace, name)
if err != nil {
return nil, err
}
cert, err := c.Services(namespace).ProxyGet("http", name, portName, "/v1/cert.pem", nil).Stream(ctx)
if err != nil {
return nil, fmt.Errorf("cannot fetch certificate: %v", err)
}
return cert, nil
}
func OpenCert(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, certURL string) (io.ReadCloser, error) {
if certURL != "" {
return openCertLocal(certURL)
}
conf, err := clientConfig.ClientConfig()
if err != nil {
return nil, err
}
conf.AcceptContentTypes = "application/x-pem-file, */*"
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return nil, err
}
return openCertCluster(ctx, restClient, controllerNs, controllerName)
}
// Seal reads a k8s Secret resource parsed from an input reader by a given codec, encrypts all its secrets
// with a given public key, using the name and namespace found in the input secret, unless explicitly overridden
// by the overrideName and overrideNamespace arguments.
func Seal(clientConfig ClientConfig, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool, overrideName, overrideNamespace string) error {
secret, err := readSecret(codecs.UniversalDecoder(), in) |
if len(secret.Data) == 0 && len(secret.StringData) == 0 && !allowEmptyData {
return fmt.Errorf("secret.data is empty in input Secret, assuming this is an error and aborting. To work with empty data, --allow-empty-data can be used")
}
if overrideName != "" {
secret.Name = overrideName
}
if secret.GetName() == "" {
return fmt.Errorf("missing metadata.name in input Secret")
}
if overrideNamespace != "" {
secret.Namespace = overrideNamespace
}
if scope != ssv1alpha1.DefaultScope {
secret.Annotations = ssv1alpha1.UpdateScopeAnnotations(secret.Annotations, scope)
}
if ssv1alpha1.SecretScope(secret) != ssv1alpha1.ClusterWideScope && secret.GetNamespace() == "" {
ns, _, err := clientConfig.Namespace()
if clientcmd.IsEmptyConfig(err) {
return fmt.Errorf("input secret has no namespace and cannot infer the namespace automatically when no kube config is available")
} else if err != nil {
return err
}
secret.SetNamespace(ns)
}
// Strip read-only server-side ObjectMeta (if present)
secret.SetSelfLink("")
secret.SetUID("")
secret.SetResourceVersion("")
secret.Generation = 0
secret.SetCreationTimestamp(metav1.Time{})
secret.SetDeletionTimestamp(nil)
secret.DeletionGracePeriodSeconds = nil
ssecret, err := ssv1alpha1.NewSealedSecret(codecs, pubKey, secret)
if err != nil {
return err
}
if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
return err
}
return nil
}
func ValidateSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, in io.Reader) error {
conf, err := clientConfig.ClientConfig()
if err != nil {
return err
}
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return err
}
portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
if err != nil {
return err
}
content, err := io.ReadAll(in)
if err != nil {
return err
}
req := restClient.RESTClient().Post().
Namespace(controllerNs).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort("http", controllerName, portName)).
Suffix("/v1/verify")
req.Body(content)
res := req.Do(ctx)
if err := res.Error(); err != nil {
if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
return fmt.Errorf("unable to decrypt sealed secret")
}
return fmt.Errorf("cannot validate sealed secret: %v", err)
}
return nil
}
func ReEncryptSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory) error {
conf, err := clientConfig.ClientConfig()
if err != nil {
return err
}
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return err
}
portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
if err != nil {
return err
}
content, err := io.ReadAll(in)
if err != nil {
return err
}
req := restClient.RESTClient().Post().
Namespace(controllerNs).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort("http", controllerName, portName)).
Suffix("/v1/rotate")
req.Body(content)
res := req.Do(ctx)
if err := res.Error(); err != nil {
if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
return fmt.Errorf("unable to rotate secret")
}
return fmt.Errorf("cannot re-encrypt secret: %v", err)
}
body, err := res.Raw()
if err != nil {
return err
}
ssecret := &ssv1alpha1.SealedSecret{}
if err = json.Unmarshal(body, ssecret); err != nil {
return err
}
ssecret.SetCreationTimestamp(metav1.Time{})
ssecret.SetDeletionTimestamp(nil)
ssecret.Generation = 0
if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
return err
}
return nil
}
func resourceOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, gv runtime.GroupVersioner, obj runtime.Object) error {
var contentType string
switch strings.ToLower(outputFormat) {
case "json", "":
contentType = runtime.ContentTypeJSON
case "yaml":
contentType = runtime.ContentTypeYAML
default:
return fmt.Errorf("unsupported output format: %s", outputFormat)
}
prettyEnc, err := prettyEncoder(codecs, contentType, gv)
if err != nil {
return err
}
buf, err := runtime.Encode(prettyEnc, obj)
if err != nil {
return err
}
_, _ = out.Write(buf)
fmt.Fprint(out, "\n")
return nil
}
func sealedSecretOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, ssecret *ssv1alpha1.SealedSecret) error {
return resourceOutput(out, outputFormat, codecs, ssv1alpha1.SchemeGroupVersion, ssecret)
}
func decodeSealedSecret(codecs runtimeserializer.CodecFactory, b []byte) (*ssv1alpha1.SealedSecret, error) {
var ss ssv1alpha1.SealedSecret
if err := runtime.DecodeInto(codecs.UniversalDecoder(), b, &ss); err != nil {
return nil, err
}
return &ss, nil
}
func SealMergingInto(clientConfig ClientConfig, outputFormat string, in io.Reader, filename string, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool) error {
// #nosec G304 -- should open user provided file
f, err := os.OpenFile(filename, os.O_RDWR, 0)
if err != nil {
return err
}
// #nosec G307 -- we are explicitly managing a potential error from f.Close() at the end of the function
defer f.Close()
b, err := io.ReadAll(f)
if err != nil {
return err
}
orig, err := decodeSealedSecret(codecs, b)
if err != nil {
return err
}
var buf bytes.Buffer
if err := Seal(clientConfig, outputFormat, in, &buf, codecs, pubKey, scope, allowEmptyData, orig.Name, orig.Namespace); err != nil {
return err
}
update, err := decodeSealedSecret(codecs, buf.Bytes())
if err != nil {
return err
}
// merge encrypted data and metadata
for k, v := range update.Spec.EncryptedData {
orig.Spec.EncryptedData[k] = v
}
for k, v := range update.Spec.Template.Annotations {
orig.Spec.Template.Annotations[k] = v
}
for k, v := range update.Spec.Template.Labels {
orig.Spec.Template.Labels[k] = v
}
for k, v := range update.Spec.Template.Data {
orig.Spec.Template.Data[k] = v
}
// updated sealed secret file in-place avoiding clobbering the file upon rendering errors.
var out bytes.Buffer
if err := sealedSecretOutput(&out, outputFormat, codecs, orig); err != nil {
return err
}
if err := f.Truncate(0); err != nil {
return err
}
if _, err := f.Seek(0, 0); err != nil {
return err
}
if _, err := io.Copy(f, &out); err != nil {
return err
}
// we explicitly call f.Close() to return a potential error when closing the file that wouldn't be returned in the deferred f.Close()
if err := f.Close(); err != nil {
return err
}
return nil
}
func EncryptSecretItem(w io.Writer, secretName, ns string, data []byte, scope ssv1alpha1.SealingScope, pubKey *rsa.PublicKey) error {
// TODO(mkm): refactor cluster-wide/namespace-wide to an actual enum so we can have a simple flag
// to refer to the scope mode that is not a tuple of booleans.
label := ssv1alpha1.EncryptionLabel(ns, secretName, scope)
out, err := crypto.HybridEncrypt(rand.Reader, pubKey, data, label)
if err != nil {
return err
}
fmt.Fprint(w, base64.StdEncoding.EncodeToString(out))
return nil
}
// parseFromFile parses a value of the kubectl --from-file flag, which can optionally include an item name
// preceding the first equals sign.
func ParseFromFile(s string) (string, string) {
c := strings.SplitN(s, "=", 2)
if len(c) == 1 {
return "", c[0]
}
return c[0], c[1]
}
func readPrivKeysFromFile(filename string) ([]*rsa.PrivateKey, error) {
// #nosec G304 -- should open user provided file
b, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
res, err := parsePrivKey(b)
if err == nil {
return []*rsa.PrivateKey{res}, nil
}
var secrets []*v1.Secret
// try to parse it as json/yaml encoded v1.List of secrets
var lst v1.List
if err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), b, &lst); err == nil {
for _, r := range lst.Items {
s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(r.Raw))
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
} else {
// try to parse it as json/yaml encoded secret
s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(b))
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
var keys []*rsa.PrivateKey
for _, s := range secrets {
tlsKey, ok := s.Data["tls.key"]
if !ok {
return nil, fmt.Errorf("secret must contain a 'tls.data' key")
}
pk, err := parsePrivKey(tlsKey)
if err != nil {
return nil, err
}
keys = append(keys, pk)
}
return keys, nil
}
func readPrivKey(filename string) (*rsa.PrivateKey, error) {
pks, err := readPrivKeysFromFile(filename)
if err != nil {
return nil, err
}
return pks[0], nil
}
func parsePrivKey(b []byte) (*rsa.PrivateKey, error) {
key, err := keyutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, err
}
switch rsaKey := key.(type) {
case *rsa.PrivateKey:
return rsaKey, nil
default:
return nil, fmt.Errorf("unexpected private key type %T", key)
}
}
func readPrivKeys(filenames []string) (map[string]*rsa.PrivateKey, error) {
res := map[string]*rsa.PrivateKey{}
for _, filename := range filenames {
pks, err := readPrivKeysFromFile(filename)
if err != nil {
return nil, err
}
for _, pk := range pks {
fingerprint, err := crypto.PublicKeyFingerprint(&pk.PublicKey)
if err != nil {
return nil, err
}
res[fingerprint] = pk
}
}
return res, nil
}
func UnsealSealedSecret(w io.Writer, in io.Reader, privKeysFilenames []string, outputFormat string, codecs runtimeserializer.CodecFactory) error {
privKeys, err := readPrivKeys(privKeysFilenames)
if err != nil {
return err
}
b, err := io.ReadAll(in)
if err != nil {
return err
}
ss, err := decodeSealedSecret(codecs, b)
if err != nil {
return err
}
sec, err := ss.Unseal(codecs, privKeys)
if err != nil {
return err
}
return resourceOutput(w, outputFormat, codecs, v1.SchemeGroupVersion, sec)
} | if err != nil {
return err
} | random_line_split |
kubeseal.go | package kubeseal
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strings"
"time"
ssv1alpha1 "github.com/bitnami-labs/sealed-secrets/pkg/apis/sealedsecrets/v1alpha1"
"github.com/bitnami-labs/sealed-secrets/pkg/crypto"
"github.com/bitnami-labs/sealed-secrets/pkg/multidocyaml"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
)
type ClientConfig interface {
ClientConfig() (*rest.Config, error)
Namespace() (string, bool, error)
}
func ParseKey(r io.Reader) (*rsa.PublicKey, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
certs, err := cert.ParseCertsPEM(data)
if err != nil {
return nil, err
}
// ParseCertsPem returns error if len(certs) == 0, but best to be sure...
if len(certs) == 0 {
return nil, errors.New("failed to read any certificates")
}
cert, ok := certs[0].PublicKey.(*rsa.PublicKey)
if !ok {
return nil, fmt.Errorf("expected RSA public key but found %v", certs[0].PublicKey)
}
if time.Now().After(certs[0].NotAfter) {
return nil, fmt.Errorf("failed to encrypt using an expired certificate on %v", certs[0].NotAfter.Format("January 2, 2006"))
}
return cert, nil
}
func readSecret(codec runtime.Decoder, r io.Reader) (*v1.Secret, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
if err := multidocyaml.EnsureNotMultiDoc(data); err != nil {
return nil, err
}
var ret v1.Secret
if err = runtime.DecodeInto(codec, data, &ret); err != nil {
return nil, err
}
return &ret, nil
}
func prettyEncoder(codecs runtimeserializer.CodecFactory, mediaType string, gv runtime.GroupVersioner) (runtime.Encoder, error) {
info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType)
if !ok {
return nil, fmt.Errorf("binary can't serialize %s", mediaType)
}
prettyEncoder := info.PrettySerializer
if prettyEncoder == nil {
prettyEncoder = info.Serializer
}
enc := codecs.EncoderForVersion(prettyEncoder, gv)
return enc, nil
}
func isFilename(name string) (bool, error) {
u, err := url.Parse(name)
if err != nil {
return false, err
}
// windows drive letters
if s := strings.ToLower(u.Scheme); len(s) == 1 && s[0] >= 'a' && s[0] <= 'z' {
return true, nil
}
return u.Scheme == "", nil
}
// getServicePortName obtains the SealedSecrets service port name.
func getServicePortName(ctx context.Context, client corev1.CoreV1Interface, namespace, serviceName string) (string, error) {
service, err := client.Services(namespace).Get(ctx, serviceName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("cannot get sealed secret service: %v.\nPlease, use the flag --controller-name and --controller-namespace to set up the name and namespace of the sealed secrets controller", err)
}
return service.Spec.Ports[0].Name, nil
}
// openCertLocal opens a cert URI or local filename, by fetching it locally from the client
// (as opposed as openCertCluster which fetches it via HTTP but through the k8s API proxy).
func openCertLocal(filenameOrURI string) (io.ReadCloser, error) {
// detect if a certificate is a local file or an URI.
if ok, err := isFilename(filenameOrURI); err != nil {
return nil, err
} else if ok {
// #nosec G304 -- should open user provided file
return os.Open(filenameOrURI)
}
return openCertURI(filenameOrURI)
}
func openCertURI(uri string) (io.ReadCloser, error) {
// support file:// scheme. Note: we're opening the file using os.Open rather
// than using the file:// scheme below because there is no point in complicating our lives
// and escape the filename properly.
t := &http.Transport{}
// #nosec: G111 -- we want to allow all files to be opened
t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
c := &http.Client{Transport: t}
resp, err := c.Get(uri)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("cannot fetch %q: %s", uri, resp.Status)
}
return resp.Body, nil
}
// openCertCluster fetches a certificate by performing an HTTP request to the controller
// through the k8s API proxy.
func openCertCluster(ctx context.Context, c corev1.CoreV1Interface, namespace, name string) (io.ReadCloser, error) {
portName, err := getServicePortName(ctx, c, namespace, name)
if err != nil {
return nil, err
}
cert, err := c.Services(namespace).ProxyGet("http", name, portName, "/v1/cert.pem", nil).Stream(ctx)
if err != nil {
return nil, fmt.Errorf("cannot fetch certificate: %v", err)
}
return cert, nil
}
func OpenCert(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, certURL string) (io.ReadCloser, error) {
if certURL != "" {
return openCertLocal(certURL)
}
conf, err := clientConfig.ClientConfig()
if err != nil {
return nil, err
}
conf.AcceptContentTypes = "application/x-pem-file, */*"
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return nil, err
}
return openCertCluster(ctx, restClient, controllerNs, controllerName)
}
// Seal reads a k8s Secret resource parsed from an input reader by a given codec, encrypts all its secrets
// with a given public key, using the name and namespace found in the input secret, unless explicitly overridden
// by the overrideName and overrideNamespace arguments.
func Seal(clientConfig ClientConfig, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool, overrideName, overrideNamespace string) error {
secret, err := readSecret(codecs.UniversalDecoder(), in)
if err != nil {
return err
}
if len(secret.Data) == 0 && len(secret.StringData) == 0 && !allowEmptyData {
return fmt.Errorf("secret.data is empty in input Secret, assuming this is an error and aborting. To work with empty data, --allow-empty-data can be used")
}
if overrideName != "" {
secret.Name = overrideName
}
if secret.GetName() == "" {
return fmt.Errorf("missing metadata.name in input Secret")
}
if overrideNamespace != "" {
secret.Namespace = overrideNamespace
}
if scope != ssv1alpha1.DefaultScope {
secret.Annotations = ssv1alpha1.UpdateScopeAnnotations(secret.Annotations, scope)
}
if ssv1alpha1.SecretScope(secret) != ssv1alpha1.ClusterWideScope && secret.GetNamespace() == "" {
ns, _, err := clientConfig.Namespace()
if clientcmd.IsEmptyConfig(err) {
return fmt.Errorf("input secret has no namespace and cannot infer the namespace automatically when no kube config is available")
} else if err != nil {
return err
}
secret.SetNamespace(ns)
}
// Strip read-only server-side ObjectMeta (if present)
secret.SetSelfLink("")
secret.SetUID("")
secret.SetResourceVersion("")
secret.Generation = 0
secret.SetCreationTimestamp(metav1.Time{})
secret.SetDeletionTimestamp(nil)
secret.DeletionGracePeriodSeconds = nil
ssecret, err := ssv1alpha1.NewSealedSecret(codecs, pubKey, secret)
if err != nil {
return err
}
if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
return err
}
return nil
}
func ValidateSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName string, in io.Reader) error {
conf, err := clientConfig.ClientConfig()
if err != nil {
return err
}
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return err
}
portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
if err != nil {
return err
}
content, err := io.ReadAll(in)
if err != nil {
return err
}
req := restClient.RESTClient().Post().
Namespace(controllerNs).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort("http", controllerName, portName)).
Suffix("/v1/verify")
req.Body(content)
res := req.Do(ctx)
if err := res.Error(); err != nil {
if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
return fmt.Errorf("unable to decrypt sealed secret")
}
return fmt.Errorf("cannot validate sealed secret: %v", err)
}
return nil
}
func ReEncryptSealedSecret(ctx context.Context, clientConfig ClientConfig, controllerNs, controllerName, outputFormat string, in io.Reader, out io.Writer, codecs runtimeserializer.CodecFactory) error {
conf, err := clientConfig.ClientConfig()
if err != nil {
return err
}
restClient, err := corev1.NewForConfig(conf)
if err != nil {
return err
}
portName, err := getServicePortName(ctx, restClient, controllerNs, controllerName)
if err != nil {
return err
}
content, err := io.ReadAll(in)
if err != nil {
return err
}
req := restClient.RESTClient().Post().
Namespace(controllerNs).
Resource("services").
SubResource("proxy").
Name(net.JoinSchemeNamePort("http", controllerName, portName)).
Suffix("/v1/rotate")
req.Body(content)
res := req.Do(ctx)
if err := res.Error(); err != nil {
if status, ok := err.(*k8serrors.StatusError); ok && status.Status().Code == http.StatusConflict {
return fmt.Errorf("unable to rotate secret")
}
return fmt.Errorf("cannot re-encrypt secret: %v", err)
}
body, err := res.Raw()
if err != nil {
return err
}
ssecret := &ssv1alpha1.SealedSecret{}
if err = json.Unmarshal(body, ssecret); err != nil {
return err
}
ssecret.SetCreationTimestamp(metav1.Time{})
ssecret.SetDeletionTimestamp(nil)
ssecret.Generation = 0
if err = sealedSecretOutput(out, outputFormat, codecs, ssecret); err != nil {
return err
}
return nil
}
func resourceOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, gv runtime.GroupVersioner, obj runtime.Object) error {
var contentType string
switch strings.ToLower(outputFormat) {
case "json", "":
contentType = runtime.ContentTypeJSON
case "yaml":
contentType = runtime.ContentTypeYAML
default:
return fmt.Errorf("unsupported output format: %s", outputFormat)
}
prettyEnc, err := prettyEncoder(codecs, contentType, gv)
if err != nil {
return err
}
buf, err := runtime.Encode(prettyEnc, obj)
if err != nil {
return err
}
_, _ = out.Write(buf)
fmt.Fprint(out, "\n")
return nil
}
func sealedSecretOutput(out io.Writer, outputFormat string, codecs runtimeserializer.CodecFactory, ssecret *ssv1alpha1.SealedSecret) error {
return resourceOutput(out, outputFormat, codecs, ssv1alpha1.SchemeGroupVersion, ssecret)
}
func decodeSealedSecret(codecs runtimeserializer.CodecFactory, b []byte) (*ssv1alpha1.SealedSecret, error) {
var ss ssv1alpha1.SealedSecret
if err := runtime.DecodeInto(codecs.UniversalDecoder(), b, &ss); err != nil {
return nil, err
}
return &ss, nil
}
func SealMergingInto(clientConfig ClientConfig, outputFormat string, in io.Reader, filename string, codecs runtimeserializer.CodecFactory, pubKey *rsa.PublicKey, scope ssv1alpha1.SealingScope, allowEmptyData bool) error |
func EncryptSecretItem(w io.Writer, secretName, ns string, data []byte, scope ssv1alpha1.SealingScope, pubKey *rsa.PublicKey) error {
// TODO(mkm): refactor cluster-wide/namespace-wide to an actual enum so we can have a simple flag
// to refer to the scope mode that is not a tuple of booleans.
label := ssv1alpha1.EncryptionLabel(ns, secretName, scope)
out, err := crypto.HybridEncrypt(rand.Reader, pubKey, data, label)
if err != nil {
return err
}
fmt.Fprint(w, base64.StdEncoding.EncodeToString(out))
return nil
}
// parseFromFile parses a value of the kubectl --from-file flag, which can optionally include an item name
// preceding the first equals sign.
func ParseFromFile(s string) (string, string) {
c := strings.SplitN(s, "=", 2)
if len(c) == 1 {
return "", c[0]
}
return c[0], c[1]
}
func readPrivKeysFromFile(filename string) ([]*rsa.PrivateKey, error) {
// #nosec G304 -- should open user provided file
b, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
res, err := parsePrivKey(b)
if err == nil {
return []*rsa.PrivateKey{res}, nil
}
var secrets []*v1.Secret
// try to parse it as json/yaml encoded v1.List of secrets
var lst v1.List
if err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), b, &lst); err == nil {
for _, r := range lst.Items {
s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(r.Raw))
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
} else {
// try to parse it as json/yaml encoded secret
s, err := readSecret(scheme.Codecs.UniversalDecoder(), bytes.NewBuffer(b))
if err != nil {
return nil, err
}
secrets = append(secrets, s)
}
var keys []*rsa.PrivateKey
for _, s := range secrets {
tlsKey, ok := s.Data["tls.key"]
if !ok {
return nil, fmt.Errorf("secret must contain a 'tls.data' key")
}
pk, err := parsePrivKey(tlsKey)
if err != nil {
return nil, err
}
keys = append(keys, pk)
}
return keys, nil
}
func readPrivKey(filename string) (*rsa.PrivateKey, error) {
pks, err := readPrivKeysFromFile(filename)
if err != nil {
return nil, err
}
return pks[0], nil
}
func parsePrivKey(b []byte) (*rsa.PrivateKey, error) {
key, err := keyutil.ParsePrivateKeyPEM(b)
if err != nil {
return nil, err
}
switch rsaKey := key.(type) {
case *rsa.PrivateKey:
return rsaKey, nil
default:
return nil, fmt.Errorf("unexpected private key type %T", key)
}
}
func readPrivKeys(filenames []string) (map[string]*rsa.PrivateKey, error) {
res := map[string]*rsa.PrivateKey{}
for _, filename := range filenames {
pks, err := readPrivKeysFromFile(filename)
if err != nil {
return nil, err
}
for _, pk := range pks {
fingerprint, err := crypto.PublicKeyFingerprint(&pk.PublicKey)
if err != nil {
return nil, err
}
res[fingerprint] = pk
}
}
return res, nil
}
func UnsealSealedSecret(w io.Writer, in io.Reader, privKeysFilenames []string, outputFormat string, codecs runtimeserializer.CodecFactory) error {
privKeys, err := readPrivKeys(privKeysFilenames)
if err != nil {
return err
}
b, err := io.ReadAll(in)
if err != nil {
return err
}
ss, err := decodeSealedSecret(codecs, b)
if err != nil {
return err
}
sec, err := ss.Unseal(codecs, privKeys)
if err != nil {
return err
}
return resourceOutput(w, outputFormat, codecs, v1.SchemeGroupVersion, sec)
}
| {
// #nosec G304 -- should open user provided file
f, err := os.OpenFile(filename, os.O_RDWR, 0)
if err != nil {
return err
}
// #nosec G307 -- we are explicitly managing a potential error from f.Close() at the end of the function
defer f.Close()
b, err := io.ReadAll(f)
if err != nil {
return err
}
orig, err := decodeSealedSecret(codecs, b)
if err != nil {
return err
}
var buf bytes.Buffer
if err := Seal(clientConfig, outputFormat, in, &buf, codecs, pubKey, scope, allowEmptyData, orig.Name, orig.Namespace); err != nil {
return err
}
update, err := decodeSealedSecret(codecs, buf.Bytes())
if err != nil {
return err
}
// merge encrypted data and metadata
for k, v := range update.Spec.EncryptedData {
orig.Spec.EncryptedData[k] = v
}
for k, v := range update.Spec.Template.Annotations {
orig.Spec.Template.Annotations[k] = v
}
for k, v := range update.Spec.Template.Labels {
orig.Spec.Template.Labels[k] = v
}
for k, v := range update.Spec.Template.Data {
orig.Spec.Template.Data[k] = v
}
// updated sealed secret file in-place avoiding clobbering the file upon rendering errors.
var out bytes.Buffer
if err := sealedSecretOutput(&out, outputFormat, codecs, orig); err != nil {
return err
}
if err := f.Truncate(0); err != nil {
return err
}
if _, err := f.Seek(0, 0); err != nil {
return err
}
if _, err := io.Copy(f, &out); err != nil {
return err
}
// we explicitly call f.Close() to return a potential error when closing the file that wouldn't be returned in the deferred f.Close()
if err := f.Close(); err != nil {
return err
}
return nil
} | identifier_body |
TableLaporan.ts | <?php
declare(strict_types=1);
namespace tlm\his\FatmaPharmacy\views\IkiDokterUi;
use tlm\libs\LowEnd\components\DateTimeException;
use Yii;
use yii\db\Exception;
/**
* @copyright PT Affordable App (Jl Mampang Prapatan VI no. 15B, Tegal Parang, Mampang, Jakarta Selatan, Jakarta, Indonesia)
* @license Affordable App License
* @author Hendra Gunawan <the.liquid.metal@gmail.com>
* @version 1.0
* @since 1.0
* @category application
*
* @see http://localhost/ori-source/fatma-pharmacy/views/ikidokter/laporan.php the original file
*/
final class TableLaporan
{
private string $output;
/**
* @author Hendra Gunawan
* @throws DateTimeException
* @throws Exception
*/
public function __construct(
string $registerId,
string $actionUrl,
string $dokterBySmfUrl,
string $dokterSelect,
string $smfSelect
) {
$toUserDate = Yii::$app->dateTime->transformFunc("toUserDate");
$toUserFloat = Yii::$app->number->toUserFloat();
$h = fn(string $str): string => Yii::$app->hash($str);
ob_clean();
ob_start();
$daftarResep = [];
$connection = Yii::$app->dbFatma;
$post = $_POST;
$html = "";
$frs = 0;
$fornas = 0;
$lainnya = 0;
$no = 0;
if (isset($post["search"])) {
$smfIsAll = !isset($post["filtersmf"]) || $post["filtersmf"] == "all";
$dokterIsAll = !isset($post["filterdokter"]) || $post["filterdokter"] == "all";
$thSmf = $smfIsAll ? '<th>SMF</th>' : "";
$thDokter = $dokterIsAll ? '<th>Dokter</th>' : "";
$r = 0;
$koderacik = "";
$resepnow = "";
$racik = 0;
$totresep = 0;
$subtotal = 0;
foreach ($daftarResep as $resep) {
if (!$resep->kodeRacik || $resep->kodeRacik != $koderacik) {
$r++;
} else {
$racik++;
}
if ($resep->noResep != $resepnow) {
if ($resepnow) {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
<td></td>
<td></td>
<td class='text-right'>" . $toUserFloat($subtotal) . "</td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>";
$subtotal = 0;
}
$resepnow = $resep->noResep;
$totresep++;
if ($smfIsAll) {
$sql = /** @lang SQL */ "
-- FILE: ".__FILE__."
-- LINE: ".__LINE__."
SELECT nama_smf
FROM rsupf.master_smf
WHERE kode = '{$resep->idPegawai}'
LIMIT 1
";
$smfname = $connection->createCommand($sql)->queryScalar();
$tdSmf = "<td>".$smfname."</td>";
} else {
$tdSmf = "";
}
$tdDokter = $dokterIsAll ? "<td>{$resep->name}</td>" : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td>{$resep->noResep}</td>
<td>{$toUserDate($resep->tanggalPenjualan)}</td>
";
} else {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
";
}
$subtotal += $resep->jumlahPenjualan * $resep->hargaJual;
if ($resep->formulariumNas == "1") {
$tdFornas = "v";
$fornas++;
} else {
$tdFornas = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "1") {
$tdFrs = "v";
$frs++;
} else {
$tdFrs = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "0") {
$tdLainnya = "v";
$lainnya++;
} else {
$tdLainnya = "";
}
$html .= '
<td>' . $resep->namaBarang . '</td>
<td class="text-right">' . $resep->jumlahPenjualan . '</td>
<td class="text-right">' . $toUserFloat($resep->jumlahPenjualan * $resep->hargaJual) . '</td>
<td>' . $tdFornas . '</td>
<td>' . $tdFrs . '</td>
<td>' . $tdLainnya . '</td>
<td></td>
</tr>';
$koderacik = $resep->kodeRacik;
$no++;
}
}
?>
<script type="text/tsx">
namespace his.FatmaPharmacy.views.IkiDokter.Laporan {
export interface Fields {
idSmf: "filtersmf";
idDokter: "filterdokter";
tanggalMulai: "mulai";
tanggalSelesai: "selesai";
}
}
</script>
<script>
tlm.app.registerModule(class extends spa.BaseModule {
static get version() {return "2.0.0"}
static get widgetName() {return "_<?= $registerId ?>"}
_structure = {
row_1: {
widthColumn: {
heading3: {text: tlm.stringRegistry._<?= $h("???") ?>}
}
},
row_2: {
widthColumn: {
paragraph: {text: " "}
}
},
form: {
class: ".saringFrm",
row_1: {
box: {
title: tlm.stringRegistry._<?= $h("Saring") ?>,
formGroup_1: {
label: tlm.stringRegistry._<?= $h("SMF") ?>,
select: {class: ".idSmfFld", name: "idSmf"}
},
formGroup_2: {
label: tlm.stringRegistry._<?= $h("Dokter") ?>,
select: {class: ".idDokterFld", name: "idDokter"}
},
formGroup_3: {
label: tlm.stringRegistry._<?= $h("Tanggal Mulai") ?>,
input: {class: ".tanggalMulaiFld", name: "tanggalMulai"}
},
formGroup_4: {
label: tlm.stringRegistry._<?= $h("Tanggal Selesai") ?>,
input: {class: ".tanggalSelesaiFld", name: "tanggalSelesai"}
}
}
},
row_2: {
column: {
class: "text-center",
SRButton: {sLabel: tlm.stringRegistry._<?= $h("Terapkan") ?>}
}
}
}
};
constructor(divElm) {
super();
divElm.innerHTML = spl.LayoutDrawer.draw(this._structure).content;
/** @type {HTMLSelectElement} */ const idSmfFld = divElm.querySelector(".idSmfFld");
/** @type {HTMLSelectElement} */ const idDokterFld = divElm.querySelector(".idDokterFld");
tlm.app.registerSelect("_<?= $smfSelect ?>", idSmfFld);
tlm.app.registerSelect("_<?= $dokterSelect ?>", idDokterFld);
this._selects.push(idSmfFld, idDokterFld);
const saringWgt = new spl.AjaxFormWidget({
element: divElm.querySelector(".saringFrm"),
/** @param {his.FatmaPharmacy.views.IkiDokter.Laporan.Fields} data */
loadData(data) {
idSmfFld.value = data.idSmf ?? "";
idDokterFld.value = data.idDokter ?? "";
tanggalMulaiWgt.value = data.tanggalMulai ?? "";
tanggalSelesaiWgt.value = data.tanggalSelesai ?? "";
},
resetBtnId: false,
actionUrl: "<?= $actionUrl ?>"
});
idSmfFld.addEventListener("change", (event) => {
$.post({
url: "<?= $dokterBySmfUrl ?>",
data: {q: event.target.value},
success(data) {idDokterFld.innerHTML = data}
});
});
let minTanggalMulai;
let maksTanggalSelesai;
const tanggalMulaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalMulaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() {
this._maxDate = maksTanggalSelesai;
},
onBeforeCloseDatetimePicker() {
minTanggalMulai = this._value;
},
...tlm.dateWidgetSetting
});
const tanggalSelesaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalSelesaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() {
this._minDate = minTanggalMulai;
},
onBeforeCloseDatetimePicker() {
maksTanggalSelesai = this._value;
},
...tlm.dateWidgetSetting
});
this._element = divElm;
divElm.moduleWidget = this;
this._widgets.push(saringWgt, tanggalMulaiWgt, tanggalSelesaiWgt);
tlm.app.registerWidget(this.constructor.widgetName, saringWgt);
}
});
</script>
<!-- TODO: html: convert to js -->
<div id="<?= $registerId ?>">
<h1>IKI Dokter</h1>
<form id="<?= $registerId ?>_frm"></form>
<table class="table table-striped">
<?php if (isset($post["search"])): ?>
<tr>
<td>Total Formularium RS</td>
<td>: <?= $frs ?> (<?= $toUserFloat($frs / $no * 100) ?> %)</td>
<td>Total Formularium Nasional</td>
<td>: <?= $fornas ?> (<?= $toUserFloat($fornas / $no * 100) ?> %)</td>
<td>Total Lainnya</td>
<td>: <?= $lainnya ?> (<?= $toUserFloat($lainnya / $no * 100) ?> %)</td>
</tr>
<?php endif ?>
</table>
<br/>
<br/>
<?php if (isset($post["search"])): ?>
<table class='table table-striped'>
<thead>
<tr>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thSmf ?>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thDokter ?>
<th>Resep</th>
<th>Tanggal</th>
<th>Nama Obat</th>
<th>Jumlah</th>
<th>Harga</th>
<th>Formularium Nasional</th>
<th>Formularium RS</th>
<th>Lainnya</th>
<th>Keterangan</th>
</tr>
</thead>
<tbody>
<?= $html ?>
</tbody>
</table>
<?php endif ?>
</div>
| ob_clean();
}
public function __toString(): string
{
return $this->output;
}
} | <?php
$this->output = ob_get_contents(); | random_line_split |
TableLaporan.ts | <?php
declare(strict_types=1);
namespace tlm\his\FatmaPharmacy\views\IkiDokterUi;
use tlm\libs\LowEnd\components\DateTimeException;
use Yii;
use yii\db\Exception;
/**
* @copyright PT Affordable App (Jl Mampang Prapatan VI no. 15B, Tegal Parang, Mampang, Jakarta Selatan, Jakarta, Indonesia)
* @license Affordable App License
* @author Hendra Gunawan <the.liquid.metal@gmail.com>
* @version 1.0
* @since 1.0
* @category application
*
* @see http://localhost/ori-source/fatma-pharmacy/views/ikidokter/laporan.php the original file
*/
final class TableLaporan
{
private string $output;
/**
* @author Hendra Gunawan
* @throws DateTimeException
* @throws Exception
*/
public function __construct(
string $registerId,
string $actionUrl,
string $dokterBySmfUrl,
string $dokterSelect,
string $smfSelect
) {
$toUserDate = Yii::$app->dateTime->transformFunc("toUserDate");
$toUserFloat = Yii::$app->number->toUserFloat();
$h = fn(string $str): string => Yii::$app->hash($str);
ob_clean();
ob_start();
$daftarResep = [];
$connection = Yii::$app->dbFatma;
$post = $_POST;
$html = "";
$frs = 0;
$fornas = 0;
$lainnya = 0;
$no = 0;
if (isset($post["search"])) {
$smfIsAll = !isset($post["filtersmf"]) || $post["filtersmf"] == "all";
$dokterIsAll = !isset($post["filterdokter"]) || $post["filterdokter"] == "all";
$thSmf = $smfIsAll ? '<th>SMF</th>' : "";
$thDokter = $dokterIsAll ? '<th>Dokter</th>' : "";
$r = 0;
$koderacik = "";
$resepnow = "";
$racik = 0;
$totresep = 0;
$subtotal = 0;
foreach ($daftarResep as $resep) {
if (!$resep->kodeRacik || $resep->kodeRacik != $koderacik) | else {
$racik++;
}
if ($resep->noResep != $resepnow) {
if ($resepnow) {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
<td></td>
<td></td>
<td class='text-right'>" . $toUserFloat($subtotal) . "</td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>";
$subtotal = 0;
}
$resepnow = $resep->noResep;
$totresep++;
if ($smfIsAll) {
$sql = /** @lang SQL */ "
-- FILE: ".__FILE__."
-- LINE: ".__LINE__."
SELECT nama_smf
FROM rsupf.master_smf
WHERE kode = '{$resep->idPegawai}'
LIMIT 1
";
$smfname = $connection->createCommand($sql)->queryScalar();
$tdSmf = "<td>".$smfname."</td>";
} else {
$tdSmf = "";
}
$tdDokter = $dokterIsAll ? "<td>{$resep->name}</td>" : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td>{$resep->noResep}</td>
<td>{$toUserDate($resep->tanggalPenjualan)}</td>
";
} else {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
";
}
$subtotal += $resep->jumlahPenjualan * $resep->hargaJual;
if ($resep->formulariumNas == "1") {
$tdFornas = "v";
$fornas++;
} else {
$tdFornas = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "1") {
$tdFrs = "v";
$frs++;
} else {
$tdFrs = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "0") {
$tdLainnya = "v";
$lainnya++;
} else {
$tdLainnya = "";
}
$html .= '
<td>' . $resep->namaBarang . '</td>
<td class="text-right">' . $resep->jumlahPenjualan . '</td>
<td class="text-right">' . $toUserFloat($resep->jumlahPenjualan * $resep->hargaJual) . '</td>
<td>' . $tdFornas . '</td>
<td>' . $tdFrs . '</td>
<td>' . $tdLainnya . '</td>
<td></td>
</tr>';
$koderacik = $resep->kodeRacik;
$no++;
}
}
?>
<script type="text/tsx">
namespace his.FatmaPharmacy.views.IkiDokter.Laporan {
export interface Fields {
idSmf: "filtersmf";
idDokter: "filterdokter";
tanggalMulai: "mulai";
tanggalSelesai: "selesai";
}
}
</script>
<script>
tlm.app.registerModule(class extends spa.BaseModule {
static get version() {return "2.0.0"}
static get widgetName() {return "_<?= $registerId ?>"}
_structure = {
row_1: {
widthColumn: {
heading3: {text: tlm.stringRegistry._<?= $h("???") ?>}
}
},
row_2: {
widthColumn: {
paragraph: {text: " "}
}
},
form: {
class: ".saringFrm",
row_1: {
box: {
title: tlm.stringRegistry._<?= $h("Saring") ?>,
formGroup_1: {
label: tlm.stringRegistry._<?= $h("SMF") ?>,
select: {class: ".idSmfFld", name: "idSmf"}
},
formGroup_2: {
label: tlm.stringRegistry._<?= $h("Dokter") ?>,
select: {class: ".idDokterFld", name: "idDokter"}
},
formGroup_3: {
label: tlm.stringRegistry._<?= $h("Tanggal Mulai") ?>,
input: {class: ".tanggalMulaiFld", name: "tanggalMulai"}
},
formGroup_4: {
label: tlm.stringRegistry._<?= $h("Tanggal Selesai") ?>,
input: {class: ".tanggalSelesaiFld", name: "tanggalSelesai"}
}
}
},
row_2: {
column: {
class: "text-center",
SRButton: {sLabel: tlm.stringRegistry._<?= $h("Terapkan") ?>}
}
}
}
};
constructor(divElm) {
super();
divElm.innerHTML = spl.LayoutDrawer.draw(this._structure).content;
/** @type {HTMLSelectElement} */ const idSmfFld = divElm.querySelector(".idSmfFld");
/** @type {HTMLSelectElement} */ const idDokterFld = divElm.querySelector(".idDokterFld");
tlm.app.registerSelect("_<?= $smfSelect ?>", idSmfFld);
tlm.app.registerSelect("_<?= $dokterSelect ?>", idDokterFld);
this._selects.push(idSmfFld, idDokterFld);
const saringWgt = new spl.AjaxFormWidget({
element: divElm.querySelector(".saringFrm"),
/** @param {his.FatmaPharmacy.views.IkiDokter.Laporan.Fields} data */
loadData(data) {
idSmfFld.value = data.idSmf ?? "";
idDokterFld.value = data.idDokter ?? "";
tanggalMulaiWgt.value = data.tanggalMulai ?? "";
tanggalSelesaiWgt.value = data.tanggalSelesai ?? "";
},
resetBtnId: false,
actionUrl: "<?= $actionUrl ?>"
});
idSmfFld.addEventListener("change", (event) => {
$.post({
url: "<?= $dokterBySmfUrl ?>",
data: {q: event.target.value},
success(data) {idDokterFld.innerHTML = data}
});
});
let minTanggalMulai;
let maksTanggalSelesai;
const tanggalMulaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalMulaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() {
this._maxDate = maksTanggalSelesai;
},
onBeforeCloseDatetimePicker() {
minTanggalMulai = this._value;
},
...tlm.dateWidgetSetting
});
const tanggalSelesaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalSelesaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() {
this._minDate = minTanggalMulai;
},
onBeforeCloseDatetimePicker() {
maksTanggalSelesai = this._value;
},
...tlm.dateWidgetSetting
});
this._element = divElm;
divElm.moduleWidget = this;
this._widgets.push(saringWgt, tanggalMulaiWgt, tanggalSelesaiWgt);
tlm.app.registerWidget(this.constructor.widgetName, saringWgt);
}
});
</script>
<!-- TODO: html: convert to js -->
<div id="<?= $registerId ?>">
<h1>IKI Dokter</h1>
<form id="<?= $registerId ?>_frm"></form>
<table class="table table-striped">
<?php if (isset($post["search"])): ?>
<tr>
<td>Total Formularium RS</td>
<td>: <?= $frs ?> (<?= $toUserFloat($frs / $no * 100) ?> %)</td>
<td>Total Formularium Nasional</td>
<td>: <?= $fornas ?> (<?= $toUserFloat($fornas / $no * 100) ?> %)</td>
<td>Total Lainnya</td>
<td>: <?= $lainnya ?> (<?= $toUserFloat($lainnya / $no * 100) ?> %)</td>
</tr>
<?php endif ?>
</table>
<br/>
<br/>
<?php if (isset($post["search"])): ?>
<table class='table table-striped'>
<thead>
<tr>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thSmf ?>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thDokter ?>
<th>Resep</th>
<th>Tanggal</th>
<th>Nama Obat</th>
<th>Jumlah</th>
<th>Harga</th>
<th>Formularium Nasional</th>
<th>Formularium RS</th>
<th>Lainnya</th>
<th>Keterangan</th>
</tr>
</thead>
<tbody>
<?= $html ?>
</tbody>
</table>
<?php endif ?>
</div>
<?php
$this->output = ob_get_contents();
ob_clean();
}
public function __toString(): string
{
return $this->output;
}
}
| {
$r++;
} | conditional_block |
TableLaporan.ts | <?php
declare(strict_types=1);
namespace tlm\his\FatmaPharmacy\views\IkiDokterUi;
use tlm\libs\LowEnd\components\DateTimeException;
use Yii;
use yii\db\Exception;
/**
* @copyright PT Affordable App (Jl Mampang Prapatan VI no. 15B, Tegal Parang, Mampang, Jakarta Selatan, Jakarta, Indonesia)
* @license Affordable App License
* @author Hendra Gunawan <the.liquid.metal@gmail.com>
* @version 1.0
* @since 1.0
* @category application
*
* @see http://localhost/ori-source/fatma-pharmacy/views/ikidokter/laporan.php the original file
*/
final class TableLaporan
{
private string $output;
/**
* @author Hendra Gunawan
* @throws DateTimeException
* @throws Exception
*/
public function __construct(
string $registerId,
string $actionUrl,
string $dokterBySmfUrl,
string $dokterSelect,
string $smfSelect
) {
$toUserDate = Yii::$app->dateTime->transformFunc("toUserDate");
$toUserFloat = Yii::$app->number->toUserFloat();
$h = fn(string $str): string => Yii::$app->hash($str);
ob_clean();
ob_start();
$daftarResep = [];
$connection = Yii::$app->dbFatma;
$post = $_POST;
$html = "";
$frs = 0;
$fornas = 0;
$lainnya = 0;
$no = 0;
if (isset($post["search"])) {
$smfIsAll = !isset($post["filtersmf"]) || $post["filtersmf"] == "all";
$dokterIsAll = !isset($post["filterdokter"]) || $post["filterdokter"] == "all";
$thSmf = $smfIsAll ? '<th>SMF</th>' : "";
$thDokter = $dokterIsAll ? '<th>Dokter</th>' : "";
$r = 0;
$koderacik = "";
$resepnow = "";
$racik = 0;
$totresep = 0;
$subtotal = 0;
foreach ($daftarResep as $resep) {
if (!$resep->kodeRacik || $resep->kodeRacik != $koderacik) {
$r++;
} else {
$racik++;
}
if ($resep->noResep != $resepnow) {
if ($resepnow) {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
<td></td>
<td></td>
<td class='text-right'>" . $toUserFloat($subtotal) . "</td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>";
$subtotal = 0;
}
$resepnow = $resep->noResep;
$totresep++;
if ($smfIsAll) {
$sql = /** @lang SQL */ "
-- FILE: ".__FILE__."
-- LINE: ".__LINE__."
SELECT nama_smf
FROM rsupf.master_smf
WHERE kode = '{$resep->idPegawai}'
LIMIT 1
";
$smfname = $connection->createCommand($sql)->queryScalar();
$tdSmf = "<td>".$smfname."</td>";
} else {
$tdSmf = "";
}
$tdDokter = $dokterIsAll ? "<td>{$resep->name}</td>" : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td>{$resep->noResep}</td>
<td>{$toUserDate($resep->tanggalPenjualan)}</td>
";
} else {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
";
}
$subtotal += $resep->jumlahPenjualan * $resep->hargaJual;
if ($resep->formulariumNas == "1") {
$tdFornas = "v";
$fornas++;
} else {
$tdFornas = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "1") {
$tdFrs = "v";
$frs++;
} else {
$tdFrs = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "0") {
$tdLainnya = "v";
$lainnya++;
} else {
$tdLainnya = "";
}
$html .= '
<td>' . $resep->namaBarang . '</td>
<td class="text-right">' . $resep->jumlahPenjualan . '</td>
<td class="text-right">' . $toUserFloat($resep->jumlahPenjualan * $resep->hargaJual) . '</td>
<td>' . $tdFornas . '</td>
<td>' . $tdFrs . '</td>
<td>' . $tdLainnya . '</td>
<td></td>
</tr>';
$koderacik = $resep->kodeRacik;
$no++;
}
}
?>
<script type="text/tsx">
namespace his.FatmaPharmacy.views.IkiDokter.Laporan {
export interface Fields {
idSmf: "filtersmf";
idDokter: "filterdokter";
tanggalMulai: "mulai";
tanggalSelesai: "selesai";
}
}
</script>
<script>
tlm.app.registerModule(class extends spa.BaseModule {
static get version() {return "2.0.0"}
static get widgetName() {return "_<?= $registerId ?>"}
_structure = {
row_1: {
widthColumn: {
heading3: {text: tlm.stringRegistry._<?= $h("???") ?>}
}
},
row_2: {
widthColumn: {
paragraph: {text: " "}
}
},
form: {
class: ".saringFrm",
row_1: {
box: {
title: tlm.stringRegistry._<?= $h("Saring") ?>,
formGroup_1: {
label: tlm.stringRegistry._<?= $h("SMF") ?>,
select: {class: ".idSmfFld", name: "idSmf"}
},
formGroup_2: {
label: tlm.stringRegistry._<?= $h("Dokter") ?>,
select: {class: ".idDokterFld", name: "idDokter"}
},
formGroup_3: {
label: tlm.stringRegistry._<?= $h("Tanggal Mulai") ?>,
input: {class: ".tanggalMulaiFld", name: "tanggalMulai"}
},
formGroup_4: {
label: tlm.stringRegistry._<?= $h("Tanggal Selesai") ?>,
input: {class: ".tanggalSelesaiFld", name: "tanggalSelesai"}
}
}
},
row_2: {
column: {
class: "text-center",
SRButton: {sLabel: tlm.stringRegistry._<?= $h("Terapkan") ?>}
}
}
}
};
constructor(divElm) {
super();
divElm.innerHTML = spl.LayoutDrawer.draw(this._structure).content;
/** @type {HTMLSelectElement} */ const idSmfFld = divElm.querySelector(".idSmfFld");
/** @type {HTMLSelectElement} */ const idDokterFld = divElm.querySelector(".idDokterFld");
tlm.app.registerSelect("_<?= $smfSelect ?>", idSmfFld);
tlm.app.registerSelect("_<?= $dokterSelect ?>", idDokterFld);
this._selects.push(idSmfFld, idDokterFld);
const saringWgt = new spl.AjaxFormWidget({
element: divElm.querySelector(".saringFrm"),
/** @param {his.FatmaPharmacy.views.IkiDokter.Laporan.Fields} data */
loadData(data) {
idSmfFld.value = data.idSmf ?? "";
idDokterFld.value = data.idDokter ?? "";
tanggalMulaiWgt.value = data.tanggalMulai ?? "";
tanggalSelesaiWgt.value = data.tanggalSelesai ?? "";
},
resetBtnId: false,
actionUrl: "<?= $actionUrl ?>"
});
idSmfFld.addEventListener("change", (event) => {
$.post({
url: "<?= $dokterBySmfUrl ?>",
data: {q: event.target.value},
success(data) {idDokterFld.innerHTML = data}
});
});
let minTanggalMulai;
let maksTanggalSelesai;
const tanggalMulaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalMulaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() {
this._maxDate = maksTanggalSelesai;
},
| () {
minTanggalMulai = this._value;
},
...tlm.dateWidgetSetting
});
const tanggalSelesaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalSelesaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() {
this._minDate = minTanggalMulai;
},
onBeforeCloseDatetimePicker() {
maksTanggalSelesai = this._value;
},
...tlm.dateWidgetSetting
});
this._element = divElm;
divElm.moduleWidget = this;
this._widgets.push(saringWgt, tanggalMulaiWgt, tanggalSelesaiWgt);
tlm.app.registerWidget(this.constructor.widgetName, saringWgt);
}
});
</script>
<!-- TODO: html: convert to js -->
<div id="<?= $registerId ?>">
<h1>IKI Dokter</h1>
<form id="<?= $registerId ?>_frm"></form>
<table class="table table-striped">
<?php if (isset($post["search"])): ?>
<tr>
<td>Total Formularium RS</td>
<td>: <?= $frs ?> (<?= $toUserFloat($frs / $no * 100) ?> %)</td>
<td>Total Formularium Nasional</td>
<td>: <?= $fornas ?> (<?= $toUserFloat($fornas / $no * 100) ?> %)</td>
<td>Total Lainnya</td>
<td>: <?= $lainnya ?> (<?= $toUserFloat($lainnya / $no * 100) ?> %)</td>
</tr>
<?php endif ?>
</table>
<br/>
<br/>
<?php if (isset($post["search"])): ?>
<table class='table table-striped'>
<thead>
<tr>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thSmf ?>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thDokter ?>
<th>Resep</th>
<th>Tanggal</th>
<th>Nama Obat</th>
<th>Jumlah</th>
<th>Harga</th>
<th>Formularium Nasional</th>
<th>Formularium RS</th>
<th>Lainnya</th>
<th>Keterangan</th>
</tr>
</thead>
<tbody>
<?= $html ?>
</tbody>
</table>
<?php endif ?>
</div>
<?php
$this->output = ob_get_contents();
ob_clean();
}
public function __toString(): string
{
return $this->output;
}
}
| onBeforeCloseDatetimePicker | identifier_name |
TableLaporan.ts | <?php
declare(strict_types=1);
namespace tlm\his\FatmaPharmacy\views\IkiDokterUi;
use tlm\libs\LowEnd\components\DateTimeException;
use Yii;
use yii\db\Exception;
/**
* @copyright PT Affordable App (Jl Mampang Prapatan VI no. 15B, Tegal Parang, Mampang, Jakarta Selatan, Jakarta, Indonesia)
* @license Affordable App License
* @author Hendra Gunawan <the.liquid.metal@gmail.com>
* @version 1.0
* @since 1.0
* @category application
*
* @see http://localhost/ori-source/fatma-pharmacy/views/ikidokter/laporan.php the original file
*/
final class TableLaporan
{
private string $output;
/**
* @author Hendra Gunawan
* @throws DateTimeException
* @throws Exception
*/
public function __construct(
string $registerId,
string $actionUrl,
string $dokterBySmfUrl,
string $dokterSelect,
string $smfSelect
) {
$toUserDate = Yii::$app->dateTime->transformFunc("toUserDate");
$toUserFloat = Yii::$app->number->toUserFloat();
$h = fn(string $str): string => Yii::$app->hash($str);
ob_clean();
ob_start();
$daftarResep = [];
$connection = Yii::$app->dbFatma;
$post = $_POST;
$html = "";
$frs = 0;
$fornas = 0;
$lainnya = 0;
$no = 0;
if (isset($post["search"])) {
$smfIsAll = !isset($post["filtersmf"]) || $post["filtersmf"] == "all";
$dokterIsAll = !isset($post["filterdokter"]) || $post["filterdokter"] == "all";
$thSmf = $smfIsAll ? '<th>SMF</th>' : "";
$thDokter = $dokterIsAll ? '<th>Dokter</th>' : "";
$r = 0;
$koderacik = "";
$resepnow = "";
$racik = 0;
$totresep = 0;
$subtotal = 0;
foreach ($daftarResep as $resep) {
if (!$resep->kodeRacik || $resep->kodeRacik != $koderacik) {
$r++;
} else {
$racik++;
}
if ($resep->noResep != $resepnow) {
if ($resepnow) {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
<td></td>
<td></td>
<td class='text-right'>" . $toUserFloat($subtotal) . "</td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>";
$subtotal = 0;
}
$resepnow = $resep->noResep;
$totresep++;
if ($smfIsAll) {
$sql = /** @lang SQL */ "
-- FILE: ".__FILE__."
-- LINE: ".__LINE__."
SELECT nama_smf
FROM rsupf.master_smf
WHERE kode = '{$resep->idPegawai}'
LIMIT 1
";
$smfname = $connection->createCommand($sql)->queryScalar();
$tdSmf = "<td>".$smfname."</td>";
} else {
$tdSmf = "";
}
$tdDokter = $dokterIsAll ? "<td>{$resep->name}</td>" : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td>{$resep->noResep}</td>
<td>{$toUserDate($resep->tanggalPenjualan)}</td>
";
} else {
$tdSmf = $smfIsAll ? '<td></td>' : "";
$tdDokter = $dokterIsAll ? '<td></td>' : "";
$html .= "
<tr>
$tdSmf
$tdDokter
<td></td>
<td></td>
";
}
$subtotal += $resep->jumlahPenjualan * $resep->hargaJual;
if ($resep->formulariumNas == "1") {
$tdFornas = "v";
$fornas++;
} else {
$tdFornas = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "1") {
$tdFrs = "v";
$frs++;
} else {
$tdFrs = "";
}
if ($resep->formulariumNas == "0" && $resep->formulariumRs == "0") {
$tdLainnya = "v";
$lainnya++;
} else {
$tdLainnya = "";
}
$html .= '
<td>' . $resep->namaBarang . '</td>
<td class="text-right">' . $resep->jumlahPenjualan . '</td>
<td class="text-right">' . $toUserFloat($resep->jumlahPenjualan * $resep->hargaJual) . '</td>
<td>' . $tdFornas . '</td>
<td>' . $tdFrs . '</td>
<td>' . $tdLainnya . '</td>
<td></td>
</tr>';
$koderacik = $resep->kodeRacik;
$no++;
}
}
?>
<script type="text/tsx">
namespace his.FatmaPharmacy.views.IkiDokter.Laporan {
export interface Fields {
idSmf: "filtersmf";
idDokter: "filterdokter";
tanggalMulai: "mulai";
tanggalSelesai: "selesai";
}
}
</script>
<script>
tlm.app.registerModule(class extends spa.BaseModule {
static get version() {return "2.0.0"}
static get widgetName() {return "_<?= $registerId ?>"}
_structure = {
row_1: {
widthColumn: {
heading3: {text: tlm.stringRegistry._<?= $h("???") ?>}
}
},
row_2: {
widthColumn: {
paragraph: {text: " "}
}
},
form: {
class: ".saringFrm",
row_1: {
box: {
title: tlm.stringRegistry._<?= $h("Saring") ?>,
formGroup_1: {
label: tlm.stringRegistry._<?= $h("SMF") ?>,
select: {class: ".idSmfFld", name: "idSmf"}
},
formGroup_2: {
label: tlm.stringRegistry._<?= $h("Dokter") ?>,
select: {class: ".idDokterFld", name: "idDokter"}
},
formGroup_3: {
label: tlm.stringRegistry._<?= $h("Tanggal Mulai") ?>,
input: {class: ".tanggalMulaiFld", name: "tanggalMulai"}
},
formGroup_4: {
label: tlm.stringRegistry._<?= $h("Tanggal Selesai") ?>,
input: {class: ".tanggalSelesaiFld", name: "tanggalSelesai"}
}
}
},
row_2: {
column: {
class: "text-center",
SRButton: {sLabel: tlm.stringRegistry._<?= $h("Terapkan") ?>}
}
}
}
};
constructor(divElm) {
super();
divElm.innerHTML = spl.LayoutDrawer.draw(this._structure).content;
/** @type {HTMLSelectElement} */ const idSmfFld = divElm.querySelector(".idSmfFld");
/** @type {HTMLSelectElement} */ const idDokterFld = divElm.querySelector(".idDokterFld");
tlm.app.registerSelect("_<?= $smfSelect ?>", idSmfFld);
tlm.app.registerSelect("_<?= $dokterSelect ?>", idDokterFld);
this._selects.push(idSmfFld, idDokterFld);
const saringWgt = new spl.AjaxFormWidget({
element: divElm.querySelector(".saringFrm"),
/** @param {his.FatmaPharmacy.views.IkiDokter.Laporan.Fields} data */
loadData(data) {
idSmfFld.value = data.idSmf ?? "";
idDokterFld.value = data.idDokter ?? "";
tanggalMulaiWgt.value = data.tanggalMulai ?? "";
tanggalSelesaiWgt.value = data.tanggalSelesai ?? "";
},
resetBtnId: false,
actionUrl: "<?= $actionUrl ?>"
});
idSmfFld.addEventListener("change", (event) => {
$.post({
url: "<?= $dokterBySmfUrl ?>",
data: {q: event.target.value},
success(data) {idDokterFld.innerHTML = data}
});
});
let minTanggalMulai;
let maksTanggalSelesai;
const tanggalMulaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalMulaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() | ,
onBeforeCloseDatetimePicker() {
minTanggalMulai = this._value;
},
...tlm.dateWidgetSetting
});
const tanggalSelesaiWgt = new spl.DateTimeWidget({
element: divElm.querySelector(".tanggalSelesaiFld"),
// numberOfMonths: 1,
onBeforeOpenDatetimePicker() {
this._minDate = minTanggalMulai;
},
onBeforeCloseDatetimePicker() {
maksTanggalSelesai = this._value;
},
...tlm.dateWidgetSetting
});
this._element = divElm;
divElm.moduleWidget = this;
this._widgets.push(saringWgt, tanggalMulaiWgt, tanggalSelesaiWgt);
tlm.app.registerWidget(this.constructor.widgetName, saringWgt);
}
});
</script>
<!-- TODO: html: convert to js -->
<div id="<?= $registerId ?>">
<h1>IKI Dokter</h1>
<form id="<?= $registerId ?>_frm"></form>
<table class="table table-striped">
<?php if (isset($post["search"])): ?>
<tr>
<td>Total Formularium RS</td>
<td>: <?= $frs ?> (<?= $toUserFloat($frs / $no * 100) ?> %)</td>
<td>Total Formularium Nasional</td>
<td>: <?= $fornas ?> (<?= $toUserFloat($fornas / $no * 100) ?> %)</td>
<td>Total Lainnya</td>
<td>: <?= $lainnya ?> (<?= $toUserFloat($lainnya / $no * 100) ?> %)</td>
</tr>
<?php endif ?>
</table>
<br/>
<br/>
<?php if (isset($post["search"])): ?>
<table class='table table-striped'>
<thead>
<tr>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thSmf ?>
<?= /** @noinspection PhpUndefinedVariableInspection */ $thDokter ?>
<th>Resep</th>
<th>Tanggal</th>
<th>Nama Obat</th>
<th>Jumlah</th>
<th>Harga</th>
<th>Formularium Nasional</th>
<th>Formularium RS</th>
<th>Lainnya</th>
<th>Keterangan</th>
</tr>
</thead>
<tbody>
<?= $html ?>
</tbody>
</table>
<?php endif ?>
</div>
<?php
$this->output = ob_get_contents();
ob_clean();
}
public function __toString(): string
{
return $this->output;
}
}
| {
this._maxDate = maksTanggalSelesai;
} | identifier_body |
ItemView.js | /**
* @file View that builds the map by showing the items' images at their specified locations.
*/
"use strict";
/**
* @class ItemView
*/
function ItemView() {
// Private variables
// These are jQuery objects corresponding to elements
let $mapImage;
let $carpet;
let $backArrow;
let $forwardArrow;
let $avatar;
let $arrowsAndItemOrderNumbers;
let $itemOrderNumbers;
let previousAngle = 0;
let viewModel;
let itemsDetails;
let itemToShowBecauseItIsInTheURL;
let performAnimations;
// Private functions
const cacheJQueryObjects = () => {
$mapImage = $("#MapImage");
$carpet = $('#Carpet');
$arrowsAndItemOrderNumbers = $('#ArrowsAndItemOrderNumbers');
$backArrow = $('#ArrowBack');
$forwardArrow = $('#ArrowForward');
$itemOrderNumbers = $('#ItemOrderNumbers');
$avatar = $('#Avatar');
};
/**
* FIXME
* Now I don't fetch item details from the backend because the index.html file comes with
* them ready to use. I collect the details from the elements.
*
* See server-rendering/writer-home-page-generator.js to know how the details are incorporated
* in the page.
*/
const collectItemDetailsFromMap = () => {
itemsDetails = [];
const itemElements = document.querySelectorAll("[data-nid]");
itemElements.forEach((element) => {
itemsDetails.push({ "nid": element.dataset.nid,
"field_order_number": element.dataset.order,
"title": element.dataset.title,
"field_coordinate_x": element.dataset.xCoord,
"field_coordinate_y": element.dataset.yCoord,
"field_item_type": element.dataset.type,
"path": element.dataset.path });
});
// the viewModel needs to know about the items details as well
viewModel.setItemsDetails(itemsDetails);
}
const moveToStartingPointOfSpiral = () => {
// We are going to move the carpet to the starting point of the spiral
// We set the animation running. The viewModel will take care of closing
// the item content panel, if any. It will also close any contact me form.
viewModel.setAnimationToNextItemRunning(true);
const viewport = viewModel.getViewPort();
// Rotating the carpet to the horizontal position it's supposed to have
// at the starting point of the spiral
$carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] },
{ duration: 1000, easing: "linear", loop: false});
previousAngle = 0;
const mapImagePosition = $mapImage.position();
const currentTop = Math.round(mapImagePosition.top);
const currentLeft = Math.round(mapImagePosition.left);
let animationDuration = 1500;
// If the carpet is already very near the place it's going to,
// I want to get there very quickly so that the user can
// click on the arrows with no delay
// If I have the animation last 1500ms, the user may click on an arrow and
// nothing happens
if (Math.abs(currentTop - (viewport.height / 2 - 3500)) < 200 &&
Math.abs(currentLeft - (viewport.width / 2 - 3500)) < 200) {
animationDuration = 100;
}
// Now animating the carpet to go to the starting point of the spiral
$mapImage.animate({ top: viewport.height / 2 - 3500 ,
left: viewport.width / 2 - 3500 }, animationDuration, null,
() => {
// console.log('animation to spiral starting point completed');
// Animation completed
viewModel.setAnimationToNextItemRunning(false);
}
);
};
const clickOnArrowHandler = (event) => {
// console.log(event);
// console.log(viewModel.getAnimationToNextItemRunning());
// Only if we are not already flying to the next item, do the following
if (!viewModel.getAnimationToNextItemRunning()) {
let itemToVisitNext;
// Determining the item to visit next
if (!event && itemToShowBecauseItIsInTheURL) {
// This is in the case I have to move directly to an item because it's in the URL
itemToVisitNext = itemToShowBecauseItIsInTheURL;
itemToShowBecauseItIsInTheURL = undefined;
performAnimations = false;
// console.log("clickOnArrowHandler, itemToShowBecauseItIsInTheURL ", itemToShowBecauseItIsInTheURL);
// console.log("performAnimations ", performAnimations);
} else {
// the parameter tells if we are going forward or back
itemToVisitNext = viewModel.getItemToVisitNext(event.target.id === "ArrowForward");
}
if (itemToVisitNext) {
const viewport = viewModel.getViewPort();
// When performing the animation the View Model needs to know so that it
// can tell other views
viewModel.setAnimationToNextItemRunning(true);
// left and top attributes to give to the map to get to the item
const positionItemToVisitNext = { left: viewport.width / 2 - itemToVisitNext.field_coordinate_x,
top: viewport.height / 2 - itemToVisitNext.field_coordinate_y };
const mapImagePosition = $mapImage.position();
const currentTop = Math.round(mapImagePosition.top);
const currentLeft = Math.round(mapImagePosition.left);
| // The angle of the direction we take to get to the item. Used to rotate the carpet accordingly
const angle = Math.atan2(delta_y, delta_x) * (180 / Math.PI);
if (performAnimations) {
// Rotating the carpet
$carpet.velocity({ transform: ["rotateZ(" + angle + "deg)", "rotateZ(" + previousAngle + "deg)"] },
{ duration: 1000, easing: "linear", loop: false});
} else {
// Rotate the carpet with no animation
$carpet.css("transform", "rotateZ(" + angle + "deg)");
}
previousAngle = angle;
const maxDelta = Math.max(Math.abs(delta_x), Math.abs(delta_y));
// This is to make the carpet stop before covering the image
// We don't want the carpet to be over the item's image
const approachingFactor = maxDelta / 100;
const showingItemAtTheEndOfTheAnimation = () => {
viewModel.setAnimationToNextItemRunning(false);
updateItemOrderNumbers(itemToVisitNext);
viewModel.showItem();
}
if (performAnimations) {
$mapImage.animate({ top: positionItemToVisitNext.top + (delta_y / approachingFactor),
left: positionItemToVisitNext.left + (delta_x / approachingFactor)}, 1500, null,
() => {
showingItemAtTheEndOfTheAnimation();
}
);
} else {
$mapImage.css("top", positionItemToVisitNext.top + (delta_y / approachingFactor));
$mapImage.css("left", positionItemToVisitNext.left + (delta_x / approachingFactor));
showingItemAtTheEndOfTheAnimation();
// Now I can finally reset performAnimations to true to restart doing animations
performAnimations = true;
}
}
}
};
/**
* To update the order number of the item currently visited as shown between the arrows.
* The total number of items is shown as well.
*
* @param item
*/
const updateItemOrderNumbers = (item) => {
if (item)
$itemOrderNumbers.html("<span>" + item.field_order_number + "/" + viewModel.getNumberOfItems() + "</span>");
else
$itemOrderNumbers.html("<span>Click right arrow</span>");
};
/**
* This is about registering handlers for standard events like click
* @memberOf ItemView
*/
const setupStandardEventHandlers = () => {
//console.log("binding events");
$backArrow.bind('click', clickOnArrowHandler);
$forwardArrow.bind('click', clickOnArrowHandler);
};
/**
* registerEventHandlers is the standard name for the function that attaches event handlers
* I'm talking about my custom jquery events
* No standard events like click
* @memberOf ItemView
*/
const registerEventHandlers = () => {
// Hide the arrows only on small screens. On large screens keep them.
const hideNavigationArrows = () => {
if (viewModel.itIsASmallScreen())
$arrowsAndItemOrderNumbers.hide();
};
const showNavigationArrows = () => {
if (!$arrowsAndItemOrderNumbers.is(":visible") && $mapImage.is(":visible"))
$arrowsAndItemOrderNumbers.show();
};
// We have to hide the arrows when the item content dialog is showing
viewModel.attachEventHandler('ViewModel.itemcontent.beingshown', hideNavigationArrows);
// We restore the arrows when the item content dialog is hidden
viewModel.attachEventHandler('ViewModel.itemcontent.beinghidden', showNavigationArrows);
viewModel.attachEventHandler('ViewModel.contactme.beingshown', hideNavigationArrows);
viewModel.attachEventHandler('ViewModel.contactme.beinghidden', showNavigationArrows);
// Going to the home page. Have to hide the map, reset some variables, center the (hidden) map and more
viewModel.attachEventHandler('ViewModel.home.goto', () => {
// Going to the home, hiding everything and resetting some variables
// Moving the map back to the center
$mapImage.css({top: "calc(-3500px + 50vh)", left: "calc(-3500px + 50vw)"});
// Rotating back the carpet to horizontal direction
$carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] },
{ duration: 1000, easing: "linear", loop: false});
$mapImage.hide();
$carpet.hide();
$arrowsAndItemOrderNumbers.hide();
$avatar.hide();
itemToShowBecauseItIsInTheURL = undefined;
previousAngle = 0;
updateItemOrderNumbers(null);
});
// @see ViewModel::requestItemsDetailsFromModel
viewModel.attachEventHandler('ViewModel.map.show', () => {
if (!itemsDetails) {
// Exception! There is a bug here!
Sentry.captureMessage("itemsDetails not defined! ViewModel.map.show --- ItemView");
}
$mapImage.show();
$carpet.show();
$arrowsAndItemOrderNumbers.css('display', 'flex');
$avatar.show();
if (document.location.pathname === "/web-writer-tech-and-humanity") {
moveToStartingPointOfSpiral();
} else {
// If we are showing a specific item, we need to move the carpet to it
itemToShowBecauseItIsInTheURL = viewModel.getItemToShowBecauseItIsInTheURL();
if (itemToShowBecauseItIsInTheURL) {
// When showing an item because the user landed directly on the item's url, we simulate a click on an arrow
// that will move the carpet to the item
clickOnArrowHandler();
}
}
});
};
return {
init: (viewModelToUse) => {
viewModel = viewModelToUse;
performAnimations = true;
cacheJQueryObjects();
setupStandardEventHandlers();
registerEventHandlers();
collectItemDetailsFromMap();
}
}
} | // Differences in x and y we need to travel to get to the item from the current position
const delta_x = (currentLeft - positionItemToVisitNext.left);
const delta_y = (currentTop - positionItemToVisitNext.top);
| random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.