file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
size_cache_fs.go | package kafero
import (
"encoding/json"
"fmt"
"github.com/wangjia184/sortedset"
"io"
"math"
"os"
"path/filepath"
"sync"
"syscall"
"time"
)
// The SizeCacheFS is a cache file system composed of a cache layer and a base layer
// the cache layer has a maximal size, and files get evicted relative to their
// last use time (read or edited).
// If you change something on the file, need to change on base and cache
// even if cache is stale (invalidated), easier to just do it
type cacheFile struct {
Path string
Size int64
LastAccessTime int64
}
type SizeCacheFS struct {
base Fs
cache Fs
cacheSize int64
cacheTime time.Duration
currSize int64
files *sortedset.SortedSet
cacheL sync.Mutex
}
func NewSizeCacheFS(base Fs, cache Fs, cacheSize int64, cacheTime time.Duration) (*SizeCacheFS, error) {
if cacheSize < 0 {
cacheSize = 0
}
exists, err := Exists(cache, ".cacheindex")
if err != nil {
return nil, fmt.Errorf("error determining if cache index exists: %v", err)
}
var files []*cacheFile
if !exists {
err := Walk(cache, "", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
file := &cacheFile{
Path: path,
Size: info.Size(),
LastAccessTime: info.ModTime().UnixNano() / 1000000,
}
files = append(files, file)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("error building cache index: %v", err)
}
} else {
data, err := ReadFile(cache, ".cacheindex")
if err != nil {
return nil, fmt.Errorf("error reading cache index: %v", err)
}
if err := json.Unmarshal(data, &files); err != nil {
return nil, fmt.Errorf("error unmarshalling files: %v", err)
}
}
var currSize int64 = 0
set := sortedset.New()
for _, f := range files {
set.AddOrUpdate(f.Path, sortedset.SCORE(f.LastAccessTime), f)
currSize += f.Size
}
fs := &SizeCacheFS{
base: base,
cache: cache,
cacheSize: cacheSize,
cacheTime: cacheTime,
currSize: currSize,
files: set,
}
return fs, nil
}
func (u *SizeCacheFS) getCacheFile(name string) (info *cacheFile) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node == nil {
return nil
} else {
return node.Value.(*cacheFile)
}
}
func (u *SizeCacheFS) addToCache(info *cacheFile) error {
u.cacheL.Lock()
defer u.cacheL.Unlock()
// check if we aren't already inside
node := u.files.GetByKey(info.Path)
if node != nil {
file := node.Value.(*cacheFile)
u.currSize -= file.Size
}
// while we can pop files and the cache is full..
for u.currSize > 0 && u.currSize+info.Size > u.cacheSize {
node := u.files.PopMin()
// node CAN'T be nil as currSize > 0
file := node.Value.(*cacheFile)
if err := u.cache.Remove(file.Path); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.currSize -= file.Size
path := filepath.Dir(file.Path)
for path != "" && path != "." && path != "/" {
f, err := u.cache.Open(path)
if err != nil {
_ = f.Close()
return fmt.Errorf("error opening parent directory: %v", err)
}
dirs, err := f.Readdir(-1)
if err != nil {
_ = f.Close()
return fmt.Errorf("error reading parent directory: %v", err)
}
_ = f.Close()
if len(dirs) == 0 {
if err := u.cache.Remove(path); err != nil {
return fmt.Errorf("error removing parent directory: %v", err)
}
path = filepath.Dir(path)
} else {
break
}
}
}
u.files.AddOrUpdate(info.Path, sortedset.SCORE(info.LastAccessTime), info)
u.currSize += info.Size
return nil
}
func (u *SizeCacheFS) removeFromCache(name string) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node != nil {
// If we remove file that is open, the file will re-add itself in
// the cache on close. This is expected behavior as a removed open file
// will re-appear on close ?
u.files.Remove(name)
info := node.Value.(*cacheFile)
u.currSize -= info.Size
}
}
/*
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
*/
func (u *SizeCacheFS) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.cache.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
} else if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
} else {
return cacheMiss, nil, err
}
}
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
// If layer file exists, we need to remove it
// and replace it with current file
// TODO
// Get size, if size over our limit, evict one file
bfh, err := u.base.Open(name)
if err != nil {
if err == os.ErrNotExist {
return nil, err
} else {
return nil, fmt.Errorf("error opening base file: %v", err)
}
}
// First make sure the directory exists
exists, err := Exists(u.cache, filepath.Dir(name))
if err != nil {
return nil, err
}
if !exists {
err = u.cache.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return nil, err
}
}
// Create the file on the overlay
lfh, err := u.cache.Create(name)
if err != nil {
return nil, err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, fmt.Errorf("error copying layer to base: %v", err)
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, syscall.EIO
}
isDir := bfi.IsDir()
err = lfh.Close()
if err != nil {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, err
}
if err := bfh.Close(); err != nil {
return nil, fmt.Errorf("error closing base file: %v", err)
}
if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
return nil, err
}
// if cache is stale and file already inside sorted set, we are just going to update it
// Create info
if !isDir {
info := &cacheFile{
Path: name,
Size: bfi.Size(),
LastAccessTime: time.Now().UnixNano() / 1000,
}
return info, nil
} else {
return nil, nil
}
}
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chtimes(name, atime, mtime)
}
return u.base.Chtimes(name, atime, mtime)
}
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chmod(name, mode)
}
return u.base.Chmod(name, mode)
}
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
return u.base.Stat(name)
}
func (u *SizeCacheFS) Rename(oldname, newname string) error {
exists, err := Exists(u.cache, oldname)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
info := u.getCacheFile(oldname)
u.removeFromCache(oldname)
info.Path = newname
if err := u.addToCache(info); err != nil {
return err
}
if err := u.cache.Rename(oldname, newname); err != nil {
return err
}
}
return u.base.Rename(oldname, newname)
}
func (u *SizeCacheFS) Remove(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return fmt.Errorf("error determining if file exists: %v", err)
}
// If cache file exists, update to ensure consistency
if exists {
if err := u.cache.Remove(name); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.removeFromCache(name)
}
return u.base.Remove(name)
}
func (u *SizeCacheFS) RemoveAll(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return u.Remove(path)
} else {
return nil
}
})
if err != nil {
return err
}
// Remove the dirs
_ = u.cache.RemoveAll(name)
}
return u.base.RemoveAll(name)
}
func (u *SizeCacheFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
exists, err := Exists(u.base, name)
if err != nil {
return nil, fmt.Errorf("error determining if base file exists: %v", err)
}
if exists {
var err error
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
// It is not a dir, we cannot open a non existing dir
info = &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
}
}
var cacheFlag = flag
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
// Force read write mode
cacheFlag = (flag & (^os.O_WRONLY)) | os.O_RDWR
}
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.cache.OpenFile(name, cacheFlag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
uf := NewSizeCacheFile(bfi, lfi, flag, u, info)
return uf, nil
}
func (u *SizeCacheFS) Open(name string) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if !bfi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
case cacheStale:
if !fi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
} | lfile, err := u.cache.Open(name)
if err != nil && bfile == nil {
return nil, err
}
fi, err = u.cache.Stat(name)
if err != nil {
return nil, err
}
uf := NewSizeCacheFile(bfile, lfile, os.O_RDONLY, u, info)
return uf, nil
}
func (u *SizeCacheFS) Mkdir(name string, perm os.FileMode) error {
err := u.base.Mkdir(name, perm)
if err != nil {
return err
}
return u.cache.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
func (u *SizeCacheFS) Name() string {
return "SizeCacheFS"
}
func (u *SizeCacheFS) MkdirAll(name string, perm os.FileMode) error {
err := u.base.MkdirAll(name, perm)
if err != nil {
return err
}
return u.cache.MkdirAll(name, perm)
}
func (u *SizeCacheFS) Create(name string) (File, error) {
bfile, err := u.base.Create(name)
if err != nil {
return nil, err
}
lfile, err := u.cache.Create(name)
if err != nil {
// oops, see comment about OS_TRUNC above, should we remove? then we have to
// remember if the file did not exist before
_ = bfile.Close()
return nil, err
}
info := &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
// Ensure file is out
u.removeFromCache(name)
uf := NewSizeCacheFile(bfile, lfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, u, info)
return uf, nil
}
func (u *SizeCacheFS) Size() int64 {
return u.currSize
}
func (u *SizeCacheFS) Close() error {
// TODO close all open files
// Save index
var files []*cacheFile
nodes := u.files.GetByScoreRange(math.MinInt64, math.MaxInt64, nil)
for _, n := range nodes {
f := n.Value.(*cacheFile)
files = append(files, f)
}
data, err := json.Marshal(files)
if err != nil {
return fmt.Errorf("error marshalling files: %v", err)
}
if err := WriteFile(u.cache, ".cacheindex", data, 0644); err != nil {
return fmt.Errorf("error writing cache index: %v", err)
}
return nil
} |
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name) | random_line_split |
size_cache_fs.go | package kafero
import (
"encoding/json"
"fmt"
"github.com/wangjia184/sortedset"
"io"
"math"
"os"
"path/filepath"
"sync"
"syscall"
"time"
)
// The SizeCacheFS is a cache file system composed of a cache layer and a base layer
// the cache layer has a maximal size, and files get evicted relative to their
// last use time (read or edited).
// If you change something on the file, need to change on base and cache
// even if cache is stale (invalidated), easier to just do it
type cacheFile struct {
Path string
Size int64
LastAccessTime int64
}
type SizeCacheFS struct {
base Fs
cache Fs
cacheSize int64
cacheTime time.Duration
currSize int64
files *sortedset.SortedSet
cacheL sync.Mutex
}
func NewSizeCacheFS(base Fs, cache Fs, cacheSize int64, cacheTime time.Duration) (*SizeCacheFS, error) {
if cacheSize < 0 {
cacheSize = 0
}
exists, err := Exists(cache, ".cacheindex")
if err != nil {
return nil, fmt.Errorf("error determining if cache index exists: %v", err)
}
var files []*cacheFile
if !exists {
err := Walk(cache, "", func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
file := &cacheFile{
Path: path,
Size: info.Size(),
LastAccessTime: info.ModTime().UnixNano() / 1000000,
}
files = append(files, file)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("error building cache index: %v", err)
}
} else {
data, err := ReadFile(cache, ".cacheindex")
if err != nil {
return nil, fmt.Errorf("error reading cache index: %v", err)
}
if err := json.Unmarshal(data, &files); err != nil {
return nil, fmt.Errorf("error unmarshalling files: %v", err)
}
}
var currSize int64 = 0
set := sortedset.New()
for _, f := range files {
set.AddOrUpdate(f.Path, sortedset.SCORE(f.LastAccessTime), f)
currSize += f.Size
}
fs := &SizeCacheFS{
base: base,
cache: cache,
cacheSize: cacheSize,
cacheTime: cacheTime,
currSize: currSize,
files: set,
}
return fs, nil
}
func (u *SizeCacheFS) getCacheFile(name string) (info *cacheFile) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node == nil {
return nil
} else {
return node.Value.(*cacheFile)
}
}
func (u *SizeCacheFS) addToCache(info *cacheFile) error {
u.cacheL.Lock()
defer u.cacheL.Unlock()
// check if we aren't already inside
node := u.files.GetByKey(info.Path)
if node != nil {
file := node.Value.(*cacheFile)
u.currSize -= file.Size
}
// while we can pop files and the cache is full..
for u.currSize > 0 && u.currSize+info.Size > u.cacheSize {
node := u.files.PopMin()
// node CAN'T be nil as currSize > 0
file := node.Value.(*cacheFile)
if err := u.cache.Remove(file.Path); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.currSize -= file.Size
path := filepath.Dir(file.Path)
for path != "" && path != "." && path != "/" {
f, err := u.cache.Open(path)
if err != nil {
_ = f.Close()
return fmt.Errorf("error opening parent directory: %v", err)
}
dirs, err := f.Readdir(-1)
if err != nil {
_ = f.Close()
return fmt.Errorf("error reading parent directory: %v", err)
}
_ = f.Close()
if len(dirs) == 0 {
if err := u.cache.Remove(path); err != nil {
return fmt.Errorf("error removing parent directory: %v", err)
}
path = filepath.Dir(path)
} else {
break
}
}
}
u.files.AddOrUpdate(info.Path, sortedset.SCORE(info.LastAccessTime), info)
u.currSize += info.Size
return nil
}
func (u *SizeCacheFS) removeFromCache(name string) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node != nil {
// If we remove file that is open, the file will re-add itself in
// the cache on close. This is expected behavior as a removed open file
// will re-appear on close ?
u.files.Remove(name)
info := node.Value.(*cacheFile)
u.currSize -= info.Size
}
}
/*
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
*/
func (u *SizeCacheFS) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.cache.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
} else if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
} else {
return cacheMiss, nil, err
}
}
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
// If layer file exists, we need to remove it
// and replace it with current file
// TODO
// Get size, if size over our limit, evict one file
bfh, err := u.base.Open(name)
if err != nil {
if err == os.ErrNotExist {
return nil, err
} else {
return nil, fmt.Errorf("error opening base file: %v", err)
}
}
// First make sure the directory exists
exists, err := Exists(u.cache, filepath.Dir(name))
if err != nil {
return nil, err
}
if !exists {
err = u.cache.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return nil, err
}
}
// Create the file on the overlay
lfh, err := u.cache.Create(name)
if err != nil {
return nil, err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, fmt.Errorf("error copying layer to base: %v", err)
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, syscall.EIO
}
isDir := bfi.IsDir()
err = lfh.Close()
if err != nil {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, err
}
if err := bfh.Close(); err != nil {
return nil, fmt.Errorf("error closing base file: %v", err)
}
if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
return nil, err
}
// if cache is stale and file already inside sorted set, we are just going to update it
// Create info
if !isDir {
info := &cacheFile{
Path: name,
Size: bfi.Size(),
LastAccessTime: time.Now().UnixNano() / 1000,
}
return info, nil
} else {
return nil, nil
}
}
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chtimes(name, atime, mtime)
}
return u.base.Chtimes(name, atime, mtime)
}
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chmod(name, mode)
}
return u.base.Chmod(name, mode)
}
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
return u.base.Stat(name)
}
func (u *SizeCacheFS) Rename(oldname, newname string) error {
exists, err := Exists(u.cache, oldname)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
info := u.getCacheFile(oldname)
u.removeFromCache(oldname)
info.Path = newname
if err := u.addToCache(info); err != nil {
return err
}
if err := u.cache.Rename(oldname, newname); err != nil {
return err
}
}
return u.base.Rename(oldname, newname)
}
func (u *SizeCacheFS) Remove(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return fmt.Errorf("error determining if file exists: %v", err)
}
// If cache file exists, update to ensure consistency
if exists {
if err := u.cache.Remove(name); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.removeFromCache(name)
}
return u.base.Remove(name)
}
func (u *SizeCacheFS) RemoveAll(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return u.Remove(path)
} else {
return nil
}
})
if err != nil {
return err
}
// Remove the dirs
_ = u.cache.RemoveAll(name)
}
return u.base.RemoveAll(name)
}
func (u *SizeCacheFS) | (name string, flag int, perm os.FileMode) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
exists, err := Exists(u.base, name)
if err != nil {
return nil, fmt.Errorf("error determining if base file exists: %v", err)
}
if exists {
var err error
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
// It is not a dir, we cannot open a non existing dir
info = &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
}
}
var cacheFlag = flag
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
// Force read write mode
cacheFlag = (flag & (^os.O_WRONLY)) | os.O_RDWR
}
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.cache.OpenFile(name, cacheFlag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
uf := NewSizeCacheFile(bfi, lfi, flag, u, info)
return uf, nil
}
func (u *SizeCacheFS) Open(name string) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if !bfi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
case cacheStale:
if !fi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
}
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
lfile, err := u.cache.Open(name)
if err != nil && bfile == nil {
return nil, err
}
fi, err = u.cache.Stat(name)
if err != nil {
return nil, err
}
uf := NewSizeCacheFile(bfile, lfile, os.O_RDONLY, u, info)
return uf, nil
}
func (u *SizeCacheFS) Mkdir(name string, perm os.FileMode) error {
err := u.base.Mkdir(name, perm)
if err != nil {
return err
}
return u.cache.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
func (u *SizeCacheFS) Name() string {
return "SizeCacheFS"
}
func (u *SizeCacheFS) MkdirAll(name string, perm os.FileMode) error {
err := u.base.MkdirAll(name, perm)
if err != nil {
return err
}
return u.cache.MkdirAll(name, perm)
}
func (u *SizeCacheFS) Create(name string) (File, error) {
bfile, err := u.base.Create(name)
if err != nil {
return nil, err
}
lfile, err := u.cache.Create(name)
if err != nil {
// oops, see comment about OS_TRUNC above, should we remove? then we have to
// remember if the file did not exist before
_ = bfile.Close()
return nil, err
}
info := &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
// Ensure file is out
u.removeFromCache(name)
uf := NewSizeCacheFile(bfile, lfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, u, info)
return uf, nil
}
func (u *SizeCacheFS) Size() int64 {
return u.currSize
}
func (u *SizeCacheFS) Close() error {
// TODO close all open files
// Save index
var files []*cacheFile
nodes := u.files.GetByScoreRange(math.MinInt64, math.MaxInt64, nil)
for _, n := range nodes {
f := n.Value.(*cacheFile)
files = append(files, f)
}
data, err := json.Marshal(files)
if err != nil {
return fmt.Errorf("error marshalling files: %v", err)
}
if err := WriteFile(u.cache, ".cacheindex", data, 0644); err != nil {
return fmt.Errorf("error writing cache index: %v", err)
}
return nil
}
| OpenFile | identifier_name |
ikey.js | /*
Fluid Project
Copyright (c) 2006, 2007 University of Toronto. All rights reserved.
Licensed under the Educational Community License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.osedu.org/licenses/ECL-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Adaptive Technology Resource Centre, University of Toronto
130 St. George St., Toronto, Ontario, Canada
Telephone: (416) 978-4360
*/
/**
* Allows you to resort elements within a sortable container by using the keyboard. Requires
* the Draggables, Droppables and Sortables interface plugins. The container and each item inside
* the container must have an ID. Sortables are especially useful for lists.
*
* @see Plugins/Interface/Draggable
* @see Plugins/Interface/Droppable
* @see Plugins/Interface/Sortable
* @author Joshua Ryan
* @author Colin Clark
* @name Keyable
* @cat Plugins/Interface
* @option String accept The class name for items inside the container (mandatory)
* @option String activeclass The class for the container when one of its items has started to move
* @option String hoverclass The class for the container when an acceptable item is inside it
* @option String helperclass The helper is used to point to the place where the item will be
* moved. This is the class for the helper.
* @option Function onChange Callback that gets called when the sortable list changed. It takes
* an array of serialized elements
* @option String axis Use 'horizontally' or 'vertically' to constrain dragging to an axis
* @option DOMElement domNode The conatainer of keyable items
* @option Function onStart Callback function triggered when the dragging starts
* @option Function onStop Callback function triggered when the dragging stops
* @example $('ul').Keyable(
* {
* accept : 'sortableitem',
* activeclass : 'sortableactive',
* hoverclass : 'sortablehover',
* helperclass : 'sorthelper',
* domNode : $('ul').get(0)
* }
* )
*/
jQuery.iKey = {
// The node focused on for incoming actions
focusedNode : null,
// Sets the mode of keying vs mousing
keying : false,
/**
* Process down arrow events
*/
handleDownAction : function (isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).next();
var wrap;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.firstElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl) |
else if (!wrap) {
jQuery(target).after(jQuery.iKey.focusedNode);
}
else {
jQuery(target).before(jQuery.iKey.focusedNode);
}
},
/**
* Process up arrow events
*/
handleUpAction : function(isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).prev();
var wrap = false;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.lastElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl) {
this.focusNode(target, event);
}
else if (!wrap) {
jQuery(target).before(jQuery.iKey.focusedNode);
}
else {
jQuery(target).after(jQuery.iKey.focusedNode);
}
},
/**
* 'Focus' on a node to be the focus of future actions
*/
focusNode : function(aNode, event) {
// deselect any previously focused node
jQuery.iKey.deselectFocusedNode(event);
jQuery.iKey.focusedNode = aNode;
jQuery(aNode).removeClass(event.data.accept);
jQuery(aNode).addClass(event.data.activeclass);
},
/**
* 'Select' the focused node, similar to a user 'clicking' on an item for drag and drop
*/
selectFocusedNode : function(event) {
//if we are not in keyboard sort mode, set things up
if (jQuery.iKey.focusedNode == null) {
jQuery.iKey.focusedNode = jQuery('.' + event.data.accept, event.data.domNode).get(0);
}
if (jQuery.iKey.keying == true) {
jQuery.iKey.focusNode(jQuery.iKey.focusedNode, event);
}
},
/**
* Deselect the current selected node, similar to releasing the mouse button
*/
deselectFocusedNode : function(event) {
if (jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.accept);
jQuery.iKey.focusedNode = null;
}
},
/**
* End keyboard mode, for use when users switches to using the mouse for DnD type activities
*/
endKeyboardMode : function(event) {
if (jQuery.iKey.keying) {
jQuery.iKey.deselectFocusedNode(event);
jQuery(document)
.unbind('mousemove', jQuery.iKey.endKeyboardMode)
.unbind('mousedown', jQuery.iKey.endKeyboardMode);
}
jQuery.iKey.keying = false;
},
/**
* Change state from that of selecting a node to being ready to actually move the current node
*/
handleKeyDown : function (event) {
if (event.ctrlKey && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.hoverclass);
}
},
/**
* Change state from that of being ready to move a node to that of selecting a node from the list
*/
handleKeyUp : function (event) {
kCode = event.keyCode || event.which;
if (kCode == 17 && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.activeclass);
}
},
/**
* Handle arrow key presses, could be either moving through the list to select a node or moving a node
*/
handleArrowKeyPress : function (event) {
kCode = event.keyCode || event.which;
// Pass any input other then arrow keys onto other event handlers
if (kCode < 37 || kCode > 40) {
return true;
}
// Listen for mouse actions to end keyboard mode
if (!jQuery.iKey.keying) {
jQuery.iKey.keying = true;
jQuery(document)
.bind('mousemove', event.data, jQuery.iKey.endKeyboardMode)
.bind('mousedown', event.data, jQuery.iKey.endKeyboardMode);
}
// Ensure a focused node
while (!jQuery.iKey.focusedNode) {
jQuery.iKey.selectFocusedNode(event);
}
// down arrow
if (kCode == 40 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// up arrow
else if (kCode == 38 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
// right arrow
else if (kCode == 39 && (event.data.axis || event.data.axis == 'horizontally')) {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// left arrow
else if (kCode == 37 && (event.data.axis || event.data.axis == 'horizontally')) {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
else {
return true;
}
return false;
},
/**
* Gets the first Element of a nodes child node list
*/
firstElement : function(node) {
var child = node.firstChild;
while (!this.isElement(child)) {
return child = child.nextSibling;
}
return child;
},
/**
* Gets the last Element of a nodes child node list
*/
lastElement : function(node) {
var child = node.lastChild;
while (!this.isElement(child)) {
child = child.previousSibling;
}
return child;
},
/**
* tests if the passed in node is an Element
*/
isElement : function(node) {
return node && node.nodeType == 1;
},
/**
* Builds the Keyable with the set parameters and binds all neeeded events.
*
* Gets called when ever a Keyable is created.
*/
build : function(o) {
if (!o) {
o = {};
}
return this.each(
function() {
if (this.isKeyable || !jQuery.iUtil) {
return;
}
var el = this;
var dhe = jQuery(this);
if (jQuery.browser.msie) {
dhe.each(
function() {
this.unselectable = "on";
}
);
}
else {
dhe.css('-moz-user-select', 'none');
dhe.css('user-select', 'none');
dhe.css('-khtml-user-select', 'none');
}
this.keyCfg = {
domNode : o.domNode ? o.domNode : false,
accept : o.accept || false,
activeclass : o.activeclass || false,
hoverclass : o.hoverclass || false,
helperclass : o.helperclass || false,
axis : /vertically|horizontally/.test(o.axis) ? o.axis : false,
onStart : o.onStart || o.onstart || false,
onStop : o.onStop || o.onstop || false
};
dhe.each(
function() {
jQuery(this).bind('keypress', el.keyCfg, jQuery.iKey.handleArrowKeyPress);
jQuery(this).bind('keydown', el.keyCfg, jQuery.iKey.handleKeyDown);
jQuery(this).bind('keyup', el.keyCfg, jQuery.iKey.handleKeyUp);
jQuery(this).bind('onfocus', el.keyCfg, jQuery.iKey.selectFocusedNode);
jQuery(this).bind('focus', el.keyCfg, jQuery.iKey.selectFocusedNode);
jQuery(this).bind('onblur', el.keyCfg, jQuery.iKey.endKeyboardMode);
jQuery(this).bind('onclick', el.keyCfg, jQuery.iKey.endKeyboardMode);
jQuery(this).bind('onmousedown', el.keyCfg, jQuery.iKey.endKeyboardMode);
jQuery(this).bind('onmousemove', el.keyCfg, jQuery.iKey.endKeyboardMode);
}
);
}
);
}
};
/**
* Destroy an existing draggable on a collection of elements
*
* @name DraggableDestroy
* @descr Destroy a draggable
* @type jQuery
* @cat Plugins/Interface
* @example $('#drag2').DraggableDestroy();
*/
jQuery.fn.extend(
{
KeyableDestroy : jQuery.iKey.destroy,
Keyable : jQuery.iKey.build
}
);
| {
this.focusNode(target, event);
} | conditional_block |
ikey.js | /*
Fluid Project
Copyright (c) 2006, 2007 University of Toronto. All rights reserved.
Licensed under the Educational Community License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.osedu.org/licenses/ECL-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Adaptive Technology Resource Centre, University of Toronto
130 St. George St., Toronto, Ontario, Canada
Telephone: (416) 978-4360
*/
/**
* Allows you to resort elements within a sortable container by using the keyboard. Requires
* the Draggables, Droppables and Sortables interface plugins. The container and each item inside
* the container must have an ID. Sortables are especially useful for lists.
*
* @see Plugins/Interface/Draggable
* @see Plugins/Interface/Droppable
* @see Plugins/Interface/Sortable
* @author Joshua Ryan
* @author Colin Clark
* @name Keyable
* @cat Plugins/Interface
* @option String accept The class name for items inside the container (mandatory)
* @option String activeclass The class for the container when one of its items has started to move
* @option String hoverclass The class for the container when an acceptable item is inside it
* @option String helperclass The helper is used to point to the place where the item will be
* moved. This is the class for the helper.
* @option Function onChange Callback that gets called when the sortable list changed. It takes
* an array of serialized elements
* @option String axis Use 'horizontally' or 'vertically' to constrain dragging to an axis
* @option DOMElement domNode The conatainer of keyable items
* @option Function onStart Callback function triggered when the dragging starts
* @option Function onStop Callback function triggered when the dragging stops
* @example $('ul').Keyable(
* {
* accept : 'sortableitem',
* activeclass : 'sortableactive',
* hoverclass : 'sortablehover',
* helperclass : 'sorthelper',
* domNode : $('ul').get(0)
* }
* )
*/
jQuery.iKey = {
// The node focused on for incoming actions
focusedNode : null,
// Sets the mode of keying vs mousing
keying : false,
/**
* Process down arrow events
*/
handleDownAction : function (isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).next();
var wrap;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.firstElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl) {
this.focusNode(target, event);
}
else if (!wrap) {
jQuery(target).after(jQuery.iKey.focusedNode);
}
else {
jQuery(target).before(jQuery.iKey.focusedNode);
}
},
/**
* Process up arrow events
*/
handleUpAction : function(isCtrl, event) {
var target = jQuery(jQuery.iKey.focusedNode).prev();
var wrap = false;
if (!target || !this.isElement(target.get(0))) {
target = jQuery(jQuery.iKey.lastElement(
jQuery.iKey.focusedNode.get(0).parentNode)
);
wrap = true;
}
if (!isCtrl) {
this.focusNode(target, event);
}
else if (!wrap) {
jQuery(target).before(jQuery.iKey.focusedNode);
}
else {
jQuery(target).after(jQuery.iKey.focusedNode);
}
},
/**
* 'Focus' on a node to be the focus of future actions
*/
focusNode : function(aNode, event) {
// deselect any previously focused node
jQuery.iKey.deselectFocusedNode(event);
jQuery.iKey.focusedNode = aNode;
jQuery(aNode).removeClass(event.data.accept);
jQuery(aNode).addClass(event.data.activeclass);
},
/**
* 'Select' the focused node, similar to a user 'clicking' on an item for drag and drop
*/
selectFocusedNode : function(event) {
//if we are not in keyboard sort mode, set things up
if (jQuery.iKey.focusedNode == null) {
jQuery.iKey.focusedNode = jQuery('.' + event.data.accept, event.data.domNode).get(0);
}
if (jQuery.iKey.keying == true) {
jQuery.iKey.focusNode(jQuery.iKey.focusedNode, event);
}
},
/**
* Deselect the current selected node, similar to releasing the mouse button
*/
deselectFocusedNode : function(event) {
if (jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.accept);
jQuery.iKey.focusedNode = null;
}
},
/**
* End keyboard mode, for use when users switches to using the mouse for DnD type activities
*/
endKeyboardMode : function(event) {
if (jQuery.iKey.keying) {
jQuery.iKey.deselectFocusedNode(event);
jQuery(document)
.unbind('mousemove', jQuery.iKey.endKeyboardMode)
.unbind('mousedown', jQuery.iKey.endKeyboardMode);
}
jQuery.iKey.keying = false;
},
/**
* Change state from that of selecting a node to being ready to actually move the current node
*/
handleKeyDown : function (event) {
if (event.ctrlKey && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.activeclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.hoverclass);
}
},
/**
* Change state from that of being ready to move a node to that of selecting a node from the list
*/
handleKeyUp : function (event) {
kCode = event.keyCode || event.which;
if (kCode == 17 && jQuery.iKey.focusedNode != null) {
jQuery(jQuery.iKey.focusedNode).removeClass(event.data.hoverclass);
jQuery(jQuery.iKey.focusedNode).addClass(event.data.activeclass);
}
},
/**
* Handle arrow key presses, could be either moving through the list to select a node or moving a node
*/
handleArrowKeyPress : function (event) {
kCode = event.keyCode || event.which;
// Pass any input other then arrow keys onto other event handlers
if (kCode < 37 || kCode > 40) {
return true;
}
// Listen for mouse actions to end keyboard mode
if (!jQuery.iKey.keying) {
jQuery.iKey.keying = true;
jQuery(document)
.bind('mousemove', event.data, jQuery.iKey.endKeyboardMode)
.bind('mousedown', event.data, jQuery.iKey.endKeyboardMode);
}
// Ensure a focused node
while (!jQuery.iKey.focusedNode) {
jQuery.iKey.selectFocusedNode(event);
}
// down arrow
if (kCode == 40 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// up arrow
else if (kCode == 38 && (!event.data.axis || event.data.axis == 'vertically')) {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
// right arrow
else if (kCode == 39 && (event.data.axis || event.data.axis == 'horizontally')) {
jQuery.iKey.handleDownAction(event.ctrlKey, event);
}
// left arrow
else if (kCode == 37 && (event.data.axis || event.data.axis == 'horizontally')) {
jQuery.iKey.handleUpAction(event.ctrlKey, event);
}
else {
return true;
}
return false;
},
/**
* Gets the first Element of a nodes child node list
*/
firstElement : function(node) {
var child = node.firstChild;
while (!this.isElement(child)) {
return child = child.nextSibling;
}
return child;
},
/**
* Gets the last Element of a nodes child node list
*/
lastElement : function(node) {
var child = node.lastChild;
while (!this.isElement(child)) {
child = child.previousSibling;
}
return child;
},
/**
* tests if the passed in node is an Element
*/
isElement : function(node) {
return node && node.nodeType == 1;
},
/**
* Builds the Keyable with the set parameters and binds all neeeded events.
*
* Gets called when ever a Keyable is created.
*/
build : function(o) {
if (!o) {
o = {};
}
return this.each(
function() {
if (this.isKeyable || !jQuery.iUtil) {
return;
}
var el = this;
var dhe = jQuery(this);
if (jQuery.browser.msie) {
dhe.each(
function() {
this.unselectable = "on";
}
); | dhe.css('-khtml-user-select', 'none');
}
this.keyCfg = {
domNode : o.domNode ? o.domNode : false,
accept : o.accept || false,
activeclass : o.activeclass || false,
hoverclass : o.hoverclass || false,
helperclass : o.helperclass || false,
axis : /vertically|horizontally/.test(o.axis) ? o.axis : false,
onStart : o.onStart || o.onstart || false,
onStop : o.onStop || o.onstop || false
};
dhe.each(
function() {
jQuery(this).bind('keypress', el.keyCfg, jQuery.iKey.handleArrowKeyPress);
jQuery(this).bind('keydown', el.keyCfg, jQuery.iKey.handleKeyDown);
jQuery(this).bind('keyup', el.keyCfg, jQuery.iKey.handleKeyUp);
jQuery(this).bind('onfocus', el.keyCfg, jQuery.iKey.selectFocusedNode);
jQuery(this).bind('focus', el.keyCfg, jQuery.iKey.selectFocusedNode);
jQuery(this).bind('onblur', el.keyCfg, jQuery.iKey.endKeyboardMode);
jQuery(this).bind('onclick', el.keyCfg, jQuery.iKey.endKeyboardMode);
jQuery(this).bind('onmousedown', el.keyCfg, jQuery.iKey.endKeyboardMode);
jQuery(this).bind('onmousemove', el.keyCfg, jQuery.iKey.endKeyboardMode);
}
);
}
);
}
};
/**
* Destroy an existing draggable on a collection of elements
*
* @name DraggableDestroy
* @descr Destroy a draggable
* @type jQuery
* @cat Plugins/Interface
* @example $('#drag2').DraggableDestroy();
*/
jQuery.fn.extend(
{
KeyableDestroy : jQuery.iKey.destroy,
Keyable : jQuery.iKey.build
}
); | }
else {
dhe.css('-moz-user-select', 'none');
dhe.css('user-select', 'none'); | random_line_split |
b_get_data.py | # -*- coding: UTF-8 -*-
""" Main lib for wmillfailprev Project
"""
import os
import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics, model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC | pd.set_option('display.width', 200)
def get_data(file):
'file = full path of the original file. obter os dataframes dos ficheiros'
df = pd.read_csv(file, sep=';')
df = time_transform(df)
df = timestamp_round_down(df)
df = df.bfill()
return df
def logs_cols_uniform(df):
'A partir de uma lista de draframe uniformizar os nomes relacionados com tempo e turbine_ID'
df = df.rename(columns={'TimeDetected': 'Timestamp', 'UnitTitle':'Turbine_ID'})
return df
def timestamp_round_down(df, time_column='Timestamp'):
'Arredondar os intervalos de tempo para os 10 minutos anteriores'
df[time_column] = df.apply(lambda x: x[time_column] - datetime.timedelta(minutes=x[time_column].minute % 10,seconds=x[time_column].second, microseconds=x[time_column].microsecond),axis=1)
return df
def time_transform(df, time_column='Timestamp'):
'Transformar as colunas referentes a tempo no data type tempo'
df[time_column] = pd.to_datetime(df[time_column]).dt.tz_convert(None)
# df[time_column] = df[time_column]
return df
def component(component, col):
pair_comp_col=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Retornar dataframes por tipo de componente
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Uniformização da tabela de logs e transformação com get_dummies'''
# Colunas a manter
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# transformação de Get_Dummies
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'fazer o merge com o failures e desevolver o já dummyfied'
#filtrar o componente
fail_df = fail_df[fail_df['Component'] == component]
# aplicar o dummies
fail_df = fail_dummies(fail_df)
# fazer o merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# colocar zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na_by_turbine(df, turbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='Bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Função para agregar o data-frame pela medida de tempo pretendida, periodo _Dia_ ou _Hora_'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Medida de tempo não suportada')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def add_feat_predict(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[2:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def prepare_test_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Timestamp'] >= split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Date'] >= split]
return df_test
def prepare_train_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Timestamp'] < split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Date'] < split]
# df_test = df[df['Timestamp'] >= split]
return df_train
if __name__ == "__main__":
print('001 - Obtendo os dados')
# Obter o caminho dos ficheiros.
root_dir = os.path.abspath('..')
csv_path = os.path.join(root_dir, 'rawdata')
# Importar o dataset de failures
failures_path = os.path.join(csv_path, 'wind-farm-1-failures-training.csv')
failures_df = get_data(failures_path)
# Importar o dataset de signals
signals_path = os.path.join(csv_path, 'wind-farm-1-signals-training.csv')
signals_df = get_data(signals_path)
# Cortar colunas que não têm valores
cols_to_drop = ['Prod_LatestAvg_ActPwrGen2', 'Prod_LatestAvg_ReactPwrGen2']
signals_df = signals_df.drop(columns=cols_to_drop)
print('002 - Criar o dicionário com os Dataframes originais')
df_dict = {'failures_df':failures_df, 'signals_df':signals_df}
print('003 - Criar os datasets por componentes')
df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox = component_df_creation(signals_df)
print('004 - Criar o dicionário de datasets por componentes')
comp_df_dict = {'df_generator': df_generator,'df_hydraulic': df_hydraulic,'df_gen_bear': df_gen_bear,'df_transformer': df_transformer,'df_gearbox': df_gearbox}
print('005 - fazer cópia dos dataframes')
comp_prep_df_dict = comp_df_dict.copy()
print('006 - Merge com o dataframe de falhas')
component_list = ['GENERATOR', 'HYDRAULIC_GROUP', 'GENERATOR_BEARING', 'TRANSFORMER','GEARBOX']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = sig_fail_merge_dfs(sig_df=comp_prep_df_dict[key],fail_df=failures_df,component=component_list[i])
print('007 - Fillna by turbine')
turbine_list = ['T11', 'T06', 'T01', 'T09', 'T07']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = fill_na_by_turbine(comp_prep_df_dict[key],turbine_list)
print('008 - Criação da variável alvo')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = aplic_var_target(comp_prep_df_dict[key], 60)
print('009 - retirar as colunas que não se relacionam com a variavel alvo')
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = comp_prep_df_dict[key].drop(columns=feat_drop_list[i])
print('010 - agrupar pela medida de tempo seleccionada')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = group_por_frequency(comp_prep_df_dict[key], period='Dia')
print('011 - Adicionar medidas de alisamento')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = add_features(comp_prep_df_dict[key], rolling_win_size=10)
print(comp_prep_df_dict[key].shape) | from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss, roc_curve, precision_score, recall_score,confusion_matrix,f1_score,fbeta_score, make_scorer
| random_line_split |
b_get_data.py | # -*- coding: UTF-8 -*-
""" Main lib for wmillfailprev Project
"""
import os
import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics, model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss, roc_curve, precision_score, recall_score,confusion_matrix,f1_score,fbeta_score, make_scorer
pd.set_option('display.width', 200)
def get_data(file):
'file = full path of the original file. obter os dataframes dos ficheiros'
df = pd.read_csv(file, sep=';')
df = time_transform(df)
df = timestamp_round_down(df)
df = df.bfill()
return df
def logs_cols_uniform(df):
'A partir de uma lista de draframe uniformizar os nomes relacionados com tempo e turbine_ID'
df = df.rename(columns={'TimeDetected': 'Timestamp', 'UnitTitle':'Turbine_ID'})
return df
def timestamp_round_down(df, time_column='Timestamp'):
'Arredondar os intervalos de tempo para os 10 minutos anteriores'
df[time_column] = df.apply(lambda x: x[time_column] - datetime.timedelta(minutes=x[time_column].minute % 10,seconds=x[time_column].second, microseconds=x[time_column].microsecond),axis=1)
return df
def time_transform(df, time_column='Timestamp'):
'Transformar as colunas referentes a tempo no data type tempo'
df[time_column] = pd.to_datetime(df[time_column]).dt.tz_convert(None)
# df[time_column] = df[time_column]
return df
def component(component, col):
pair_comp_col=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Retornar dataframes por tipo de componente
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Uniformização da tabela de logs e transformação com get_dummies'''
# Colunas a manter
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# transformação de Get_Dummies
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'fazer o merge com o failures e desevolver o já dummyfied'
#filtrar o componente
fail_df = fail_df[fail_df['Component'] == component]
# aplicar o dummies
fail_df = fail_dummies(fail_df)
# fazer o merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# colocar zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na | rbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='Bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Função para agregar o data-frame pela medida de tempo pretendida, periodo _Dia_ ou _Hora_'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Medida de tempo não suportada')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def add_feat_predict(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[2:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def prepare_test_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Timestamp'] >= split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Date'] >= split]
return df_test
def prepare_train_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Timestamp'] < split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Date'] < split]
# df_test = df[df['Timestamp'] >= split]
return df_train
if __name__ == "__main__":
print('001 - Obtendo os dados')
# Obter o caminho dos ficheiros.
root_dir = os.path.abspath('..')
csv_path = os.path.join(root_dir, 'rawdata')
# Importar o dataset de failures
failures_path = os.path.join(csv_path, 'wind-farm-1-failures-training.csv')
failures_df = get_data(failures_path)
# Importar o dataset de signals
signals_path = os.path.join(csv_path, 'wind-farm-1-signals-training.csv')
signals_df = get_data(signals_path)
# Cortar colunas que não têm valores
cols_to_drop = ['Prod_LatestAvg_ActPwrGen2', 'Prod_LatestAvg_ReactPwrGen2']
signals_df = signals_df.drop(columns=cols_to_drop)
print('002 - Criar o dicionário com os Dataframes originais')
df_dict = {'failures_df':failures_df, 'signals_df':signals_df}
print('003 - Criar os datasets por componentes')
df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox = component_df_creation(signals_df)
print('004 - Criar o dicionário de datasets por componentes')
comp_df_dict = {'df_generator': df_generator,'df_hydraulic': df_hydraulic,'df_gen_bear': df_gen_bear,'df_transformer': df_transformer,'df_gearbox': df_gearbox}
print('005 - fazer cópia dos dataframes')
comp_prep_df_dict = comp_df_dict.copy()
print('006 - Merge com o dataframe de falhas')
component_list = ['GENERATOR', 'HYDRAULIC_GROUP', 'GENERATOR_BEARING', 'TRANSFORMER','GEARBOX']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = sig_fail_merge_dfs(sig_df=comp_prep_df_dict[key],fail_df=failures_df,component=component_list[i])
print('007 - Fillna by turbine')
turbine_list = ['T11', 'T06', 'T01', 'T09', 'T07']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = fill_na_by_turbine(comp_prep_df_dict[key],turbine_list)
print('008 - Criação da variável alvo')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = aplic_var_target(comp_prep_df_dict[key], 60)
print('009 - retirar as colunas que não se relacionam com a variavel alvo')
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = comp_prep_df_dict[key].drop(columns=feat_drop_list[i])
print('010 - agrupar pela medida de tempo seleccionada')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = group_por_frequency(comp_prep_df_dict[key], period='Dia')
print('011 - Adicionar medidas de alisamento')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = add_features(comp_prep_df_dict[key], rolling_win_size=10)
print(comp_prep_df_dict[key].shape)
| _by_turbine(df, tu | identifier_name |
b_get_data.py | # -*- coding: UTF-8 -*-
""" Main lib for wmillfailprev Project
"""
import os
import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics, model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss, roc_curve, precision_score, recall_score,confusion_matrix,f1_score,fbeta_score, make_scorer
pd.set_option('display.width', 200)
def get_data(file):
'file = full path of the original file. obter os dataframes dos ficheiros'
df = pd.read_csv(file, sep=';')
df = time_transform(df)
df = timestamp_round_down(df)
df = df.bfill()
return df
def logs_cols_uniform(df):
'A partir de uma lista de draframe uniformizar os nomes relacionados com tempo e turbine_ID'
df = df.rename(columns={'TimeDetected': 'Timestamp', 'UnitTitle':'Turbine_ID'})
return df
def timestamp_round_down(df, time_column='Timestamp'):
'Arredondar os intervalos de tempo para os 10 minutos anteriores'
df[time_column] = df.apply(lambda x: x[time_column] - datetime.timedelta(minutes=x[time_column].minute % 10,seconds=x[time_column].second, microseconds=x[time_column].microsecond),axis=1)
return df
def time_transform(df, time_column='Timestamp'):
'Transformar as colunas referentes a tempo no data type tempo'
df[time_column] = pd.to_datetime(df[time_column]).dt.tz_convert(None)
# df[time_column] = df[time_column]
return df
def component(component, col):
pair_comp_col=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Retornar dataframes por tipo de componente
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Uniformização da tabela de logs e transformação com get_dummies'''
# Colunas a manter
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# transformação de Get_Dummies
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'fazer o merge com o failures e desevolver o já dummyfied'
#filtrar o componente
fail_df = fail_df[fail_df['Component'] == component]
# aplicar o dummies
fail_df = fail_dummies(fail_df)
# fazer o merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# colocar zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na_by_turbine(df, turbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='Bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Função para agregar o data-frame pela medida de tempo pretendida, periodo _Dia_ ou _Hora_'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Medida de tempo não suportada')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def add_feat_predict(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[2:]:
sensor_col | or_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def prepare_test_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Timestamp'] >= split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Date'] >= split]
return df_test
def prepare_train_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Timestamp'] < split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Date'] < split]
# df_test = df[df['Timestamp'] >= split]
return df_train
if __name__ == "__main__":
print('001 - Obtendo os dados')
# Obter o caminho dos ficheiros.
root_dir = os.path.abspath('..')
csv_path = os.path.join(root_dir, 'rawdata')
# Importar o dataset de failures
failures_path = os.path.join(csv_path, 'wind-farm-1-failures-training.csv')
failures_df = get_data(failures_path)
# Importar o dataset de signals
signals_path = os.path.join(csv_path, 'wind-farm-1-signals-training.csv')
signals_df = get_data(signals_path)
# Cortar colunas que não têm valores
cols_to_drop = ['Prod_LatestAvg_ActPwrGen2', 'Prod_LatestAvg_ReactPwrGen2']
signals_df = signals_df.drop(columns=cols_to_drop)
print('002 - Criar o dicionário com os Dataframes originais')
df_dict = {'failures_df':failures_df, 'signals_df':signals_df}
print('003 - Criar os datasets por componentes')
df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox = component_df_creation(signals_df)
print('004 - Criar o dicionário de datasets por componentes')
comp_df_dict = {'df_generator': df_generator,'df_hydraulic': df_hydraulic,'df_gen_bear': df_gen_bear,'df_transformer': df_transformer,'df_gearbox': df_gearbox}
print('005 - fazer cópia dos dataframes')
comp_prep_df_dict = comp_df_dict.copy()
print('006 - Merge com o dataframe de falhas')
component_list = ['GENERATOR', 'HYDRAULIC_GROUP', 'GENERATOR_BEARING', 'TRANSFORMER','GEARBOX']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = sig_fail_merge_dfs(sig_df=comp_prep_df_dict[key],fail_df=failures_df,component=component_list[i])
print('007 - Fillna by turbine')
turbine_list = ['T11', 'T06', 'T01', 'T09', 'T07']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = fill_na_by_turbine(comp_prep_df_dict[key],turbine_list)
print('008 - Criação da variável alvo')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = aplic_var_target(comp_prep_df_dict[key], 60)
print('009 - retirar as colunas que não se relacionam com a variavel alvo')
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = comp_prep_df_dict[key].drop(columns=feat_drop_list[i])
print('010 - agrupar pela medida de tempo seleccionada')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = group_por_frequency(comp_prep_df_dict[key], period='Dia')
print('011 - Adicionar medidas de alisamento')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = add_features(comp_prep_df_dict[key], rolling_win_size=10)
print(comp_prep_df_dict[key].shape)
| s.append(i)
sens | conditional_block |
b_get_data.py | # -*- coding: UTF-8 -*-
""" Main lib for wmillfailprev Project
"""
import os
import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics, model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss, roc_curve, precision_score, recall_score,confusion_matrix,f1_score,fbeta_score, make_scorer
pd.set_option('display.width', 200)
def get_data(file):
'file = full path of the original file. obter os dataframes dos ficheiros'
df = pd.read_csv(file, sep=';')
df = time_transform(df)
df = timestamp_round_down(df)
df = df.bfill()
return df
def logs_cols_uniform(df):
'A partir de uma lista de draframe uniformizar os nomes relacionados com tempo e turbine_ID'
df = df.rename(columns={'TimeDetected': 'Timestamp', 'UnitTitle':'Turbine_ID'})
return df
def timestamp_round_down(df, time_column='Timestamp'):
'Arredondar os intervalos de tempo para os 10 minutos anteriores'
df[time_column] = df.apply(lambda x: x[time_column] - datetime.timedelta(minutes=x[time_column].minute % 10,seconds=x[time_column].second, microseconds=x[time_column].microsecond),axis=1)
return df
def time_transform(df, time_column='Timestamp'):
'Transformar as colunas referentes a tempo no data type tempo'
df[time_column] = pd.to_datetime(df[time_column]).dt.tz_convert(None)
# df[time_column] = df[time_column]
return df
def component(component, col):
pair_comp_col=[]
for i in col:
if component in i:
pair_comp_col.append(i)
return pair_comp_col
def component_df_creation(df):
# Retornar dataframes por tipo de componente
time_id = ['Timestamp', 'Turbine_ID']
pair_hyd = component('Hyd', df.columns)
pair_trafo = component('Trafo', df.columns)
pair_gear = component('Gear', df.columns)
pair_gen = component('Gen', df.columns)
pair_rot = component('Rtr', df.columns)
pair_amb = component('Amb', df.columns)
pair_blds = component('Blds', df.columns)
pair_cont = component('Cont', df.columns)
pair_nac = component('Nac', df.columns)
pair_spin = component('Spin', df.columns)
pair_bus = component('Busbar', df.columns)
pair_vol = component('Volt', df.columns)
#Create DF for each component
df_generator = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_gen_bear = df[time_id + pair_gen + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
df_transformer = df[time_id + pair_trafo + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_hydraulic = df[time_id + pair_hyd + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_vol]
df_gearbox = df[time_id + pair_gear + pair_rot + pair_amb + pair_blds + pair_cont + pair_nac + pair_spin + pair_bus + pair_hyd]
return df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox
def fail_dummies(df):
'''Uniformização da tabela de logs e transformação com get_dummies'''
# Colunas a manter
fail_cols_manter = ['Timestamp', 'Turbine_ID', 'Component']
df = df[fail_cols_manter]
# transformação de Get_Dummies
df = pd.get_dummies(df, columns=['Component'])
return df
def sig_fail_merge_dfs(sig_df, fail_df, component):
'fazer o merge com o failures e desevolver o já dummyfied'
#filtrar o componente
fail_df = fail_df[fail_df['Component'] == component]
# aplicar o dummies
fail_df = fail_dummies(fail_df)
# fazer o merge
df_merged = sig_df.merge(fail_df, on=['Timestamp','Turbine_ID'], how='outer')
# colocar zeros
df_merged.rename(columns= {'Component_' + component:'Component'}, inplace=True)
df_merged['Component'] = df_merged['Component'].fillna(0)
df_merged = df_merged.sort_values(by=['Turbine_ID','Timestamp'])
df_merged.fillna(0, inplace=True)
return df_merged
def fill_na_by_turbine(df, turbines_list):
df_ = pd.DataFrame(columns=df.columns, dtype='int64')
for turbine in turbines_list:
df1 = df.loc[df['Turbine_ID']==turbine]
if df1['Component'].nunique()>1:
index = df1[df1['Component']==1]
index['date'] = index['Timestamp']
index = index[['date','Timestamp', 'Turbine_ID']]
df_merged = df1.merge(index, how='left', on=['Turbine_ID','Timestamp'])
df_merged = df_merged.fillna(method='bfill')
#If there is not a failure after, hold present date
df_merged['date'] = df_merged['date'].fillna(df_merged['Timestamp'])
df_merged['TTF'] = round((df_merged['date'] - df_merged['Timestamp']) / np.timedelta64(1, 'D'),0)
df_merged = df_merged.fillna(method='Bfill')
else:
df_merged = df1
df_merged['date'] = df_merged['Timestamp']
df_merged['TTF'] = 0 # df_merged['date'] - df_merged['Timestamp']
# df_merged = df_merged.fillna(method='Bfill')
#Drop Column Date
df_final = df_merged.drop(columns='date')
#df_final['TTF'] = df_final['TTF'].dt.days
df_ = pd.concat([df_, df_final])
df_['Timestamp'] = pd.to_datetime(df_['Timestamp'])
return df_
def fill_na_by_turb_predict(df, turbines_list):
df = df.fillna(method='bfill')
return df
def Failure_Time_Horizon(days, period):
if 2 <= days <= period:
Flag=1
else:
Flag=0
return Flag
def aplic_var_target(df, period):
nome = str(period)
nome = nome+'_days'
df[nome] = df.apply(lambda x: Failure_Time_Horizon(x['TTF'], period),axis=1)
return df
def group_por_frequency(df, period='Dia', strategy='mean'):
'Função | features(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date', 'TTF', '60_days', 'Component']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[5:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def add_feat_predict(df_in, rolling_win_size=15):
"""Add rolling average and rolling standard deviation for sensors readings using fixed rolling window size.
"""
cols =['Turbine_ID', 'Date']
other_cols = []
for i in df_in.columns:
if i not in cols:
other_cols.append(i)
all_cols = cols + other_cols
df_in = df_in[all_cols]
sensor_cols = []
for i in df_in.columns[2:]:
sensor_cols.append(i)
sensor_av_cols = [nm+'_av' for nm in sensor_cols]
sensor_sd_cols = [nm+'_sd' for nm in sensor_cols]
df_out = pd.DataFrame()
ws = rolling_win_size
#calculate rolling stats for each engine id
for m_id in pd.unique(df_in.Turbine_ID):
# get a subset for each engine sensors
df_engine = df_in[df_in['Turbine_ID'] == m_id]
df_sub = df_engine[sensor_cols]
# get rolling mean for the subset
av = df_sub.rolling(ws, min_periods=1).mean()
av.columns = sensor_av_cols
# get the rolling standard deviation for the subset
sd = df_sub.rolling(ws, min_periods=1).std().fillna(0)
sd.columns = sensor_sd_cols
# combine the two new subset dataframes columns to the engine subset
new_ftrs = pd.concat([df_engine,av,sd], axis=1)
# add the new features rows to the output dataframe
df_out = pd.concat([df_out,new_ftrs])
df_out = df_out.sort_values(by=['Turbine_ID', 'Date'] )
return df_out
def prepare_test_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Timestamp'] >= split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_test = df[df['Date'] >= split]
return df_test
def prepare_train_df(df, meses=3):
if 'Timestamp' in df.keys():
last_date = df['Timestamp'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Timestamp'] < split]
else:
last_date = df['Date'].iloc[-1]
split = last_date - pd.DateOffset(months=meses)
df_train = df[df['Date'] < split]
# df_test = df[df['Timestamp'] >= split]
return df_train
if __name__ == "__main__":
print('001 - Obtendo os dados')
# Obter o caminho dos ficheiros.
root_dir = os.path.abspath('..')
csv_path = os.path.join(root_dir, 'rawdata')
# Importar o dataset de failures
failures_path = os.path.join(csv_path, 'wind-farm-1-failures-training.csv')
failures_df = get_data(failures_path)
# Importar o dataset de signals
signals_path = os.path.join(csv_path, 'wind-farm-1-signals-training.csv')
signals_df = get_data(signals_path)
# Cortar colunas que não têm valores
cols_to_drop = ['Prod_LatestAvg_ActPwrGen2', 'Prod_LatestAvg_ReactPwrGen2']
signals_df = signals_df.drop(columns=cols_to_drop)
print('002 - Criar o dicionário com os Dataframes originais')
df_dict = {'failures_df':failures_df, 'signals_df':signals_df}
print('003 - Criar os datasets por componentes')
df_generator, df_gen_bear, df_transformer, df_hydraulic, df_gearbox = component_df_creation(signals_df)
print('004 - Criar o dicionário de datasets por componentes')
comp_df_dict = {'df_generator': df_generator,'df_hydraulic': df_hydraulic,'df_gen_bear': df_gen_bear,'df_transformer': df_transformer,'df_gearbox': df_gearbox}
print('005 - fazer cópia dos dataframes')
comp_prep_df_dict = comp_df_dict.copy()
print('006 - Merge com o dataframe de falhas')
component_list = ['GENERATOR', 'HYDRAULIC_GROUP', 'GENERATOR_BEARING', 'TRANSFORMER','GEARBOX']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = sig_fail_merge_dfs(sig_df=comp_prep_df_dict[key],fail_df=failures_df,component=component_list[i])
print('007 - Fillna by turbine')
turbine_list = ['T11', 'T06', 'T01', 'T09', 'T07']
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = fill_na_by_turbine(comp_prep_df_dict[key],turbine_list)
print('008 - Criação da variável alvo')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = aplic_var_target(comp_prep_df_dict[key], 60)
print('009 - retirar as colunas que não se relacionam com a variavel alvo')
for i, key in enumerate(comp_prep_df_dict):
comp_prep_df_dict[key] = comp_prep_df_dict[key].drop(columns=feat_drop_list[i])
print('010 - agrupar pela medida de tempo seleccionada')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = group_por_frequency(comp_prep_df_dict[key], period='Dia')
print('011 - Adicionar medidas de alisamento')
for key in comp_prep_df_dict:
comp_prep_df_dict[key] = add_features(comp_prep_df_dict[key], rolling_win_size=10)
print(comp_prep_df_dict[key].shape)
| para agregar o data-frame pela medida de tempo pretendida, periodo _Dia_ ou _Hora_'
if period == 'Dia':
df['Date'] = df['Timestamp'].dt.date
elif period == 'Hora':
df['Date'] = df.apply(lambda x: x['Timestamp'] - datetime.timedelta(hours=x['Timestamp'].hour % -1, minutes=x['Timestamp'].minute, seconds=x['Timestamp'].second, microseconds=x['Timestamp'].microsecond),axis=1)
else:
print('Medida de tempo não suportada')
if strategy == 'max':
df = df.groupby(by=['Turbine_ID','Date']).max().reset_index().drop(columns='Timestamp')
else:
df = df.groupby(by=['Turbine_ID','Date']).mean().reset_index()
df['Date'] = pd.to_datetime(df['Date'])
return df
def add_ | identifier_body |
main.go | package main
import (
"database/sql"
"encoding/gob"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
_ "github.com/mattn/go-sqlite3"
"log"
"net/http"
"strings"
"text/template"
)
var database *sql.DB
var store *sessions.CookieStore
var tpl *template.Template
type User struct {
Username string
Password string
Authenticated bool
}
type UserData struct {
Id int `db:id`
Username string `json:"username", db:"username"`
Password string `json:"password", db:"password"`
}
type Bookmark struct {
Url string `json:"url", db:"url"`
TagName string `json:"tag", db:"tagName"`
BookmarkId int `db:"bookmarkId"`
TagId int `db:tagId`
}
func handleRequests(){
router := mux.NewRouter()
router.HandleFunc("/", index)
router.HandleFunc("/add_user", AddUser).Methods("POST")
router.HandleFunc("/login", login).Methods("POST")
router.HandleFunc("/logout", AuthMiddleware(logout)).Methods("GET")
router.HandleFunc("/add_bookmark", AuthMiddleware(AddBookmark)).Methods("POST")
router.HandleFunc("/update_bookmark", AuthMiddleware(UpdateBookmark)).Methods("PUT")
router.HandleFunc("/delete_tag", AuthMiddleware(DeleteTag)).Methods("DELETE")
router.HandleFunc("/list_bookmarks", AuthMiddleware(ListBookmarks)).Methods("GET")
log.Fatal(http.ListenAndServe(":10000", router))
}
func getUser(s *sessions.Session) User {
val := s.Values["user"]
fmt.Println("val",val)
user, ok := val.(User)
if !ok {
fmt.Println("did not find user session")
return User{Authenticated: false}
}
fmt.Println(val.(User))
fmt.Println("user.username",user.Username)
return user
}
//if basic auth headers exists, proceed to pass request to services
//if not, check if session user is authenticated
func AuthMiddleware(handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
username, password, _ := r.BasicAuth()
fmt.Println(r.BasicAuth())
if username=="" || !checkUsernameAndPassword(username, password) {
//w.Header().Set("WWW-Authenticate", `Basic realm="Please enter your username and password for this site"`)
//w.WriteHeader(401)
//w.Write([]byte("Unauthorised.\n"))
//w.Write([]byte("checking session instead.\n"))
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user := getUser(session)
fmt.Println(user)
if auth := user.Authenticated; !auth {
session.AddFlash("You don't have access!")
err = session.Save(r, w)
if err != nil {
fmt.Printf("You don't have access!")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("You don't have access!")
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
fmt.Println("authenticated via user session")
handler(w, r)
return
}
fmt.Println("authenticated via basic auth")
handler(w, r)
}
}
func checkUsernameAndPassword(username, password string) bool {
fmt.Println("[checkUsernameAndPassword]")
correctPassword := retrieveUserPassword(username)
return password == correctPassword
}
func index(w http.ResponseWriter, r *http.Request) |
func AddUser( w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := r.FormValue("password")
res , _ := database.Exec("INSERT INTO users(username,password) VALUES (?,?)",username,password)
fmt.Println(res)
fmt.Fprintf(w, "User successfully added")
http.Redirect(w, r, "/", http.StatusFound)
}
func login(w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := retrieveUserPassword(username)
session, err := store.Get(r, "cookie-name")
if err != nil {
}
// Where authentication could be done
if r.FormValue("password") != password {
if r.FormValue("password") == "" {
session.AddFlash("Must enter a password")
}
session.AddFlash("The password was incorrect")
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
user := &User{
Username: username,
Password: password,
Authenticated: true,
}
session.Values["user"] = user
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("login successful")
//expiration := time.Now().Add(365 * 24 * time.Hour)
//cookie := http.Cookie{Name: "username", Value: username, Expires: expiration}
//http.SetCookie(w, &cookie)
//cookie2:= http.Cookie{Name: "password", Value: password, Expires: expiration}
//http.SetCookie(w, &cookie2)
http.Redirect(w, r, "/", http.StatusFound)
}
func logout(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["user"] = User{}
session.Options.MaxAge = -1
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/", http.StatusFound)
}
func AddBookmark( w http.ResponseWriter, r *http.Request){
fmt.Println("add bookmark service reached")
url := r.FormValue("url")
username := getUserName(w,r)
userId := retrieveUserId(username)
fmt.Println(url," inserted for user", username )
res, _ := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)",url,userId)
fmt.Println(res)
http.Redirect(w, r, "/", http.StatusFound)
//fmt.Fprintf(w, "Bookmark added")
}
func UpdateBookmark(w http.ResponseWriter, r *http.Request){
fmt.Println("update bookmark service hit")
url := r.FormValue("url")
tagname := r.FormValue("tagname")
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
res, _ := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)",tagname)
if res != nil {}
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1",tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
fmt.Println(tagId,bookmarkId)
fmt.Println(err)
res2, _ := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)",tagId,bookmarkId)
if res2 != nil {}
fmt.Fprintf(w, "tag added")
}else {
fmt.Fprintf(w, "password incorrect")
}
}
func DeleteTag(w http.ResponseWriter, r *http.Request){
fmt.Println("deleting tag from bookmark")
fmt.Println(r)
tagname := r.URL.Query().Get("tag")
url := r.URL.Query().Get("url")
//url := r.FormValue("url")
//tagname := r.FormValue("tagname")
fmt.Println(url,tagname)
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
res2, _ := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
fmt.Println(res2)
fmt.Fprintf(w, "tag deleted")
}
}
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
fmt.Println("ListBookmarks service hit")
tags := r.URL.Query().Get("tags")
fmt.Println("tags:",tags)
username := getUserName(w,r)
fmt.Println("username:",username)
userId := retrieveUserId(username)
fmt.Println("userId:",userId)
var bookmark Bookmark
queryString := fmt.Sprintf("SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
"LEFT JOIN tag_bookmark " +
"ON tag_bookmark.bookmarkid = bookmarks.id LEFT JOIN tags " +
"ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=%d", userId)
//if tags != "" {
// oldQueryString := queryString
// queryString = fmt.Sprintf(oldQueryString+" AND tagName IN (%s)", "'"+
// strings.Replace(tags, ",", "','", -1)+"'")
//}
rows, err := database.Query(queryString)
if err != nil && err != sql.ErrNoRows {
// log the error
fmt.Fprintf(w, "" )
return
}
defer rows.Close()
bookmarkResults := make(map[string][]string)
//tagResults := make(map[int][]string)
for rows.Next(){
err := rows.Scan(&bookmark.BookmarkId,&bookmark.Url,&bookmark.TagName,&bookmark.TagId)
if err != nil && err != sql.ErrNoRows {
// log the error
}
//bookmarkRecord := []string{bookmark.tagId,bookmark.tagName}
//bookmarkRecord[bookmark.tagId]=bookmark.tagName
//bookmarkResults[bookmark.url] = append(bookmarkResults[bookmark.url], bookmarkRecord)
bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
//fmt.Println(bookmark.Url, bookmark.TagName)
}
for key := range bookmarkResults {
if len(tags) != 0 {
//&& !strings.Contains(strings.Join(bookmarkResults[key], ","),tags)
tagsList := strings.Split(tags, ",")
//tagName = bookmarkResults[key]
for _, tag := range tagsList {
if !stringInSlice(tag,bookmarkResults[key]){
delete(bookmarkResults, key)
}
}
}
}
type bookmarkJson struct {
Url string `json:"url"`
TagName []string `json:"tagName"`
}
var data []bookmarkJson
for key := range bookmarkResults {
bookmarkj := bookmarkJson{Url: key, TagName: bookmarkResults[key]}
data=append(data, bookmarkj )
}
bJsondata, _ := json.Marshal(data)
jsonData := string(bJsondata)
fmt.Println(jsonData)
fmt.Fprintf(w, jsonData )
}
func retrieveUserPassword(username string) string {
var dbUser UserData
err := database.QueryRow("SELECT password, id FROM users WHERE username=$1", username).
Scan(&dbUser.Password,&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserPassword] user not found in DB",username)
panic(err)
}
return dbUser.Password
}
func retrieveUserId(username string) int {
var dbUser UserData
err := database.QueryRow("SELECT id FROM users WHERE username=$1", username).
Scan(&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserId] user not found in DB",username)
panic(err)
}
return dbUser.Id
}
func retrieveBookmarkId(url string, userId int) int {
var bookmarkId int
err := database.QueryRow("SELECT id FROM bookmarks WHERE url=$1 AND userid=$2",url,userId).Scan(&bookmarkId)
fmt.Println(err)
return bookmarkId
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
func getUserName (w http.ResponseWriter,r *http.Request) string {
var username string
username, _, ok := r.BasicAuth()
if !ok {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return ""
}
fmt.Println("username retrieved from session")
user := getUser(session)
username = user.Username
return username
}
fmt.Println("username retrieved from basic auth")
return username
}
func initDB(){
database, _ = sql.Open("sqlite3", "./cisco.db")
createUsersTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS users (" +
"id INTEGER PRIMARY KEY AUTOINCREMENT," +
"username VARCHAR (20) NOT NULL, " +
"password VARCHAR (20) NOT NULL)")
createUsersTable.Exec()
createBookmarksTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS bookmarks " +
"(id INTEGER PRIMARY KEY AUTOINCREMENT," +
"url VARCHAR (70) , userid REFERENCES users(id),CONSTRAINT unq UNIQUE (url, userid))")
createBookmarksTable.Exec()
createTagsTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS tags (" +
"id INTEGER PRIMARY KEY AUTOINCREMENT, " +
"name VARCHAR (20) NOT NULL UNIQUE)")
createTagsTable.Exec()
createTagbookmarkTable, _ := database.Prepare( "CREATE TABLE IF NOT EXISTS tag_bookmark " +
"(tagid INTEGER, bookmarkid INTEGER, UNIQUE (tagid, bookmarkid) )")
//createTagbookmarkTable, _ := database.Prepare( "CREATE TABLE IF NOT EXISTS tag_bookmark " +
// "(tagid REFERENCES tags(id), bookmarkid REFERENCES bookmarks(id), PRIMARY KEY(tagid, bookmarkid) )")
createTagbookmarkTable.Exec()
fmt.Println("DB initialized")
}
func initSession(){
authKeyOne := securecookie.GenerateRandomKey(64)
encryptionKeyOne := securecookie.GenerateRandomKey(32)
store = sessions.NewCookieStore(
authKeyOne,
encryptionKeyOne,
)
store.Options = &sessions.Options{
MaxAge: 600 * 15,
HttpOnly: true,
}
gob.Register(User{})
tpl = template.Must(template.ParseGlob("templates/*.gohtml"))
}
func main() {
initDB()
initSession()
handleRequests()
}
| {
session, err := store.Get(r, "cookie-name")
if err != nil {
}
user := getUser(session)
fmt.Println("[serving main page]",user)
tpl.ExecuteTemplate(w, "index.gohtml", user)
} | identifier_body |
main.go | package main
import (
"database/sql"
"encoding/gob"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
_ "github.com/mattn/go-sqlite3"
"log"
"net/http"
"strings"
"text/template"
)
var database *sql.DB
var store *sessions.CookieStore
var tpl *template.Template
type User struct {
Username string
Password string
Authenticated bool
}
type UserData struct {
Id int `db:id`
Username string `json:"username", db:"username"`
Password string `json:"password", db:"password"`
}
type Bookmark struct {
Url string `json:"url", db:"url"`
TagName string `json:"tag", db:"tagName"`
BookmarkId int `db:"bookmarkId"`
TagId int `db:tagId`
}
func handleRequests(){
router := mux.NewRouter()
router.HandleFunc("/", index)
router.HandleFunc("/add_user", AddUser).Methods("POST")
router.HandleFunc("/login", login).Methods("POST")
router.HandleFunc("/logout", AuthMiddleware(logout)).Methods("GET")
router.HandleFunc("/add_bookmark", AuthMiddleware(AddBookmark)).Methods("POST")
router.HandleFunc("/update_bookmark", AuthMiddleware(UpdateBookmark)).Methods("PUT")
router.HandleFunc("/delete_tag", AuthMiddleware(DeleteTag)).Methods("DELETE")
router.HandleFunc("/list_bookmarks", AuthMiddleware(ListBookmarks)).Methods("GET")
log.Fatal(http.ListenAndServe(":10000", router))
}
func getUser(s *sessions.Session) User {
val := s.Values["user"]
fmt.Println("val",val)
user, ok := val.(User)
if !ok {
fmt.Println("did not find user session")
return User{Authenticated: false}
}
fmt.Println(val.(User))
fmt.Println("user.username",user.Username)
return user
}
//if basic auth headers exists, proceed to pass request to services
//if not, check if session user is authenticated
func AuthMiddleware(handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
username, password, _ := r.BasicAuth()
fmt.Println(r.BasicAuth())
if username=="" || !checkUsernameAndPassword(username, password) {
//w.Header().Set("WWW-Authenticate", `Basic realm="Please enter your username and password for this site"`)
//w.WriteHeader(401)
//w.Write([]byte("Unauthorised.\n"))
//w.Write([]byte("checking session instead.\n"))
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user := getUser(session)
fmt.Println(user)
if auth := user.Authenticated; !auth {
session.AddFlash("You don't have access!")
err = session.Save(r, w)
if err != nil {
fmt.Printf("You don't have access!")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("You don't have access!")
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
fmt.Println("authenticated via user session")
handler(w, r)
return
}
fmt.Println("authenticated via basic auth")
handler(w, r)
}
}
func checkUsernameAndPassword(username, password string) bool {
fmt.Println("[checkUsernameAndPassword]")
correctPassword := retrieveUserPassword(username)
return password == correctPassword
}
func index(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
}
user := getUser(session)
fmt.Println("[serving main page]",user)
tpl.ExecuteTemplate(w, "index.gohtml", user)
}
func AddUser( w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := r.FormValue("password")
res , _ := database.Exec("INSERT INTO users(username,password) VALUES (?,?)",username,password)
fmt.Println(res)
fmt.Fprintf(w, "User successfully added")
http.Redirect(w, r, "/", http.StatusFound)
}
func login(w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := retrieveUserPassword(username)
session, err := store.Get(r, "cookie-name")
if err != nil {
}
// Where authentication could be done
if r.FormValue("password") != password {
if r.FormValue("password") == "" {
session.AddFlash("Must enter a password")
}
session.AddFlash("The password was incorrect")
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
user := &User{
Username: username,
Password: password,
Authenticated: true,
}
session.Values["user"] = user
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("login successful")
//expiration := time.Now().Add(365 * 24 * time.Hour)
//cookie := http.Cookie{Name: "username", Value: username, Expires: expiration}
//http.SetCookie(w, &cookie)
//cookie2:= http.Cookie{Name: "password", Value: password, Expires: expiration}
//http.SetCookie(w, &cookie2)
http.Redirect(w, r, "/", http.StatusFound)
}
func logout(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["user"] = User{}
session.Options.MaxAge = -1
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/", http.StatusFound)
}
func AddBookmark( w http.ResponseWriter, r *http.Request){
fmt.Println("add bookmark service reached")
url := r.FormValue("url")
username := getUserName(w,r)
userId := retrieveUserId(username)
fmt.Println(url," inserted for user", username )
res, _ := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)",url,userId)
fmt.Println(res)
http.Redirect(w, r, "/", http.StatusFound)
//fmt.Fprintf(w, "Bookmark added")
}
func UpdateBookmark(w http.ResponseWriter, r *http.Request){
fmt.Println("update bookmark service hit")
url := r.FormValue("url")
tagname := r.FormValue("tagname")
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
res, _ := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)",tagname)
if res != nil {}
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1",tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
fmt.Println(tagId,bookmarkId)
fmt.Println(err)
res2, _ := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)",tagId,bookmarkId)
if res2 != nil {}
fmt.Fprintf(w, "tag added")
}else {
fmt.Fprintf(w, "password incorrect")
}
}
func DeleteTag(w http.ResponseWriter, r *http.Request){
fmt.Println("deleting tag from bookmark")
fmt.Println(r)
tagname := r.URL.Query().Get("tag")
url := r.URL.Query().Get("url")
//url := r.FormValue("url")
//tagname := r.FormValue("tagname")
fmt.Println(url,tagname)
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
res2, _ := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
fmt.Println(res2)
fmt.Fprintf(w, "tag deleted")
}
}
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
fmt.Println("ListBookmarks service hit")
tags := r.URL.Query().Get("tags")
fmt.Println("tags:",tags)
username := getUserName(w,r)
fmt.Println("username:",username)
userId := retrieveUserId(username)
fmt.Println("userId:",userId)
var bookmark Bookmark
queryString := fmt.Sprintf("SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
"LEFT JOIN tag_bookmark " +
"ON tag_bookmark.bookmarkid = bookmarks.id LEFT JOIN tags " +
"ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=%d", userId)
//if tags != "" {
// oldQueryString := queryString
// queryString = fmt.Sprintf(oldQueryString+" AND tagName IN (%s)", "'"+
// strings.Replace(tags, ",", "','", -1)+"'")
//}
rows, err := database.Query(queryString)
if err != nil && err != sql.ErrNoRows {
// log the error
fmt.Fprintf(w, "" )
return
}
defer rows.Close()
bookmarkResults := make(map[string][]string)
//tagResults := make(map[int][]string)
for rows.Next(){
err := rows.Scan(&bookmark.BookmarkId,&bookmark.Url,&bookmark.TagName,&bookmark.TagId)
if err != nil && err != sql.ErrNoRows {
// log the error
}
//bookmarkRecord := []string{bookmark.tagId,bookmark.tagName}
//bookmarkRecord[bookmark.tagId]=bookmark.tagName
//bookmarkResults[bookmark.url] = append(bookmarkResults[bookmark.url], bookmarkRecord)
bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
//fmt.Println(bookmark.Url, bookmark.TagName)
}
for key := range bookmarkResults {
if len(tags) != 0 {
//&& !strings.Contains(strings.Join(bookmarkResults[key], ","),tags)
tagsList := strings.Split(tags, ",")
//tagName = bookmarkResults[key]
for _, tag := range tagsList {
if !stringInSlice(tag,bookmarkResults[key]){
delete(bookmarkResults, key)
}
}
}
}
type bookmarkJson struct {
Url string `json:"url"`
TagName []string `json:"tagName"`
}
var data []bookmarkJson
for key := range bookmarkResults {
bookmarkj := bookmarkJson{Url: key, TagName: bookmarkResults[key]}
data=append(data, bookmarkj )
}
bJsondata, _ := json.Marshal(data)
jsonData := string(bJsondata)
fmt.Println(jsonData)
fmt.Fprintf(w, jsonData )
}
func retrieveUserPassword(username string) string {
var dbUser UserData
err := database.QueryRow("SELECT password, id FROM users WHERE username=$1", username).
Scan(&dbUser.Password,&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserPassword] user not found in DB",username)
panic(err) | func retrieveUserId(username string) int {
var dbUser UserData
err := database.QueryRow("SELECT id FROM users WHERE username=$1", username).
Scan(&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserId] user not found in DB",username)
panic(err)
}
return dbUser.Id
}
func retrieveBookmarkId(url string, userId int) int {
var bookmarkId int
err := database.QueryRow("SELECT id FROM bookmarks WHERE url=$1 AND userid=$2",url,userId).Scan(&bookmarkId)
fmt.Println(err)
return bookmarkId
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
func getUserName (w http.ResponseWriter,r *http.Request) string {
var username string
username, _, ok := r.BasicAuth()
if !ok {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return ""
}
fmt.Println("username retrieved from session")
user := getUser(session)
username = user.Username
return username
}
fmt.Println("username retrieved from basic auth")
return username
}
func initDB(){
database, _ = sql.Open("sqlite3", "./cisco.db")
createUsersTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS users (" +
"id INTEGER PRIMARY KEY AUTOINCREMENT," +
"username VARCHAR (20) NOT NULL, " +
"password VARCHAR (20) NOT NULL)")
createUsersTable.Exec()
createBookmarksTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS bookmarks " +
"(id INTEGER PRIMARY KEY AUTOINCREMENT," +
"url VARCHAR (70) , userid REFERENCES users(id),CONSTRAINT unq UNIQUE (url, userid))")
createBookmarksTable.Exec()
createTagsTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS tags (" +
"id INTEGER PRIMARY KEY AUTOINCREMENT, " +
"name VARCHAR (20) NOT NULL UNIQUE)")
createTagsTable.Exec()
createTagbookmarkTable, _ := database.Prepare( "CREATE TABLE IF NOT EXISTS tag_bookmark " +
"(tagid INTEGER, bookmarkid INTEGER, UNIQUE (tagid, bookmarkid) )")
//createTagbookmarkTable, _ := database.Prepare( "CREATE TABLE IF NOT EXISTS tag_bookmark " +
// "(tagid REFERENCES tags(id), bookmarkid REFERENCES bookmarks(id), PRIMARY KEY(tagid, bookmarkid) )")
createTagbookmarkTable.Exec()
fmt.Println("DB initialized")
}
func initSession(){
authKeyOne := securecookie.GenerateRandomKey(64)
encryptionKeyOne := securecookie.GenerateRandomKey(32)
store = sessions.NewCookieStore(
authKeyOne,
encryptionKeyOne,
)
store.Options = &sessions.Options{
MaxAge: 600 * 15,
HttpOnly: true,
}
gob.Register(User{})
tpl = template.Must(template.ParseGlob("templates/*.gohtml"))
}
func main() {
initDB()
initSession()
handleRequests()
} | }
return dbUser.Password
} | random_line_split |
main.go | package main
import (
"database/sql"
"encoding/gob"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
_ "github.com/mattn/go-sqlite3"
"log"
"net/http"
"strings"
"text/template"
)
var database *sql.DB
var store *sessions.CookieStore
var tpl *template.Template
type User struct {
Username string
Password string
Authenticated bool
}
type UserData struct {
Id int `db:id`
Username string `json:"username", db:"username"`
Password string `json:"password", db:"password"`
}
type Bookmark struct {
Url string `json:"url", db:"url"`
TagName string `json:"tag", db:"tagName"`
BookmarkId int `db:"bookmarkId"`
TagId int `db:tagId`
}
func handleRequests(){
router := mux.NewRouter()
router.HandleFunc("/", index)
router.HandleFunc("/add_user", AddUser).Methods("POST")
router.HandleFunc("/login", login).Methods("POST")
router.HandleFunc("/logout", AuthMiddleware(logout)).Methods("GET")
router.HandleFunc("/add_bookmark", AuthMiddleware(AddBookmark)).Methods("POST")
router.HandleFunc("/update_bookmark", AuthMiddleware(UpdateBookmark)).Methods("PUT")
router.HandleFunc("/delete_tag", AuthMiddleware(DeleteTag)).Methods("DELETE")
router.HandleFunc("/list_bookmarks", AuthMiddleware(ListBookmarks)).Methods("GET")
log.Fatal(http.ListenAndServe(":10000", router))
}
func getUser(s *sessions.Session) User {
val := s.Values["user"]
fmt.Println("val",val)
user, ok := val.(User)
if !ok {
fmt.Println("did not find user session")
return User{Authenticated: false}
}
fmt.Println(val.(User))
fmt.Println("user.username",user.Username)
return user
}
//if basic auth headers exists, proceed to pass request to services
//if not, check if session user is authenticated
func AuthMiddleware(handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
username, password, _ := r.BasicAuth()
fmt.Println(r.BasicAuth())
if username=="" || !checkUsernameAndPassword(username, password) {
//w.Header().Set("WWW-Authenticate", `Basic realm="Please enter your username and password for this site"`)
//w.WriteHeader(401)
//w.Write([]byte("Unauthorised.\n"))
//w.Write([]byte("checking session instead.\n"))
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
user := getUser(session)
fmt.Println(user)
if auth := user.Authenticated; !auth {
session.AddFlash("You don't have access!")
err = session.Save(r, w)
if err != nil {
fmt.Printf("You don't have access!")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("You don't have access!")
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
fmt.Println("authenticated via user session")
handler(w, r)
return
}
fmt.Println("authenticated via basic auth")
handler(w, r)
}
}
func checkUsernameAndPassword(username, password string) bool {
fmt.Println("[checkUsernameAndPassword]")
correctPassword := retrieveUserPassword(username)
return password == correctPassword
}
func index(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
}
user := getUser(session)
fmt.Println("[serving main page]",user)
tpl.ExecuteTemplate(w, "index.gohtml", user)
}
func AddUser( w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := r.FormValue("password")
res , _ := database.Exec("INSERT INTO users(username,password) VALUES (?,?)",username,password)
fmt.Println(res)
fmt.Fprintf(w, "User successfully added")
http.Redirect(w, r, "/", http.StatusFound)
}
func login(w http.ResponseWriter, r *http.Request) {
username := r.FormValue("username")
password := retrieveUserPassword(username)
session, err := store.Get(r, "cookie-name")
if err != nil {
}
// Where authentication could be done
if r.FormValue("password") != password {
if r.FormValue("password") == "" {
session.AddFlash("Must enter a password")
}
session.AddFlash("The password was incorrect")
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/forbidden", http.StatusFound)
return
}
user := &User{
Username: username,
Password: password,
Authenticated: true,
}
session.Values["user"] = user
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Printf("login successful")
//expiration := time.Now().Add(365 * 24 * time.Hour)
//cookie := http.Cookie{Name: "username", Value: username, Expires: expiration}
//http.SetCookie(w, &cookie)
//cookie2:= http.Cookie{Name: "password", Value: password, Expires: expiration}
//http.SetCookie(w, &cookie2)
http.Redirect(w, r, "/", http.StatusFound)
}
func logout(w http.ResponseWriter, r *http.Request) {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values["user"] = User{}
session.Options.MaxAge = -1
err = session.Save(r, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, r, "/", http.StatusFound)
}
func AddBookmark( w http.ResponseWriter, r *http.Request){
fmt.Println("add bookmark service reached")
url := r.FormValue("url")
username := getUserName(w,r)
userId := retrieveUserId(username)
fmt.Println(url," inserted for user", username )
res, _ := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)",url,userId)
fmt.Println(res)
http.Redirect(w, r, "/", http.StatusFound)
//fmt.Fprintf(w, "Bookmark added")
}
func UpdateBookmark(w http.ResponseWriter, r *http.Request){
fmt.Println("update bookmark service hit")
url := r.FormValue("url")
tagname := r.FormValue("tagname")
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
res, _ := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)",tagname)
if res != nil {}
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1",tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
fmt.Println(tagId,bookmarkId)
fmt.Println(err)
res2, _ := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)",tagId,bookmarkId)
if res2 != nil {}
fmt.Fprintf(w, "tag added")
}else {
fmt.Fprintf(w, "password incorrect")
}
}
func DeleteTag(w http.ResponseWriter, r *http.Request){
fmt.Println("deleting tag from bookmark")
fmt.Println(r)
tagname := r.URL.Query().Get("tag")
url := r.URL.Query().Get("url")
//url := r.FormValue("url")
//tagname := r.FormValue("tagname")
fmt.Println(url,tagname)
username := getUserName(w,r)
userId := retrieveUserId(username)
if userId != 0 {
bookmarkId := retrieveBookmarkId(url, userId)
var tagId int
err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId)
if err != nil {
fmt.Println(err)
}
res2, _ := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
fmt.Println(res2)
fmt.Fprintf(w, "tag deleted")
}
}
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
fmt.Println("ListBookmarks service hit")
tags := r.URL.Query().Get("tags")
fmt.Println("tags:",tags)
username := getUserName(w,r)
fmt.Println("username:",username)
userId := retrieveUserId(username)
fmt.Println("userId:",userId)
var bookmark Bookmark
queryString := fmt.Sprintf("SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
"LEFT JOIN tag_bookmark " +
"ON tag_bookmark.bookmarkid = bookmarks.id LEFT JOIN tags " +
"ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=%d", userId)
//if tags != "" {
// oldQueryString := queryString
// queryString = fmt.Sprintf(oldQueryString+" AND tagName IN (%s)", "'"+
// strings.Replace(tags, ",", "','", -1)+"'")
//}
rows, err := database.Query(queryString)
if err != nil && err != sql.ErrNoRows {
// log the error
fmt.Fprintf(w, "" )
return
}
defer rows.Close()
bookmarkResults := make(map[string][]string)
//tagResults := make(map[int][]string)
for rows.Next(){
err := rows.Scan(&bookmark.BookmarkId,&bookmark.Url,&bookmark.TagName,&bookmark.TagId)
if err != nil && err != sql.ErrNoRows {
// log the error
}
//bookmarkRecord := []string{bookmark.tagId,bookmark.tagName}
//bookmarkRecord[bookmark.tagId]=bookmark.tagName
//bookmarkResults[bookmark.url] = append(bookmarkResults[bookmark.url], bookmarkRecord)
bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
//fmt.Println(bookmark.Url, bookmark.TagName)
}
for key := range bookmarkResults {
if len(tags) != 0 {
//&& !strings.Contains(strings.Join(bookmarkResults[key], ","),tags)
tagsList := strings.Split(tags, ",")
//tagName = bookmarkResults[key]
for _, tag := range tagsList {
if !stringInSlice(tag,bookmarkResults[key]){
delete(bookmarkResults, key)
}
}
}
}
type bookmarkJson struct {
Url string `json:"url"`
TagName []string `json:"tagName"`
}
var data []bookmarkJson
for key := range bookmarkResults {
bookmarkj := bookmarkJson{Url: key, TagName: bookmarkResults[key]}
data=append(data, bookmarkj )
}
bJsondata, _ := json.Marshal(data)
jsonData := string(bJsondata)
fmt.Println(jsonData)
fmt.Fprintf(w, jsonData )
}
func retrieveUserPassword(username string) string {
var dbUser UserData
err := database.QueryRow("SELECT password, id FROM users WHERE username=$1", username).
Scan(&dbUser.Password,&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserPassword] user not found in DB",username)
panic(err)
}
return dbUser.Password
}
func retrieveUserId(username string) int {
var dbUser UserData
err := database.QueryRow("SELECT id FROM users WHERE username=$1", username).
Scan(&dbUser.Id)
if err != nil {
fmt.Println("[retrieveUserId] user not found in DB",username)
panic(err)
}
return dbUser.Id
}
func retrieveBookmarkId(url string, userId int) int {
var bookmarkId int
err := database.QueryRow("SELECT id FROM bookmarks WHERE url=$1 AND userid=$2",url,userId).Scan(&bookmarkId)
fmt.Println(err)
return bookmarkId
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
func getUserName (w http.ResponseWriter,r *http.Request) string {
var username string
username, _, ok := r.BasicAuth()
if !ok {
session, err := store.Get(r, "cookie-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return ""
}
fmt.Println("username retrieved from session")
user := getUser(session)
username = user.Username
return username
}
fmt.Println("username retrieved from basic auth")
return username
}
func | (){
database, _ = sql.Open("sqlite3", "./cisco.db")
createUsersTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS users (" +
"id INTEGER PRIMARY KEY AUTOINCREMENT," +
"username VARCHAR (20) NOT NULL, " +
"password VARCHAR (20) NOT NULL)")
createUsersTable.Exec()
createBookmarksTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS bookmarks " +
"(id INTEGER PRIMARY KEY AUTOINCREMENT," +
"url VARCHAR (70) , userid REFERENCES users(id),CONSTRAINT unq UNIQUE (url, userid))")
createBookmarksTable.Exec()
createTagsTable, _ := database.Prepare("CREATE TABLE IF NOT EXISTS tags (" +
"id INTEGER PRIMARY KEY AUTOINCREMENT, " +
"name VARCHAR (20) NOT NULL UNIQUE)")
createTagsTable.Exec()
createTagbookmarkTable, _ := database.Prepare( "CREATE TABLE IF NOT EXISTS tag_bookmark " +
"(tagid INTEGER, bookmarkid INTEGER, UNIQUE (tagid, bookmarkid) )")
//createTagbookmarkTable, _ := database.Prepare( "CREATE TABLE IF NOT EXISTS tag_bookmark " +
// "(tagid REFERENCES tags(id), bookmarkid REFERENCES bookmarks(id), PRIMARY KEY(tagid, bookmarkid) )")
createTagbookmarkTable.Exec()
fmt.Println("DB initialized")
}
func initSession(){
authKeyOne := securecookie.GenerateRandomKey(64)
encryptionKeyOne := securecookie.GenerateRandomKey(32)
store = sessions.NewCookieStore(
authKeyOne,
encryptionKeyOne,
)
store.Options = &sessions.Options{
MaxAge: 600 * 15,
HttpOnly: true,
}
gob.Register(User{})
tpl = template.Must(template.ParseGlob("templates/*.gohtml"))
}
func main() {
initDB()
initSession()
handleRequests()
}
| initDB | identifier_name |
main.go | package main
import (
"database/sql"
"encoding/gob"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
_ "github.com/mattn/go-sqlite3"
"log"
"net/http"
"strings"
"text/template"
)
var database *sql.DB
var store *sessions.CookieStore
var tpl *template.Template
type User struct {
Username string
Password string
Authenticated bool
}
type UserData struct {
Id int `db:id`
Username string `json:"username", db:"username"`
Password string `json:"password", db:"password"`
}
type Bookmark struct {
Url string `json:"url", db:"url"`
TagName string `json:"tag", db:"tagName"`
BookmarkId int `db:"bookmarkId"`
TagId int `db:tagId`
}
func handleRequests(){
router := mux.NewRouter()
router.HandleFunc("/", index)
router.HandleFunc("/add_user", AddUser).Methods("POST")
router.HandleFunc("/login", login).Methods("POST")
router.HandleFunc("/logout", AuthMiddleware(logout)).Methods("GET")
router.HandleFunc("/add_bookmark", AuthMiddleware(AddBookmark)).Methods("POST")
router.HandleFunc("/update_bookmark", AuthMiddleware(UpdateBookmark)).Methods("PUT")
router.HandleFunc("/delete_tag", AuthMiddleware(DeleteTag)).Methods("DELETE")
router.HandleFunc("/list_bookmarks", AuthMiddleware(ListBookmarks)).Methods("GET")
log.Fatal(http.ListenAndServe(":10000", router))
}
// getUser extracts the User stored under "user" in the session. When the
// value is absent or of an unexpected type, a zero User with
// Authenticated=false is returned.
func getUser(s *sessions.Session) User {
	raw := s.Values["user"]
	fmt.Println("val", raw)
	u, found := raw.(User)
	if !found {
		fmt.Println("did not find user session")
		return User{Authenticated: false}
	}
	fmt.Println(raw.(User))
	fmt.Println("user.username", u.Username)
	return u
}
//if basic auth headers exists, proceed to pass request to services
//if not, check if session user is authenticated
// AuthMiddleware guards a handler: it first tries HTTP basic auth and,
// when that is absent or invalid, falls back to the cookie session.
// Unauthenticated requests get a flash message and a redirect to /forbidden.
func AuthMiddleware(handler http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		username, password, _ := r.BasicAuth()
		fmt.Println(r.BasicAuth())
		if username == "" || !checkUsernameAndPassword(username, password) {
			// Basic auth missing or wrong: check the session cookie instead.
			session, err := store.Get(r, "cookie-name")
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			user := getUser(session)
			fmt.Println(user)
			if auth := user.Authenticated; !auth {
				// The flash must be saved before writing the redirect,
				// otherwise it is lost with the response headers.
				session.AddFlash("You don't have access!")
				err = session.Save(r, w)
				if err != nil {
					fmt.Printf("You don't have access!")
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				fmt.Printf("You don't have access!")
				http.Redirect(w, r, "/forbidden", http.StatusFound)
				return
			}
			fmt.Println("authenticated via user session")
			handler(w, r)
			return
		}
		fmt.Println("authenticated via basic auth")
		handler(w, r)
	}
}
// checkUsernameAndPassword reports whether password matches the one
// stored for username.
// NOTE(review): comparison is plaintext and non-constant-time — consider
// hashed storage plus subtle.ConstantTimeCompare as a follow-up.
func checkUsernameAndPassword(username, password string) bool {
	fmt.Println("[checkUsernameAndPassword]")
	return retrieveUserPassword(username) == password
}
// index renders the main page with whatever user the session carries.
//
// Fix: the store.Get error was silently discarded (empty if body); it now
// returns a 500, matching the error handling in logout/AuthMiddleware.
func index(w http.ResponseWriter, r *http.Request) {
	session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	user := getUser(session)
	fmt.Println("[serving main page]", user)
	if err := tpl.ExecuteTemplate(w, "index.gohtml", user); err != nil {
		log.Println("rendering index:", err)
	}
}
// AddUser inserts a new user row from form values and redirects to "/".
//
// Fixes: the INSERT error was ignored, and the success message was written
// to the body *before* http.Redirect — once the body has been written the
// 302 status can no longer be sent, so the redirect never worked.
func AddUser(w http.ResponseWriter, r *http.Request) {
	username := r.FormValue("username")
	password := r.FormValue("password")
	res, err := database.Exec("INSERT INTO users(username,password) VALUES (?,?)", username, password)
	if err != nil {
		http.Error(w, "could not create user", http.StatusInternalServerError)
		return
	}
	fmt.Println(res)
	http.Redirect(w, r, "/", http.StatusFound)
}
// login checks the submitted password against the stored one; on success
// it records an authenticated User in the session and redirects to "/",
// otherwise it flashes an error and redirects to /forbidden.
//
// Fixes: (1) the session was storing a *User while only User is registered
// with gob (initSession), so serialization failed and getUser's User type
// assertion could never match — store the value instead; (2) the store.Get
// error was silently discarded.
func login(w http.ResponseWriter, r *http.Request) {
	username := r.FormValue("username")
	password := retrieveUserPassword(username)
	session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if r.FormValue("password") != password {
		if r.FormValue("password") == "" {
			session.AddFlash("Must enter a password")
		}
		session.AddFlash("The password was incorrect")
		if err := session.Save(r, w); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		http.Redirect(w, r, "/forbidden", http.StatusFound)
		return
	}
	// Store the User by value: gob.Register(User{}) registered the value type.
	session.Values["user"] = User{
		Username:      username,
		Password:      password,
		Authenticated: true,
	}
	if err := session.Save(r, w); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Printf("login successful")
	http.Redirect(w, r, "/", http.StatusFound)
}
// logout clears the stored user, expires the session cookie (MaxAge=-1),
// and redirects to "/".
func logout(w http.ResponseWriter, r *http.Request) {
	sess, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	sess.Values["user"] = User{}
	sess.Options.MaxAge = -1
	if err = sess.Save(r, w); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, "/", http.StatusFound)
}
// AddBookmark inserts a bookmark URL for the requesting user and
// redirects to "/".
//
// Fix: the INSERT error was ignored; it now returns a 500 on failure.
func AddBookmark(w http.ResponseWriter, r *http.Request) {
	fmt.Println("add bookmark service reached")
	url := r.FormValue("url")
	username := getUserName(w, r)
	userId := retrieveUserId(username)
	fmt.Println(url, " inserted for user", username)
	res, err := database.Exec("INSERT INTO bookmarks(url,userid) VALUES(?,?)", url, userId)
	if err != nil {
		http.Error(w, "could not add bookmark", http.StatusInternalServerError)
		return
	}
	fmt.Println(res)
	http.Redirect(w, r, "/", http.StatusFound)
}
// UpdateBookmark attaches a tag to an existing bookmark of the requesting
// user: the tag is created if missing, then linked in tag_bookmark.
//
// Fixes: the dead `if res != nil {}` placeholders are gone, the ignored
// Exec errors are now logged, and the unknown-user case uses an early
// return instead of a trailing else.
func UpdateBookmark(w http.ResponseWriter, r *http.Request) {
	fmt.Println("update bookmark service hit")
	url := r.FormValue("url")
	tagname := r.FormValue("tagname")
	username := getUserName(w, r)
	userId := retrieveUserId(username)
	if userId == 0 {
		// retrieveUserId returns 0 when the user cannot be resolved.
		fmt.Fprintf(w, "password incorrect")
		return
	}
	bookmarkId := retrieveBookmarkId(url, userId)
	if _, err := database.Exec("INSERT OR IGNORE INTO tags(name) VALUES(?)", tagname); err != nil {
		fmt.Println(err)
	}
	var tagId int
	if err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId); err != nil {
		fmt.Println(err)
	}
	fmt.Println(tagId, bookmarkId)
	if _, err := database.Exec("INSERT INTO tag_bookmark(tagid,bookmarkid) VALUES(?,?)", tagId, bookmarkId); err != nil {
		fmt.Println(err)
	}
	fmt.Fprintf(w, "tag added")
}
// DeleteTag removes the tag/bookmark association identified by the
// "tag" and "url" query parameters for the requesting user.
//
// Fixes: early-return guard instead of one big if, ignored query/exec
// errors now logged, dead commented-out code removed. As in the original,
// an unresolvable user produces no response body.
func DeleteTag(w http.ResponseWriter, r *http.Request) {
	fmt.Println("deleting tag from bookmark")
	tagname := r.URL.Query().Get("tag")
	url := r.URL.Query().Get("url")
	fmt.Println(url, tagname)
	username := getUserName(w, r)
	userId := retrieveUserId(username)
	if userId == 0 {
		return
	}
	bookmarkId := retrieveBookmarkId(url, userId)
	var tagId int
	if err := database.QueryRow("SELECT id from tags WHERE name=$1", tagname).Scan(&tagId); err != nil {
		fmt.Println(err)
	}
	res, err := database.Exec("DELETE FROM tag_bookmark WHERE tagid=$1 AND bookmarkId=$2", tagId, bookmarkId)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(res)
	fmt.Fprintf(w, "tag deleted")
}
// ListBookmarks returns the caller's bookmarks as JSON, optionally
// filtered to bookmarks that carry every tag in the comma-separated
// "tags" query parameter.
//
// Fixes: the user id is now a bound query parameter instead of being
// fmt.Sprintf-ed into the SQL; the response is written with Fprint-style
// output rather than `fmt.Fprintf(w, jsonData)` (a non-constant format
// string — any '%' in a URL or tag corrupted the payload, and go vet
// flags it); an empty result now marshals as [] instead of null; Scan
// errors are logged; Content-Type is set.
func ListBookmarks(w http.ResponseWriter, r *http.Request) {
	fmt.Println("ListBookmarks service hit")
	tags := r.URL.Query().Get("tags")
	fmt.Println("tags:", tags)
	username := getUserName(w, r)
	fmt.Println("username:", username)
	userId := retrieveUserId(username)
	fmt.Println("userId:", userId)

	query := "SELECT bookmarks.id as bookmarkId, bookmarks.url as url, " +
		"coalesce(tags.name,'') AS tagName, coalesce(tags.id,0) AS tagId FROM bookmarks " +
		"LEFT JOIN tag_bookmark ON tag_bookmark.bookmarkid = bookmarks.id " +
		"LEFT JOIN tags ON tags.id = tag_bookmark.tagid WHERE bookmarks.userid=?"
	rows, err := database.Query(query, userId)
	if err != nil {
		fmt.Println(err)
		fmt.Fprint(w, "")
		return
	}
	defer rows.Close()

	// url -> tag names (one entry per join row; untagged bookmarks get "").
	var bookmark Bookmark
	bookmarkResults := make(map[string][]string)
	for rows.Next() {
		if err := rows.Scan(&bookmark.BookmarkId, &bookmark.Url, &bookmark.TagName, &bookmark.TagId); err != nil {
			fmt.Println(err)
			continue
		}
		bookmarkResults[bookmark.Url] = append(bookmarkResults[bookmark.Url], bookmark.TagName)
	}
	// Keep only bookmarks carrying every requested tag (deleting during a
	// map range is safe in Go).
	if tags != "" {
		wanted := strings.Split(tags, ",")
		for url, have := range bookmarkResults {
			for _, tag := range wanted {
				if !stringInSlice(tag, have) {
					delete(bookmarkResults, url)
					break
				}
			}
		}
	}
	type bookmarkJson struct {
		Url     string   `json:"url"`
		TagName []string `json:"tagName"`
	}
	data := make([]bookmarkJson, 0, len(bookmarkResults))
	for url, tagNames := range bookmarkResults {
		data = append(data, bookmarkJson{Url: url, TagName: tagNames})
	}
	payload, err := json.Marshal(data)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Println(string(payload))
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
}
// retrieveUserPassword looks up the stored password for username.
//
// NOTE(review): any lookup failure (including simply an unknown username
// reaching login/checkUsernameAndPassword) panics and aborts the request
// goroutine — consider returning an error instead; fixing it needs
// coordinated changes in the callers.
// NOTE(review): passwords are stored and compared in plaintext — flagging
// a hashing scheme (bcrypt/scrypt) as a follow-up.
func retrieveUserPassword(username string) string {
	var dbUser UserData
	err := database.QueryRow("SELECT password, id FROM users WHERE username=$1", username).
		Scan(&dbUser.Password, &dbUser.Id)
	if err != nil {
		fmt.Println("[retrieveUserPassword] user not found in DB", username)
		panic(err)
	}
	return dbUser.Password
}
// retrieveUserId returns the id for username, or 0 when the user does
// not exist.
//
// Fix: callers (UpdateBookmark, DeleteTag) already guard on
// `userId != 0`, i.e. they expect 0 for "unknown user", but the previous
// implementation panicked before they could — return 0 instead.
func retrieveUserId(username string) int {
	var dbUser UserData
	err := database.QueryRow("SELECT id FROM users WHERE username=$1", username).
		Scan(&dbUser.Id)
	if err != nil {
		fmt.Println("[retrieveUserId] user not found in DB", username)
		return 0
	}
	return dbUser.Id
}
// retrieveBookmarkId returns the id of the bookmark with the given url
// owned by userId, or 0 (the zero value) when no such row exists.
//
// Fix: the error was printed unconditionally, emitting "<nil>" on every
// successful lookup; log it only when non-nil.
func retrieveBookmarkId(url string, userId int) int {
	var bookmarkId int
	err := database.QueryRow("SELECT id FROM bookmarks WHERE url=$1 AND userid=$2", url, userId).Scan(&bookmarkId)
	if err != nil {
		fmt.Println(err)
	}
	return bookmarkId
}
func stringInSlice(a string, list []string) bool {
for _, b := range list |
return false
}
// getUserName resolves the requesting user's name, preferring HTTP basic
// auth and falling back to the cookie session. It returns "" (after
// writing a 500) when the session cannot be loaded.
func getUserName(w http.ResponseWriter, r *http.Request) string {
	if username, _, ok := r.BasicAuth(); ok {
		fmt.Println("username retrieved from basic auth")
		return username
	}
	session, err := store.Get(r, "cookie-name")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return ""
	}
	fmt.Println("username retrieved from session")
	return getUser(session).Username
}
// initDB opens ./cisco.db and creates the schema if it is missing.
// Any failure here is fatal: the server cannot run without its tables.
//
// Fixes: sql.Open and every statement error were discarded — a failed
// Prepare previously left a nil statement whose Exec would panic. The
// Prepare/Exec pairs are collapsed into direct Exec calls with checks.
func initDB() {
	var err error
	database, err = sql.Open("sqlite3", "./cisco.db")
	if err != nil {
		log.Fatal("opening database: ", err)
	}
	schema := []string{
		"CREATE TABLE IF NOT EXISTS users (" +
			"id INTEGER PRIMARY KEY AUTOINCREMENT," +
			"username VARCHAR (20) NOT NULL, " +
			"password VARCHAR (20) NOT NULL)",
		"CREATE TABLE IF NOT EXISTS bookmarks " +
			"(id INTEGER PRIMARY KEY AUTOINCREMENT," +
			"url VARCHAR (70) , userid REFERENCES users(id),CONSTRAINT unq UNIQUE (url, userid))",
		"CREATE TABLE IF NOT EXISTS tags (" +
			"id INTEGER PRIMARY KEY AUTOINCREMENT, " +
			"name VARCHAR (20) NOT NULL UNIQUE)",
		"CREATE TABLE IF NOT EXISTS tag_bookmark " +
			"(tagid INTEGER, bookmarkid INTEGER, UNIQUE (tagid, bookmarkid) )",
	}
	for _, stmt := range schema {
		if _, err := database.Exec(stmt); err != nil {
			log.Fatal("creating schema: ", err)
		}
	}
	fmt.Println("DB initialized")
}
// initSession builds the cookie store with fresh random keys, registers
// the session payload type with gob, and parses the page templates.
//
// Fix: securecookie.GenerateRandomKey returns nil when the OS entropy
// source fails; passing a nil key silently disables cookie
// authentication/encryption, so treat it as fatal.
//
// NOTE(review): templates are parsed with text/template (see imports),
// which does not HTML-escape — html/template would be safer for .gohtml.
func initSession() {
	authKey := securecookie.GenerateRandomKey(64)
	encKey := securecookie.GenerateRandomKey(32)
	if authKey == nil || encKey == nil {
		log.Fatal("could not generate session keys")
	}
	store = sessions.NewCookieStore(authKey, encKey)
	store.Options = &sessions.Options{
		// 600*15 seconds = 2.5 hours. NOTE(review): confirm intended lifetime.
		MaxAge:   600 * 15,
		HttpOnly: true,
	}
	// Session values hold a User; gob must know the concrete type.
	gob.Register(User{})
	tpl = template.Must(template.ParseGlob("templates/*.gohtml"))
}
// main wires everything together: open/create the database, configure
// sessions and templates, then serve HTTP (blocks until the server exits).
func main() {
	initDB()
	initSession()
	handleRequests()
}
| {
if b == a {
return true
}
} | conditional_block |
alpha_beta.py | import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()
#-----------
# Modifications (Matthieu, 15/04):
# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.
# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)
# Les indices de la liste correspondant à chaque coupe sont par exemple :
# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)
# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy
# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)
# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout
# Algo alpha beta
# Pbs :
# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini
# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de %
# sont gagnées par l'ia contre un algo qui joue aléatoirement
# Améliorer la fonction d'évaluation qui est pour l'instant très basique
##-------------
# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),
# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.
# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.
# A chaque tour, le joueur doit choisir un numéro de coupelle.
# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.
#
# modifs du 17.03 par Léo:
# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé
# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant
# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.
#Notions de classe:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explication de l'algorithme minimax général (page 52) :
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code par Léo et Paul
#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)
# -> se pencher sur la fonction "partieFinie" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..
#Pb: structure d'arbre trop compliquée: (*)
#l'arbre est construit à partir d'une liste selon le principe suivant:
#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes
#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue
class terrainDeJeu:
# [11] [10] [9] [8] [7] [6]// ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5]// ligne du joueur (joueur 0)
    def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):  # constructor
        """Build a fresh board: 2*nCoupes pits, each holding nGrainesParCoupelle seeds."""
        # Flat board; pits 0..nCoupes-1 belong to player 0 (human), the rest to player 1.
        self.plateau = np.full(2*nCoupes, nGrainesParCoupelle)
        self.nGrainesParCoupelleInit = nGrainesParCoupelle  # initial seeds per pit (used by clone() and the win threshold)
        self.nCoupes = nCoupes  # number of pits per side
        self.scores = [0, 0]  # scores[0] = player 0's score, scores[1] = player 1's score
        self.tour = 0  # whose turn it is (0 = human, 1 = computer)
        self.finie = False  # True once the game is over
        self.profondeurMinimax = profondeur  # search depth for minimax/alpha-beta
        self.arbreFils = {}  # children of this node in the game tree: move id -> cloned board
#clone le terrain de jeu pour pouvoir simuler un coup par la suite
def clone(self):
clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
clone.plateau= self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
#retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)
def coupeSuivante(self,idCoupe):
return (idCoupe + 1)%(2*self.nCoupes)
#retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)
def coupePrecedente(self,idCoupe):
return (idCoupe - 1)%(2*self.nCoupes)
#retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe
def joueurCoupe(self,idCoupe):
return 0 if idCoupe < self.nCoupes else 1
#retourne si idCoupe peut être prise (contient 2 ou 3 graines)
def coupePrenable(self,idCoupe):
return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
    def deplacer(self, joueur, idCoupe):
        """Play pit idCoupe for player joueur: sow its seeds counter-clockwise,
        capture by the Awale rules, then pass the turn to the opponent."""
        coupeInitiale = idCoupe  # id of the chosen pit
        nGraines = self.plateau[idCoupe]
        self.plateau[idCoupe] = 0
        while (nGraines != 0):  # redistribute the seeds of the starting pit
            idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale):  # never sow back into the starting pit
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        if (joueur != joueurCoupeFinale):
            # Sowing ended on the opponent's side: capture backwards from the
            # last pit while pits hold 2 or 3 seeds -- unless that capture
            # would starve the opponent, in which case nothing is taken.
            if (self.nourrirAdversaire(joueur, coupeFinale)):
                while (self.joueurCoupe(idCoupe) == joueurCoupeFinale and self.coupePrenable(idCoupe)):
                    self.scores[joueur] += self.plateau[idCoupe]
                    self.plateau[idCoupe] = 0
                    idCoupe = self.coupePrecedente(idCoupe)
        self.tour = (self.tour + 1) % 2  # hand the turn to the other player
#On compte le nombre de graines restantes sur le plateau
def grainesRestantes(self):
return np.sum(self.plateau)
#on compte le nombre de graines restantes sur le plateau pour les coupes de joueur
def grainesRestantesJoueur(self,joueur):
if joueur==0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
    # Decide whether, if `joueur` ends a move on pit coupeFinale, the opponent
    # is still left with at least one seed (the "feed the opponent" rule).
    def nourrirAdversaire(self, joueur, coupeFinale):
        """Return True when capturing from coupeFinale is admissible, i.e.
        it does not leave the opponent's whole row empty."""
        adversaire = (joueur + 1) % 2
        # Start checking from the opponent's furthest pit (clockwise direction).
        admissible = False
        idCoupe = (self.nCoupes * (adversaire + 1)) - 1
        while (self.joueurCoupe(idCoupe) == adversaire):
            # A non-empty pit beyond coupeFinale survives: the move is admissible.
            if (idCoupe > coupeFinale and self.plateau[idCoupe] != 0):
                admissible = True
            # A pit that cannot be captured (not 2 or 3 seeds) also survives.
            elif (not self.coupePrenable(idCoupe)):
                admissible = True
            idCoupe = self.coupePrecedente(idCoupe)
        # True if the move satisfies the "feed" rule.
        return admissible
    # Moves `joueur` can play that put at least one seed on the opponent's side.
    def coupesAdmissiblesNourrir(self, joueur):
        """Return the ids of joueur's pits whose sowing reaches the opponent."""
        coupesAdmissibles = []
        # Start with the pit closest to the opponent (counter-clockwise).
        idCoupe = (self.nCoupes * (joueur + 1)) - 1
        distance = 1
        while (self.joueurCoupe(idCoupe) == joueur):
            # If the pit holds at least `distance` seeds, at least one seed
            # crosses over to the opponent's row: the move feeds them.
            if self.plateau[idCoupe] >= distance:
                coupesAdmissibles.append(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
            distance += 1
        return coupesAdmissibles
    def coupesAdmissibles(self, joueur):
        """Return the list of pits `joueur` may legally play.

        Side effect: when the opponent is starved and no feeding move exists,
        joueur collects every remaining seed and the game is marked finished.
        """
        adversaire = (joueur + 1) % 2
        if self.grainesRestantesJoueur(adversaire) == 0:
            # The opponent has no seeds: joueur is obliged to feed them.
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            # No feeding move possible: joueur takes everything, game over.
            if len(coupesAdmissibles) == 0:
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes, dtype=int)
                self.finie = True
        else:
            # Otherwise any non-empty pit on joueur's side is playable.
            coupesAdmissibles = [(k + joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k + joueur*self.nCoupes)] > 0]
        return coupesAdmissibles
    def tourDuJoueur(self):
        """Prompt the human (player 0) for a pit, play it, then continue the game."""
        joueur = 0
        # If the opponent has no seeds left, feeding them is mandatory;
        # coupesAdmissibles enforces that rule.
        coupesAdmissibles = self.coupesAdmissibles(joueur)
        print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
        nCoupe = int(input())
        while nCoupe < 0 or nCoupe > self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
            # The pit does not exist, or the move is not admissible: ask again.
            print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
            nCoupe = int(input())
        self.deplacer(joueur, nCoupe)
        self.jouer()
    def tourOrdi(self):
        """Let the computer (player 1) pick its move via alpha-beta search,
        play it, then continue the game."""
        joueur = 1
        self.profondeur = 0  # this node is the root of the search tree
        self.value = self.alphabeta(joueur, -np.inf, np.inf)
        for idCoupe in self.arbreFils.keys():
            print("coupe = ", idCoupe, " : valeur = ", self.arbreFils[idCoupe].value)
        # Play the first child whose value equals the root's search value.
        for idCoupe in self.arbreFils.keys():
            if self.value == self.arbreFils[idCoupe].value:
                self.deplacer(joueur, idCoupe)
                break
        self.jouer()
def partieFinie(self):
#True si le plateau ne contient plus aucune graine
limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste
def afficherScores(self):
print("score J1........."+str(self.scores[0]))
print("score MinMax....."+str(self.scores[1]))
def evaluation(self,joueur):
adversaire = (joueur+1)%2
return self.scores[joueur]-self.scores[adversaire]
    # Main game loop (mutually recursive with tourDuJoueur/tourOrdi).
    def jouer(self):
        """Display the board and scores, then hand the turn to whichever
        player is due, until the game is over."""
        if (not self.partieFinie()):
            self.afficherPlateau()
            self.afficherScores()
            if (self.tour == 0):
                self.tourDuJoueur()
            else:
                self.tourOrdi()
            print("\n")
        else:
            self.afficherPlateau()
            self.afficherScores()
            print("Partie Finie !")
#plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta
def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
| #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
#coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible
if self.tour==joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha,self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta,self.value)
return self.value
t = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)
t.jouer() | #si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.minimax(joueurMaximisant)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): | identifier_body |
alpha_beta.py | import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()
#-----------
# Modifications (Matthieu, 15/04):
# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.
# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)
# Les indices de la liste correspondant à chaque coupe sont par exemple :
# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)
# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy
# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)
# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout
# Algo alpha beta
# Pbs :
# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini
# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de %
# sont gagnées par l'ia contre un algo qui joue aléatoirement
# Améliorer la fonction d'évaluation qui est pour l'instant très basique
##-------------
# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),
# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.
# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.
# A chaque tour, le joueur doit choisir un numéro de coupelle.
# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.
#
# modifs du 17.03 par Léo:
# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé
# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant
# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.
#Notions de classe:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explication de l'algorithme minimax général (page 52) :
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code par Léo et Paul
#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)
# -> se pencher sur la fonction "partieFinie" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..
#Pb: structure d'arbre trop compliquée: (*)
#l'arbre est construit à partir d'une liste selon le principe suivant:
#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes
#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue
class terrainDeJeu:
# [11] [10] [9] [8] [7] [6]// ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5]// ligne du joueur (joueur 0)
def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructeur
self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0,0] # scores[0] = score du joueur 0...
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
#clone le terrain de jeu pour pouvoir simuler un coup par la suite
def clone(self):
clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
clone.plateau= self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
#retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)
def coupeSuivante(self,idCoupe):
return (idCoupe + 1)%(2*self.nCoupes)
#retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)
def coupePrecedente(self,idCoupe):
return (idCoupe - 1)%(2*self.nCoupes)
#retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe
def joueurCoupe(self,idCoupe):
return 0 if idCoupe < self.nCoupes else 1
#retourne si idCoupe peut être prise (contient 2 ou 3 graines)
def coupePrenable(self,idCoupe):
return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
    def deplacer(self, joueur, idCoupe):
        """Play pit idCoupe for player joueur: sow its seeds counter-clockwise,
        capture by the Awale rules, then pass the turn to the opponent."""
        coupeInitiale = idCoupe  # id of the chosen pit
        nGraines = self.plateau[idCoupe]
        self.plateau[idCoupe] = 0
        while (nGraines != 0):  # redistribute the seeds of the starting pit
            idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale):  # never sow back into the starting pit
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        if (joueur != joueurCoupeFinale):
            # Sowing ended on the opponent's side: capture backwards from the
            # last pit while pits hold 2 or 3 seeds -- unless that capture
            # would starve the opponent, in which case nothing is taken.
            if (self.nourrirAdversaire(joueur, coupeFinale)):
                while (self.joueurCoupe(idCoupe) == joueurCoupeFinale and self.coupePrenable(idCoupe)):
                    self.scores[joueur] += self.plateau[idCoupe]
                    self.plateau[idCoupe] = 0
                    idCoupe = self.coupePrecedente(idCoupe)
        self.tour = (self.tour + 1) % 2  # hand the turn to the other player
#On compte le nombre de graines restantes sur le plateau
def grainesRestantes(self):
return np.sum(self.plateau)
#on compte le nombre de graines restantes sur le plateau pour les coupes de joueur
def grainesRestantesJoueur(self,joueur):
if joueur==0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
#détermine si, dans le cas où joueur finit son coup sur la coupe coupeFinale,
#Yson adversaire sera affamé ou pas
#on regarde donc si il restera au moins une graine sur le terrain de l'adversaire
def nourrirAdversaire(self,joueur,coupeFinale):
adversaire = (joueur+1)%2
#on commence la vérification à la coupe la plus éloignée de adversaire (dans le sens horaire)
admissible = False
idCoupe = (self.nCoupes*(adversaire+1))-1
while (self.joueurCoupe(idCoupe)==adversaire):
#si idCoupe est après coupeFinale et qu'il reste des graines dedans le coup est admissible
if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
admissible=True
#si joueur peut pas prendre la coupe idCoupe le coup est admissible
elif (not self.coupePrenable(idCoupe)):
admissible=True
idCoupe=self.coupePrecedente(idCoupe)
#True si le coup est admissible pour la règle "nourrir"
return admissible
#coupes admissibles que peut jouer joueur pour nourrir son adversaire
def coupesAdmissiblesNourrir(self,joueur):
| #on commence par la coupe la plus proche de l'adversaire (dans le sens trigo)
idCoupe = (self.nCoupes*(joueur+1))-1
distance = 1
while (self.joueurCoupe(idCoupe)==joueur):
#s'il y a plus de graines dans idCoupe que la distance qui la sépare aux coupes de l'adversaire
#le coup est admissible, au moins une graine nourrira l'adversaire
if self.plateau[idCoupe]>=distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance +=1
return coupesAdmissibles
    def coupesAdmissibles(self, joueur):
        """Return the list of pits `joueur` may legally play.

        Side effect: when the opponent is starved and no feeding move exists,
        joueur collects every remaining seed and the game is marked finished.
        """
        adversaire = (joueur + 1) % 2
        if self.grainesRestantesJoueur(adversaire) == 0:
            # The opponent has no seeds: joueur is obliged to feed them.
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            # No feeding move possible: joueur takes everything, game over.
            if len(coupesAdmissibles) == 0:
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes, dtype=int)
                self.finie = True
        else:
            # Otherwise any non-empty pit on joueur's side is playable.
            coupesAdmissibles = [(k + joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k + joueur*self.nCoupes)] > 0]
        return coupesAdmissibles
def tourDuJoueur(self):
joueur = 0
#si l'adversaire n'a plus de graines, il faut obligatoirement le nourrir
coupesAdmissibles = self.coupesAdmissibles(joueur)
print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
nCoupe = int(input())
#print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
#cas où la coupelle n'existe pas, ou correspond à un coup non admissible
print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
nCoupe = int(input())
self.deplacer(joueur,nCoupe)
self.jouer()
def tourOrdi(self):
joueur = 1
self.profondeur = 0
self.value = self.alphabeta(joueur,-np.inf,np.inf)
for idCoupe in self.arbreFils.keys():
print("coupe = ",idCoupe," : valeur = ",self.arbreFils[idCoupe].value)
for idCoupe in self.arbreFils.keys():
if self.value==self.arbreFils[idCoupe].value:
self.deplacer(joueur,idCoupe)
break
self.jouer()
def partieFinie(self):
#True si le plateau ne contient plus aucune graine
limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste
def afficherScores(self):
print("score J1........."+str(self.scores[0]))
print("score MinMax....."+str(self.scores[1]))
def evaluation(self,joueur):
adversaire = (joueur+1)%2
return self.scores[joueur]-self.scores[adversaire]
#Fonction principale
def jouer(self):
if (not self.partieFinie()) :
self.afficherPlateau()
self.afficherScores()
if (self.tour==0):
self.tourDuJoueur()
else:
self.tourOrdi()
print("\n")
else:
self.afficherPlateau()
self.afficherScores()
print("Partie Finie !")
#plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta
def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.minimax(joueurMaximisant)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
#coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible
if self.tour==joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha,self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta,self.value)
return self.value
t = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)
t.jouer() | coupesAdmissibles = []
| random_line_split |
alpha_beta.py | import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()
#-----------
# Modifications (Matthieu, 15/04):
# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.
# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)
# Les indices de la liste correspondant à chaque coupe sont par exemple :
# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)
# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy
# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)
# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout
# Algo alpha beta
# Pbs :
# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini
# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de %
# sont gagnées par l'ia contre un algo qui joue aléatoirement
# Améliorer la fonction d'évaluation qui est pour l'instant très basique
##-------------
# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),
# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.
# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.
# A chaque tour, le joueur doit choisir un numéro de coupelle.
# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.
#
# modifs du 17.03 par Léo:
# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé
# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant
# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.
#Notions de classe:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explication de l'algorithme minimax général (page 52) :
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code par Léo et Paul
#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)
# -> se pencher sur la fonction "partieFinie" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..
#Pb: structure d'arbre trop compliquée: (*)
#l'arbre est construit à partir d'une liste selon le principe suivant:
#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes
#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue
class terrainDeJeu:
# [11] [10] [9] [8] [7] [6]// ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5]// ligne du joueur (joueur 0)
def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructeur
self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0,0] # scores[0] = score du joueur 0...
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
#clone le terrain de jeu pour pouvoir simuler un coup par la suite
def clone(self):
clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
clone.plateau= self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
#retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)
def coupeSuivante(self,idCoupe):
return (idCoupe + 1)%(2*self.nCoupes)
#retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)
def coupePrecedente(self,idCoupe):
return (idCoupe - 1)%(2*self.nCoupes)
#retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe
def joueurCoupe(self,idCoupe):
return 0 if idCoupe < self.nCoupes else 1
#retourne si idCoupe peut être prise (contient 2 ou 3 graines)
def coupePrenable(self,idCoupe):
return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
def deplacer(self,joueur,idCoupe):
coupeInitiale = idCoupe #id de la coupelle choisie
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
while (nGraines != 0): #On redistribue les graines de la coupelle initiale
idCoupe = self.coupeSuivante(idCoupe)
if (idCoupe != coupeInitiale): #On ne redistribue pas dans la coupelle initiale
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if (joueur != joueurCoupeFinale):
#on vérifie si on va affamer l'adversaire
#si non, on prend les graines normalement
if (self.nourrirAdversaire(joueur,coupeFinale)):
while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
self.scores[joueur]+=self.plateau[idCoupe]
self.plateau[idCoupe]=0
idCoupe = self.coupePrecedente(idCoupe)
#si on va affamer l'adversaire :
# on ne prend aucune graine donc on ne fait rien
self.tour=(self.tour+1)%2
#On compte le nombre de graines restantes sur le plateau
def grainesRestantes(self):
return np.sum(self.plateau)
#on compte le nombre de graines restantes sur le plateau pour les coupes de joueur
def grainesRestantesJoueur(self,joueur):
if joueur==0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
#détermine si, dans le cas où joueur finit son coup sur la coupe coupeFinale,
#Yson adversaire sera affamé ou pas
#on regarde donc si il restera au moins une graine sur le terrain de l'adversaire
def nourrirAdversaire(self,joueur,coupeFinale):
adversaire = (joueur+1)%2
#on commence la vérification à la coupe la plus éloignée de adversaire (dans le sens horaire)
admissible = False
idCoupe = (self.nCoupes*(adversaire+1))-1
while (self.joueurCoupe(idCoupe)==adversaire):
#si idCoupe est après coupeFinale et qu'il reste des graines dedans le coup est admissible
if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
admissible=True
#si joueur peut pas prendre la coupe idCoupe le coup est admissible
elif (not self.coupePrenable(idCoupe)):
admissible=True
idCoupe=self.coupePrecedente(idCoupe)
#True si le coup est admissible pour la règle "nourrir"
return admissible
#coupes admissibles que peut jouer joueur pour nourrir son adversaire
def coupesAdmissiblesNourrir(self,joueur):
coupesAdmissibles = []
#on commence par la coupe la plus proche de l'adversaire (dans le sens trigo)
idCoupe = (self.nCoupes*(joueur+1))-1
distance = 1
while (self.joueurCoupe(idCoupe)==joueur):
#s'il y a plus de graines dans idCoupe que la distance qui la sépare aux coupes de l'adversaire
#le coup est admissible, au moins une graine nourrira l'adversaire
if self.plateau[idCoupe]>=distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance +=1
return coupesAdmissibles
def coupesAdmissibles(self,joueur):
adversaire = (joueur+1)%2 | .grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
#si aucun coup ne peut être joué pour nourrir l'adversaire
if len(coupesAdmissibles)==0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2*self.nCoupes,dtype=int)
self.finie = True
#partie terminée
#sinon toutes les coupes non vides sont admissibles
else :
coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
return coupesAdmissibles
def tourDuJoueur(self):
joueur = 0
#si l'adversaire n'a plus de graines, il faut obligatoirement le nourrir
coupesAdmissibles = self.coupesAdmissibles(joueur)
print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
nCoupe = int(input())
#print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
#cas où la coupelle n'existe pas, ou correspond à un coup non admissible
print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
nCoupe = int(input())
self.deplacer(joueur,nCoupe)
self.jouer()
def tourOrdi(self):
joueur = 1
self.profondeur = 0
self.value = self.alphabeta(joueur,-np.inf,np.inf)
for idCoupe in self.arbreFils.keys():
print("coupe = ",idCoupe," : valeur = ",self.arbreFils[idCoupe].value)
for idCoupe in self.arbreFils.keys():
if self.value==self.arbreFils[idCoupe].value:
self.deplacer(joueur,idCoupe)
break
self.jouer()
def partieFinie(self):
#True si le plateau ne contient plus aucune graine
limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste
def afficherScores(self):
print("score J1........."+str(self.scores[0]))
print("score MinMax....."+str(self.scores[1]))
def evaluation(self,joueur):
adversaire = (joueur+1)%2
return self.scores[joueur]-self.scores[adversaire]
#Fonction principale
def jouer(self):
if (not self.partieFinie()) :
self.afficherPlateau()
self.afficherScores()
if (self.tour==0):
self.tourDuJoueur()
else:
self.tourOrdi()
print("\n")
else:
self.afficherPlateau()
self.afficherScores()
print("Partie Finie !")
#plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta
def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.minimax(joueurMaximisant)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
#coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible
if self.tour==joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha,self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta,self.value)
return self.value
t = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)
t.jouer() |
if self | identifier_name |
alpha_beta.py | import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()
#-----------
# Modifications (Matthieu, 15/04):
# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.
# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)
# Les indices de la liste correspondant à chaque coupe sont par exemple :
# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)
# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy
# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)
# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout
# Algo alpha beta
# Pbs :
# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini
# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de %
# sont gagnées par l'ia contre un algo qui joue aléatoirement
# Améliorer la fonction d'évaluation qui est pour l'instant très basique
##-------------
# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),
# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.
# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.
# A chaque tour, le joueur doit choisir un numéro de coupelle.
# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.
#
# modifs du 17.03 par Léo:
# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé
# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant
# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.
#Notions de classe:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explication de l'algorithme minimax général (page 52) :
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code par Léo et Paul
#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)
# -> se pencher sur la fonction "partieFinie" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..
#Pb: structure d'arbre trop compliquée: (*)
#l'arbre est construit à partir d'une liste selon le principe suivant:
#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes
#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue
class terrainDeJeu:
# [11] [10] [9] [8] [7] [6]// ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5]// ligne du joueur (joueur 0)
def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructeur
self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)
self.nGrainesParCoupelleInit = nGrainesParCoupelle
self.nCoupes = nCoupes
self.scores = [0,0] # scores[0] = score du joueur 0...
self.tour = 0
self.finie = False
self.profondeurMinimax = profondeur
self.arbreFils = {}
#clone le terrain de jeu pour pouvoir simuler un coup par la suite
def clone(self):
clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)
clone.plateau= self.plateau.copy()
clone.scores = self.scores.copy()
clone.tour = self.tour
clone.finie = self.finie
return clone
#retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)
def coupeSuivante(self,idCoupe):
return (idCoupe + 1)%(2*self.nCoupes)
#retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)
def coupePrecedente(self,idCoupe):
return (idCoupe - 1)%(2*self.nCoupes)
#retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe
def joueurCoupe(self,idCoupe):
return 0 if idCoupe < self.nCoupes else 1
#retourne si idCoupe peut être prise (contient 2 ou 3 graines)
def coupePrenable(self,idCoupe):
return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)
def deplacer(self,joueur,idCoupe):
coupeInitiale = idCoupe #id de la coupelle choisie
nGraines = self.plateau[idCoupe]
self.plateau[idCoupe] = 0
while (nGraines != 0): #On redistribue les graines de la coupelle initiale
idCoupe = self.coupeSuivante(idCoupe)
if (idCoupe != coupeInitiale): #On ne redistribue pas dans la coupelle initiale
self.plateau[idCoupe] += 1
nGraines -= 1
coupeFinale = idCoupe
joueurCoupeFinale = self.joueurCoupe(coupeFinale)
if (joueur != joueurCoupeFinale):
#on vérifie si on va affamer l'adversaire
#si non, on prend les graines normalement
if (self.nourrirAdversaire(joueur,coupeFinale)):
| mpte le nombre de graines restantes sur le plateau
def grainesRestantes(self):
return np.sum(self.plateau)
#on compte le nombre de graines restantes sur le plateau pour les coupes de joueur
def grainesRestantesJoueur(self,joueur):
if joueur==0:
return np.sum(self.plateau[0:self.nCoupes])
else:
return np.sum(self.plateau[self.nCoupes:len(self.plateau)])
#détermine si, dans le cas où joueur finit son coup sur la coupe coupeFinale,
#Yson adversaire sera affamé ou pas
#on regarde donc si il restera au moins une graine sur le terrain de l'adversaire
def nourrirAdversaire(self,joueur,coupeFinale):
adversaire = (joueur+1)%2
#on commence la vérification à la coupe la plus éloignée de adversaire (dans le sens horaire)
admissible = False
idCoupe = (self.nCoupes*(adversaire+1))-1
while (self.joueurCoupe(idCoupe)==adversaire):
#si idCoupe est après coupeFinale et qu'il reste des graines dedans le coup est admissible
if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):
admissible=True
#si joueur peut pas prendre la coupe idCoupe le coup est admissible
elif (not self.coupePrenable(idCoupe)):
admissible=True
idCoupe=self.coupePrecedente(idCoupe)
#True si le coup est admissible pour la règle "nourrir"
return admissible
#coupes admissibles que peut jouer joueur pour nourrir son adversaire
def coupesAdmissiblesNourrir(self,joueur):
coupesAdmissibles = []
#on commence par la coupe la plus proche de l'adversaire (dans le sens trigo)
idCoupe = (self.nCoupes*(joueur+1))-1
distance = 1
while (self.joueurCoupe(idCoupe)==joueur):
#s'il y a plus de graines dans idCoupe que la distance qui la sépare aux coupes de l'adversaire
#le coup est admissible, au moins une graine nourrira l'adversaire
if self.plateau[idCoupe]>=distance:
coupesAdmissibles.append(idCoupe)
idCoupe = self.coupePrecedente(idCoupe)
distance +=1
return coupesAdmissibles
def coupesAdmissibles(self,joueur):
adversaire = (joueur+1)%2
if self.grainesRestantesJoueur(adversaire) == 0:
coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
#si aucun coup ne peut être joué pour nourrir l'adversaire
if len(coupesAdmissibles)==0:
self.scores[joueur] += self.grainesRestantes()
self.plateau = np.zeros(2*self.nCoupes,dtype=int)
self.finie = True
#partie terminée
#sinon toutes les coupes non vides sont admissibles
else :
coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]
return coupesAdmissibles
def tourDuJoueur(self):
joueur = 0
#si l'adversaire n'a plus de graines, il faut obligatoirement le nourrir
coupesAdmissibles = self.coupesAdmissibles(joueur)
print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
nCoupe = int(input())
#print("coupesAdmissibles",coupesAdmissibles)
while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
#cas où la coupelle n'existe pas, ou correspond à un coup non admissible
print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
nCoupe = int(input())
self.deplacer(joueur,nCoupe)
self.jouer()
def tourOrdi(self):
joueur = 1
self.profondeur = 0
self.value = self.alphabeta(joueur,-np.inf,np.inf)
for idCoupe in self.arbreFils.keys():
print("coupe = ",idCoupe," : valeur = ",self.arbreFils[idCoupe].value)
for idCoupe in self.arbreFils.keys():
if self.value==self.arbreFils[idCoupe].value:
self.deplacer(joueur,idCoupe)
break
self.jouer()
def partieFinie(self):
#True si le plateau ne contient plus aucune graine
limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)
return self.finie
def afficherPlateau(self):
print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste
def afficherScores(self):
print("score J1........."+str(self.scores[0]))
print("score MinMax....."+str(self.scores[1]))
def evaluation(self,joueur):
adversaire = (joueur+1)%2
return self.scores[joueur]-self.scores[adversaire]
#Fonction principale
def jouer(self):
if (not self.partieFinie()) :
self.afficherPlateau()
self.afficherScores()
if (self.tour==0):
self.tourDuJoueur()
else:
self.tourOrdi()
print("\n")
else:
self.afficherPlateau()
self.afficherScores()
print("Partie Finie !")
#plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta
def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.minimax(joueurMaximisant)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
return self.value
def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)
#On simule ici des situations fictives de jeu de manière récursive (l'I.A. lit en quelque sorte l'avenir pour n=profondeur tours en avance)
self.arbreFils = {}
#on détermine les coups possibles
#si aucun coup n'est possible cette fonction arrête aussi la partie
coupesPossibles = self.coupesAdmissibles(self.tour)
if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base
self.value = self.evaluation(joueurMaximisant)
return self.value
if self.tour==joueurMaximisant:
fctComparaison = max
self.value = - np.inf
else:
fctComparaison = min
self.value = np.inf
#on parcourt tous les coups possibles
for idCoupe in coupesPossibles:
fils=self.clone()
fils.profondeur=self.profondeur+1
fils.deplacer(fils.tour,idCoupe)
fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)
#on ne remplit effectivement l'arbre (attribut arbreFils)
#que pour une profondeur < à profondeurArbre
#on pourrait même ne pas le remplir du tout mais profondeurArbre = 1
#permet d'afficher les valeurs associées à chaque coup...
if (self.profondeur < profondeurArbre):
self.arbreFils[idCoupe]=fils
self.value = fctComparaison(self.value, fils.value)
#coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible
if self.tour==joueurMaximisant:
if self.value >= beta:
return self.value
alpha = fctComparaison(alpha,self.value)
else:
if alpha >= self.value:
return self.value
beta = fctComparaison(beta,self.value)
return self.value
t = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)
t.jouer() | while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):
self.scores[joueur]+=self.plateau[idCoupe]
self.plateau[idCoupe]=0
idCoupe = self.coupePrecedente(idCoupe)
#si on va affamer l'adversaire :
# on ne prend aucune graine donc on ne fait rien
self.tour=(self.tour+1)%2
#On co | conditional_block |
minilab.py | #!/usr/bin/python
"""
Minilab is a network lab simulator based on
mininet. Its goal is to provide an easy way
to setup and test any kind of complex network.
"""
import subprocess
import shutil
import shlex
import argparse
import yaml
import os
import sys
import glob
from mininet.net import Mininet
from mininet.node import Host, OVSSwitch, RemoteController
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from jinja2 import FileSystemLoader, Environment
from nat import *
class ManageableHost(Host):
def __init__(self, name, ip=None, inNamespace=True,
root_dir=None, ssh_template=None,
auth_keys=None, **kwargs):
self.name = name
self.ssh_template = ssh_template
self.auth_keys = auth_keys
self.root_dir = root_dir
self.ssh_pid_file = None
self.mounted_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
except:
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
|
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
os.mkdir(ssh_dir, 0700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
self.copy_auth_keys()
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
config = yaml.load(cfg)
cfg.close()
return config
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
if not host['name'] in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
def setup_switches(net, topology):
switches = {}
info('*** Adding switches\n')
# first loop : create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
def fix_switch_protocols(topology):
""" force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd))
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches, config, topology)
nat_node = setup_nat(net, topology)
start(net, topology)
except Exception, e:
info('** Error spawning topology\n')
print e
cleanup_all(config, topology)
sys.exit(1)
if nat_node:
tear_down_nat(nat_node)
stop(net, config)
def cleanup_all(config, topology, hard_cleanup=False):
for host in topology['hosts']:
if host['is_manageable']:
host_dir = os.path.join(config['ml_dir'], host['name'])
host_root_dir = os.path.join(host_dir, 'merged')
for directory in ['sys', 'proc']:
mount_point = os.path.join(host_root_dir, directory)
subprocess.call(shlex.split("umount %s" % mount_point))
subprocess.call(shlex.split("umount %s" % host_root_dir))
if hard_cleanup:
shutil.rmtree(host_dir)
# clean mininet
subprocess.call(shlex.split("mn -c"))
if __name__ == '__main__':
setLogLevel('info')
parser = argparse.ArgumentParser(description='Minilab arguments.')
parser.add_argument('--config', dest='config', type=str,
default='config.yaml',
help='minilab config file (default: config.yaml)')
parser.add_argument('topology', metavar='topology', type=str,
help='topology configuration file')
parser.add_argument('--cleanup', dest='cleanup', action='store_true',
help='cleanup minilab setup')
parser.add_argument('--reset', dest='hard_cleanup', action='store_true',
help='cleanup and destroy all hosts directories')
args = parser.parse_args()
minilab_config = load_config(args.config)
topo_config = load_config(args.topology)
cleanup = args.cleanup
if args.cleanup:
if args.hard_cleanup:
info('** Cleaning minilab from scratch\n')
cleanup_all(minilab_config, topo_config, hard_cleanup=True)
else:
info('** Cleaning minilab\n')
cleanup_all(minilab_config, topo_config)
info('** Cleaning done\n')
sys.exit(0)
setup_topo(minilab_config, topo_config) | return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir) | random_line_split |
minilab.py | #!/usr/bin/python
"""
Minilab is a network lab simulator based on
mininet. Its goal is to provide an easy way
to setup and test any kind of complex network.
"""
import subprocess
import shutil
import shlex
import argparse
import yaml
import os
import sys
import glob
from mininet.net import Mininet
from mininet.node import Host, OVSSwitch, RemoteController
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from jinja2 import FileSystemLoader, Environment
from nat import *
class ManageableHost(Host):
def __init__(self, name, ip=None, inNamespace=True,
root_dir=None, ssh_template=None,
auth_keys=None, **kwargs):
self.name = name
self.ssh_template = ssh_template
self.auth_keys = auth_keys
self.root_dir = root_dir
self.ssh_pid_file = None
self.mounted_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
except:
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir)
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
os.mkdir(ssh_dir, 0700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
self.copy_auth_keys()
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
config = yaml.load(cfg)
cfg.close()
return config
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
if not host['name'] in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
def setup_switches(net, topology):
switches = {}
info('*** Adding switches\n')
# first loop : create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
def fix_switch_protocols(topology):
|
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches, config, topology)
nat_node = setup_nat(net, topology)
start(net, topology)
except Exception, e:
info('** Error spawning topology\n')
print e
cleanup_all(config, topology)
sys.exit(1)
if nat_node:
tear_down_nat(nat_node)
stop(net, config)
def cleanup_all(config, topology, hard_cleanup=False):
for host in topology['hosts']:
if host['is_manageable']:
host_dir = os.path.join(config['ml_dir'], host['name'])
host_root_dir = os.path.join(host_dir, 'merged')
for directory in ['sys', 'proc']:
mount_point = os.path.join(host_root_dir, directory)
subprocess.call(shlex.split("umount %s" % mount_point))
subprocess.call(shlex.split("umount %s" % host_root_dir))
if hard_cleanup:
shutil.rmtree(host_dir)
# clean mininet
subprocess.call(shlex.split("mn -c"))
if __name__ == '__main__':
setLogLevel('info')
parser = argparse.ArgumentParser(description='Minilab arguments.')
parser.add_argument('--config', dest='config', type=str,
default='config.yaml',
help='minilab config file (default: config.yaml)')
parser.add_argument('topology', metavar='topology', type=str,
help='topology configuration file')
parser.add_argument('--cleanup', dest='cleanup', action='store_true',
help='cleanup minilab setup')
parser.add_argument('--reset', dest='hard_cleanup', action='store_true',
help='cleanup and destroy all hosts directories')
args = parser.parse_args()
minilab_config = load_config(args.config)
topo_config = load_config(args.topology)
cleanup = args.cleanup
if args.cleanup:
if args.hard_cleanup:
info('** Cleaning minilab from scratch\n')
cleanup_all(minilab_config, topo_config, hard_cleanup=True)
else:
info('** Cleaning minilab\n')
cleanup_all(minilab_config, topo_config)
info('** Cleaning done\n')
sys.exit(0)
setup_topo(minilab_config, topo_config)
| """ force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd)) | identifier_body |
minilab.py | #!/usr/bin/python
"""
Minilab is a network lab simulator based on
mininet. Its goal is to provide an easy way
to setup and test any kind of complex network.
"""
import subprocess
import shutil
import shlex
import argparse
import yaml
import os
import sys
import glob
from mininet.net import Mininet
from mininet.node import Host, OVSSwitch, RemoteController
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from jinja2 import FileSystemLoader, Environment
from nat import *
class ManageableHost(Host):
def __init__(self, name, ip=None, inNamespace=True,
root_dir=None, ssh_template=None,
auth_keys=None, **kwargs):
self.name = name
self.ssh_template = ssh_template
self.auth_keys = auth_keys
self.root_dir = root_dir
self.ssh_pid_file = None
self.mounted_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
except:
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir)
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
os.mkdir(ssh_dir, 0700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
self.copy_auth_keys()
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
config = yaml.load(cfg)
cfg.close()
return config
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
if not host['name'] in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
def | (net, topology):
switches = {}
info('*** Adding switches\n')
# first loop : create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
def fix_switch_protocols(topology):
""" force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd))
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches, config, topology)
nat_node = setup_nat(net, topology)
start(net, topology)
except Exception, e:
info('** Error spawning topology\n')
print e
cleanup_all(config, topology)
sys.exit(1)
if nat_node:
tear_down_nat(nat_node)
stop(net, config)
def cleanup_all(config, topology, hard_cleanup=False):
for host in topology['hosts']:
if host['is_manageable']:
host_dir = os.path.join(config['ml_dir'], host['name'])
host_root_dir = os.path.join(host_dir, 'merged')
for directory in ['sys', 'proc']:
mount_point = os.path.join(host_root_dir, directory)
subprocess.call(shlex.split("umount %s" % mount_point))
subprocess.call(shlex.split("umount %s" % host_root_dir))
if hard_cleanup:
shutil.rmtree(host_dir)
# clean mininet
subprocess.call(shlex.split("mn -c"))
if __name__ == '__main__':
setLogLevel('info')
parser = argparse.ArgumentParser(description='Minilab arguments.')
parser.add_argument('--config', dest='config', type=str,
default='config.yaml',
help='minilab config file (default: config.yaml)')
parser.add_argument('topology', metavar='topology', type=str,
help='topology configuration file')
parser.add_argument('--cleanup', dest='cleanup', action='store_true',
help='cleanup minilab setup')
parser.add_argument('--reset', dest='hard_cleanup', action='store_true',
help='cleanup and destroy all hosts directories')
args = parser.parse_args()
minilab_config = load_config(args.config)
topo_config = load_config(args.topology)
cleanup = args.cleanup
if args.cleanup:
if args.hard_cleanup:
info('** Cleaning minilab from scratch\n')
cleanup_all(minilab_config, topo_config, hard_cleanup=True)
else:
info('** Cleaning minilab\n')
cleanup_all(minilab_config, topo_config)
info('** Cleaning done\n')
sys.exit(0)
setup_topo(minilab_config, topo_config)
| setup_switches | identifier_name |
minilab.py | #!/usr/bin/python
"""
Minilab is a network lab simulator based on
mininet. Its goal is to provide an easy way
to setup and test any kind of complex network.
"""
import subprocess
import shutil
import shlex
import argparse
import yaml
import os
import sys
import glob
from mininet.net import Mininet
from mininet.node import Host, OVSSwitch, RemoteController
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from jinja2 import FileSystemLoader, Environment
from nat import *
class ManageableHost(Host):
def __init__(self, name, ip=None, inNamespace=True,
root_dir=None, ssh_template=None,
auth_keys=None, **kwargs):
self.name = name
self.ssh_template = ssh_template
self.auth_keys = auth_keys
self.root_dir = root_dir
self.ssh_pid_file = None
self.mounted_dirs = []
Host.__init__(self, name, inNamespace, **kwargs)
def list_processes(self):
process_list = []
my_ns_symlink = '/proc/%s/ns/net' % self.pid
for symlink in glob.glob('/proc/[1-9]*/ns/net'):
pid = None
try:
if os.path.samefile(my_ns_symlink, symlink):
pid = symlink.split('/')[2]
except:
pass
else:
if pid and int(pid) != self.pid:
process_list.append(pid)
return process_list
def stop_all_processes(self):
info('**** Stopping all remaining processes on %s\n' % self.name)
running_processes = self.list_processes()
for process in running_processes:
cmd = "kill -9 %s" % process
info('**** killing process id %s\n' % process)
subprocess.call(shlex.split(cmd))
def stop_processes(self):
self.stop_ssh_server()
self.stop_all_processes()
def create_ssh_config(self):
self.ssh_pid_file = os.path.join(self.root_dir, "var", "run",
"sshd.pid")
return self.ssh_template.render(pid_file=self.ssh_pid_file,
host_dir=self.root_dir)
def copy_auth_keys(self):
ssh_dir = os.path.join(self.root_dir, 'root/.ssh')
if not os.path.exists(ssh_dir):
os.mkdir(ssh_dir, 0700)
key_file = open(self.auth_keys)
destination = open(os.path.join(ssh_dir, 'authorized_keys'), 'wb')
destination.write(key_file.read())
destination.close()
key_file.close()
def start_ssh_server(self):
if self.auth_keys:
|
ssh_config = self.create_ssh_config()
host_config_path = os.path.join(self.root_dir,
'etc/ssh/sshd_config')
sshf = open(host_config_path, 'wb')
sshf.write(ssh_config)
sshf.close()
info('**** Starting ssh server on %s\n' % self.name)
start_ssh = '/usr/sbin/sshd -f %s' % host_config_path
self.cmd(shlex.split(start_ssh))
def stop_ssh_server(self):
info('**** Stopping ssh server on %s\n' % self.name)
kill_ssh = "/bin/kill $(cat %s)" % self.ssh_pid_file
self.cmd(shlex.split(kill_ssh))
def clean_all(self):
pass
def mount_root_fs(hostname, lab_dir, root_fs):
info('**** Mounting filesystem for %s\n' % hostname)
if not os.path.exists(lab_dir):
os.mkdir(lab_dir)
host_dir = os.path.join(lab_dir, hostname)
work_dir = os.path.join(host_dir, 'work')
upper_dir = os.path.join(host_dir, 'upper')
merged_dir = os.path.join(host_dir, 'merged')
if not os.path.exists(host_dir):
os.mkdir(host_dir)
os.mkdir(work_dir)
os.mkdir(upper_dir)
os.mkdir(merged_dir)
cmd = "mount -t overlay overlay -o lowerdir=%s,upperdir=%s,workdir=%s %s" % \
(root_fs, upper_dir, work_dir, merged_dir)
mount_root = shlex.split(cmd)
subprocess.call(mount_root)
host_proc = os.path.join(merged_dir, 'proc')
cmd_p = "mount -t proc proc %s" % host_proc
mount_proc = shlex.split(cmd_p)
subprocess.call(mount_proc)
host_sys = os.path.join(merged_dir, 'sys')
cmd_s = "mount -t sysfs sysfs %s" % host_sys
mount_sys = shlex.split(cmd_s)
subprocess.call(mount_sys)
return merged_dir
def umount_root_fs(hostname, lab_dir):
info('**** Unmounting filesystem for %s\n' % hostname)
host_dir = os.path.join(lab_dir, hostname)
merged_dir = os.path.join(host_dir, 'merged')
host_proc = os.path.join(merged_dir, 'proc')
host_sys = os.path.join(merged_dir, 'sys')
for mount_point in [host_sys, host_proc, merged_dir]:
subprocess.call(shlex.split("umount %s" % mount_point))
# fixme: currently need to umount /sys
# subprocess.call(shlex.split("umount %s" % '/sys'))
def load_config(config_file):
cfg = open(config_file)
config = yaml.load(cfg)
cfg.close()
return config
def setup_controllers(net, topology):
for controller in topology['controllers']:
ctrl = RemoteController(controller['name'],
ip=controller['ip'],
port=controller['port'])
info('*** Adding controller\n')
net.addController(ctrl)
def setup_hosts(net, switches, config, topology):
info('*** Adding hosts\n')
hosts = {}
ssh_template = None
auth_keys = None
if 'ssh' in config:
template = config['ssh']['template']
tmpl_dir = config['ssh']['tmpl_dir']
env = Environment(loader=FileSystemLoader(tmpl_dir))
ssh_template = env.get_template(template)
if 'authorized_keys' in config['ssh']:
auth_keys = config['ssh']['authorized_keys']
for host in topology['hosts']:
if host['is_manageable']:
root_dir = mount_root_fs(host['name'], config['ml_dir'],
config['rootfs'])
new_host = net.addHost(host['name'], ip=None,
cls=ManageableHost,
root_dir=root_dir,
ssh_template=ssh_template,
auth_keys=auth_keys)
else:
new_host = net.addHost(host['name'])
for link in host['links']:
switch = switches[link['sw']]
lnk = net.addLink(new_host, switch)
if 'ip' in link:
ip, netmask = link['ip'].split('/')
new_host.setIP(ip, prefixLen=netmask, intf=lnk.intf1)
if 'gw' in host:
new_host.sendCmd('ip route add default via %s' % host['gw'])
new_host.waiting = False
if not host['name'] in hosts:
hosts[host['name']] = {'node': new_host, 'rootfs': new_host.name}
return hosts
def setup_switches(net, topology):
switches = {}
info('*** Adding switches\n')
# first loop : create switches
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
switches[switch['name']] = net.addSwitch(switch['name'],
dpid=switch['dpid'],
cls=OVSSwitch,
protocols=protocols)
# second loop: add links between switches
for switch in topology['switches']:
if 'links' in switch:
for peer in switch['links']:
net.addLink(switches[switch['name']],
switches[peer])
return switches
def setup_nat(net, topology):
node = None
if 'nat' in topology:
info('*** Setup nat gateway node\n')
node = connectToInternet(net,
switch=topology['nat']['switch']['name'],
node_name=topology['nat']['node']['name'],
ip_address=topology['nat']['node']['ip'])
info('** Starting nat\n')
startNAT(node,
inetIntf=topology['nat']['ext_iface'],
intIP=topology['nat']['node']['ip'])
return node
def fix_switch_protocols(topology):
""" force protocols versions as mininet < 2.2.0 is not doing its job"""
for switch in topology['switches']:
if 'protocols' in switch:
protocols = ','.join(switch['protocols'])
else:
protocols = 'OpenFlow10'
cmd = "ovs-vsctl set Bridge %s protocols=%s" % (switch['name'],
protocols)
subprocess.call(shlex.split(cmd))
def set_oob_switch_standalone(topology):
if 'nat' in topology:
switch = topology['nat']['switch']['name']
cmd = shlex.split("ovs-vsctl set-fail-mode %s standalone " % switch)
subprocess.call(cmd)
cmd2 = shlex.split("ovs-vsctl del-controller %s" % switch)
subprocess.call(cmd2)
def tear_down_nat(node):
info('** Stopping nat\n')
stopNAT(node)
def start(net, topology):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.start_ssh_server()
info('** Starting network\n')
net.start()
fix_switch_protocols(topology)
set_oob_switch_standalone(topology)
CLI(net)
def stop(net, config):
for name, node in net.items():
if isinstance(node, ManageableHost):
node.stop_processes()
umount_root_fs(name, config['ml_dir'])
info('** Stopping network\n')
net.stop()
def setup_topo(config, topology):
nat_node = None
try:
net = Mininet(controller=RemoteController)
setup_controllers(net, topology)
switches = setup_switches(net, topology)
setup_hosts(net, switches, config, topology)
nat_node = setup_nat(net, topology)
start(net, topology)
except Exception, e:
info('** Error spawning topology\n')
print e
cleanup_all(config, topology)
sys.exit(1)
if nat_node:
tear_down_nat(nat_node)
stop(net, config)
def cleanup_all(config, topology, hard_cleanup=False):
for host in topology['hosts']:
if host['is_manageable']:
host_dir = os.path.join(config['ml_dir'], host['name'])
host_root_dir = os.path.join(host_dir, 'merged')
for directory in ['sys', 'proc']:
mount_point = os.path.join(host_root_dir, directory)
subprocess.call(shlex.split("umount %s" % mount_point))
subprocess.call(shlex.split("umount %s" % host_root_dir))
if hard_cleanup:
shutil.rmtree(host_dir)
# clean mininet
subprocess.call(shlex.split("mn -c"))
if __name__ == '__main__':
setLogLevel('info')
parser = argparse.ArgumentParser(description='Minilab arguments.')
parser.add_argument('--config', dest='config', type=str,
default='config.yaml',
help='minilab config file (default: config.yaml)')
parser.add_argument('topology', metavar='topology', type=str,
help='topology configuration file')
parser.add_argument('--cleanup', dest='cleanup', action='store_true',
help='cleanup minilab setup')
parser.add_argument('--reset', dest='hard_cleanup', action='store_true',
help='cleanup and destroy all hosts directories')
args = parser.parse_args()
minilab_config = load_config(args.config)
topo_config = load_config(args.topology)
cleanup = args.cleanup
if args.cleanup:
if args.hard_cleanup:
info('** Cleaning minilab from scratch\n')
cleanup_all(minilab_config, topo_config, hard_cleanup=True)
else:
info('** Cleaning minilab\n')
cleanup_all(minilab_config, topo_config)
info('** Cleaning done\n')
sys.exit(0)
setup_topo(minilab_config, topo_config)
| self.copy_auth_keys() | conditional_block |
get_samples.py | #!/usr/bin/env
#coding:utf8
import re, json, random, sys, jieba
from utils import char_cut
re_cdata=re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.I) # 匹配 CDATA
re_script=re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I) # Script
re_style=re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I) # tyle
re_br=re.compile('<br\s*?/?>') # 处理换行
re_h=re.compile('</?\w+[^>]*>') # ML标签
re_comment=re.compile('<!--[^>]*-->') # HTML注释
blank_line=re.compile('\n+') # 去掉多余的空行
def filterHtmlTag(htmlstr):
s = re_cdata.sub('', htmlstr) # 去掉 CDATA
s = re_script.sub('', s) # 去掉SCRIPT
s = re_style.sub('', s) # 去掉style
s = re_br.sub('\n', s) # 将br转换为换行
s = re_h.sub('', s) # 去掉HTML 标签
s = re_comment.sub('', s) # 去掉HTML注释
s = blank_line.sub('\n', s) #去掉多余的空行
return s
p = re.compile(r"<p>(.+?)</p>")
l = re.compile(r"[\"\d+\",\"(.+)\"]")
capacity_txt = [e.strip().split(',') for e in open('./data/capacity_point.csv', 'r', encoding='utf8').readlines()[1:]]
capacity_dict = {}
for e1 in capacity_txt:
for e in e1:
if e == '0': continue
capacity_dict[e] = 0
ca = set(capacity_dict.keys())
intents = set(json.loads(open('./data/intents.txt', 'r', encoding='utf8').readlines()[0]))
diff = intents ^ ca
def min_edit_distance(word1, word2):
m, n = len(word1), len(word2)
if m == 0: retu | 0: return m
dp = [[0] * (n + 1) for _ in range(m + 1)] # 初始化dp和边界
for i in range(1, m + 1): dp[i][0] = i
for j in range(1, n + 1): dp[0][j] = j
for i in range(1, m + 1): # 计算dp
for j in range(1, n + 1):
a=word1[i - 1];b=word2[j - 1]
if word1[i - 1] == word2[j - 1]:
d = 0
else:
d = 1
dp[i][j] = min(dp[i - 1][j - 1] + d, dp[i][j - 1] + 1, dp[i - 1][j] + 1)
return dp[m][n]
def words_sim(word1, word2):
w1 = char_cut(word1); w2 = set(char_cut(word2))
intersection = set(w1).intersection(w2)
union = set(w1).union(set(w2))
if len(intersection) == 0:
return None
dice_dist = 2 * len(intersection) / len(union)
#edit_distance = min_edit_distance(word1, word2)
return dice_dist #/ (edit_distance + 1e-8)
def get_sample(src_file, des_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5
txt = open(src_file, 'r', encoding='utf8').readlines()
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0: continue
if question in qesset: continue
sim_dict = {}
for e in labels:
max_sim = 0; sim_word = ''
for k, v in capacity_dict.items():
dist = words_sim(e[1], k)
if dist and dist > max_sim:
max_sim = dist
sim_word = k
if max_sim < threshold: continue
if sim_word not in sim_dict: sim_dict[sim_word] = 0
sim_dict[sim_word] += max_sim
sorted_sim_dict = sorted(sim_dict.items(), key=lambda d:d[1], reverse=True)
if sorted_sim_dict:
label = sorted_sim_dict[0][0]
else:
continue
if question not in qes2label:
qes2label[question] = []
qesset.add(question)
if label not in label2id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compile(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营")), ('排版美工', re.compile(u"排版|美工")), ('SQL调优', re.compile(u"SQL|调优")),
('数值策划', re.compile(u"数值|策划")), ('求职应聘', re.compile(u"求职|应聘")), ('广告算法', re.compile(u"广告算法")),
('选题策划', re.compile(u"选题|策划")), ('游戏运营', re.compile(u"游戏运营")), ('需求分析', re.compile(u"需求分析")),
('文案编辑', re.compile(u"文案|编辑")), ('运营', re.compile(u"运营")), ('推荐算法', re.compile(u"推荐算法|推荐")),
('宣传推广', re.compile(u"宣传|推广")), ('电子商务', re.compile(u"电子|商务")), ('沟通能力', re.compile(u"沟通能力|沟通")),
('物料制作', re.compile(u"物料|制作")), ('交互设计', re.compile(u"交互|设计")), ('APP', re.compile(u"APP")),
('爬虫', re.compile(u"爬虫")), ('渠道增长', re.compile(u"渠道增长")), ('资源谈判', re.compile(u"资源谈判|谈判")),
('数据采集', re.compile(u"数据采集")), ('产品', re.compile(u"产品")), ('机器学习', re.compile(u"机器学习|深度学习|人工智能")),
('视频编解码', re.compile(u"视频|编解码")), ('游戏策划', re.compile(u"游戏策划")),]
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
'''
for e in labels:
if e[1] not in label2qes: label2qes[e[1]] = set()
label2qes[e[1]].add(question)
if e[1] not in label_cnt: label_cnt[e[1]] = 0
label_cnt[e[1]] += 1
'''
for e1, e2 in re_patten:
if e2.search(question):
res.append(e1 + '\t' + question + '\n'); break
aa=e2.search(question)
a=1
'''
sorted_label2qes = sorted(label2qes.items(), key=lambda d:len(d[1]), reverse=True)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for e in sorted_label2qes:
if len(e[1]) < 1000: continue
for e1 in e[1]: res.append(e[0] + '\t' + e1 + '\n')
'''
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
a=1
if __name__ == '__main__':
#min_edit_distance('求职', '求职应聘')
#get_sample('./data/q1.res', './data/sen_class_corp666.md')
#get_fasttext_sample('./data/q1.res', './data/fasttext.train')
#get_ft_data('./data/sen_class_corp666.md', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val')
patt = re.compile(r"活动|策划"); aa=patt.search("活动着")
get_sample_new('./data/q1.res', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val') | rn n
if n == | identifier_name |
get_samples.py | #!/usr/bin/env
#coding:utf8
import re, json, random, sys, jieba
from utils import char_cut
re_cdata=re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.I) # 匹配 CDATA
re_script=re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I) # Script
re_style=re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I) # tyle
re_br=re.compile('<br\s*?/?>') # 处理换行
re_h=re.compile('</?\w+[^>]*>') # ML标签
re_comment=re.compile('<!--[^>]*-->') # HTML注释
blank_line=re.compile('\n+') # 去掉多余的空行
def filterHtmlTag(htmlstr):
s = re_cdata.sub('', htmlstr) # 去掉 CDATA
s = re_script.sub('', s) # 去掉SCRIPT
s = re_style.sub('', s) # 去掉style
s = re_br.sub('\n', s) # 将br转换为换行
s = re_h.sub('', s) # 去掉HTML 标签
s = re_comment.sub('', s) # 去掉HTML注释
s = blank_line.sub('\n', s) #去掉多余的空行
return s
p = re.compile(r"<p>(.+?)</p>")
l = re.compile(r"[\"\d+\",\"(.+)\"]")
capacity_txt = [e.strip().split(',') for e in open('./data/capacity_point.csv', 'r', encoding='utf8').readlines()[1:]]
capacity_dict = {}
for e1 in capacity_txt:
for e in e1:
if e == '0': continue
capacity_dict[e] = 0
ca = set(capacity_dict.keys())
intents = set(json.loads(open('./data/intents.txt', 'r', encoding='utf8').readlines()[0]))
diff = intents ^ ca
def min_edit_distance(word1, word2):
m, n = len(word1), len(word2)
if m == 0: return n
if n == 0: return m
dp = [[0] * (n + 1) for _ in range(m + 1)] # 初始化dp和边界
for i in range(1, m + 1): dp[i][0] = i
for j in range(1, n + 1): dp[0][j] = j
for i in range(1, m + 1): # 计算dp
for j in range(1, n + 1):
a=word1[i - 1];b=word2[j - 1]
if word1[i - 1] == word2[j - 1]:
d = 0
else:
d = 1
dp[i][j] = min(dp[i - 1][j - 1] + d, dp[i][j - 1] + 1, dp[i - 1][j] + 1)
return dp[m][n]
def words_sim(word1, word2):
w1 = char_cut(word1); w2 = set(char_cut(word2))
intersection = set(w1).intersection(w2)
union = set(w1).union(set(w2))
if len(intersection) == 0:
return None
dice_dist = 2 * len(intersection) / len(union)
#edit_distance = min_edit_distance(word1, word2)
return dice_dist #/ (edit_distance + 1e-8)
def get_sample(src_file, des_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5
txt = open(src_file, 'r', encoding='utf8').readlines()
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0: continue
if question in qesset: continue
sim_dict = {}
for e in labels:
max_sim = 0; sim_word = ''
for k, v in capacity_dict.items():
dist = words_sim(e[1], k)
if dist and dist > max_sim:
max_sim = dist
sim_word = k
if max_sim < threshold: continue
if sim_word not in sim_dict: sim_dict[sim_word] = 0
sim_dict[sim_word] += max_sim
sorted_sim_dict = sorted(sim_dict.items(), key=lambda d:d[1], reverse=True)
if sorted_sim_dict:
label = sorted_sim_dict[0][0]
else:
continue
if question not in qes2label:
qes2label[question] = []
qesset.add(question)
if label not in label2id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with | le(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营")), ('排版美工', re.compile(u"排版|美工")), ('SQL调优', re.compile(u"SQL|调优")),
('数值策划', re.compile(u"数值|策划")), ('求职应聘', re.compile(u"求职|应聘")), ('广告算法', re.compile(u"广告算法")),
('选题策划', re.compile(u"选题|策划")), ('游戏运营', re.compile(u"游戏运营")), ('需求分析', re.compile(u"需求分析")),
('文案编辑', re.compile(u"文案|编辑")), ('运营', re.compile(u"运营")), ('推荐算法', re.compile(u"推荐算法|推荐")),
('宣传推广', re.compile(u"宣传|推广")), ('电子商务', re.compile(u"电子|商务")), ('沟通能力', re.compile(u"沟通能力|沟通")),
('物料制作', re.compile(u"物料|制作")), ('交互设计', re.compile(u"交互|设计")), ('APP', re.compile(u"APP")),
('爬虫', re.compile(u"爬虫")), ('渠道增长', re.compile(u"渠道增长")), ('资源谈判', re.compile(u"资源谈判|谈判")),
('数据采集', re.compile(u"数据采集")), ('产品', re.compile(u"产品")), ('机器学习', re.compile(u"机器学习|深度学习|人工智能")),
('视频编解码', re.compile(u"视频|编解码")), ('游戏策划', re.compile(u"游戏策划")),]
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
'''
for e in labels:
if e[1] not in label2qes: label2qes[e[1]] = set()
label2qes[e[1]].add(question)
if e[1] not in label_cnt: label_cnt[e[1]] = 0
label_cnt[e[1]] += 1
'''
for e1, e2 in re_patten:
if e2.search(question):
res.append(e1 + '\t' + question + '\n'); break
aa=e2.search(question)
a=1
'''
sorted_label2qes = sorted(label2qes.items(), key=lambda d:len(d[1]), reverse=True)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for e in sorted_label2qes:
if len(e[1]) < 1000: continue
for e1 in e[1]: res.append(e[0] + '\t' + e1 + '\n')
'''
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
a=1
if __name__ == '__main__':
#min_edit_distance('求职', '求职应聘')
#get_sample('./data/q1.res', './data/sen_class_corp666.md')
#get_fasttext_sample('./data/q1.res', './data/fasttext.train')
#get_ft_data('./data/sen_class_corp666.md', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val')
patt = re.compile(r"活动|策划"); aa=patt.search("活动着")
get_sample_new('./data/q1.res', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val') | open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compi | identifier_body |
get_samples.py | #!/usr/bin/env
#coding:utf8
import re, json, random, sys, jieba
from utils import char_cut
re_cdata=re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.I) # 匹配 CDATA
re_script=re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I) # Script
re_style=re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I) # tyle
re_br=re.compile('<br\s*?/?>') # 处理换行
re_h=re.compile('</?\w+[^>]*>') # ML标签
re_comment=re.compile('<!--[^>]*-->') # HTML注释
blank_line=re.compile('\n+') # 去掉多余的空行
def filterHtmlTag(htmlstr):
s = re_cdata.sub('', htmlstr) # 去掉 CDATA
s = re_script.sub('', s) # 去掉SCRIPT
s = re_style.sub('', s) # 去掉style
s = re_br.sub('\n', s) # 将br转换为换行
s = re_h.sub('', s) # 去掉HTML 标签
s = re_comment.sub('', s) # 去掉HTML注释
s = blank_line.sub('\n', s) #去掉多余的空行
return s
p = re.compile(r"<p>(.+?)</p>")
l = re.compile(r"[\"\d+\",\"(.+)\"]")
capacity_txt = [e.strip().split(',') for e in open('./data/capacity_point.csv', 'r', encoding='utf8').readlines()[1:]]
capacity_dict = {}
for e1 in capacity_txt:
for e in e1:
if e == '0': continue
capacity_dict[e] = 0
ca = set(capacity_dict.keys())
intents = set(json.loads(open('./data/intents.txt', 'r', encoding='utf8').readlines()[0]))
diff = intents ^ ca
def min_edit_distance(word1, word2):
m, n = len(word1), len(word2)
if m == 0: return n
if n == 0: return m
dp = [[0] * (n + 1) for _ in range(m + 1)] # 初始化dp和边界
for i in range(1, m + 1): dp[i][0] = i
for j in range(1, n + 1): dp[0][j] = j
for i in range(1, m + 1): # 计算dp
for j in range(1, n + 1):
a=word1[i - 1];b=word2[j - 1]
if word1[i - 1] == wor | ))
intersection = set(w1).intersection(w2)
union = set(w1).union(set(w2))
if len(intersection) == 0:
return None
dice_dist = 2 * len(intersection) / len(union)
#edit_distance = min_edit_distance(word1, word2)
return dice_dist #/ (edit_distance + 1e-8)
def get_sample(src_file, des_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5
txt = open(src_file, 'r', encoding='utf8').readlines()
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0: continue
if question in qesset: continue
sim_dict = {}
for e in labels:
max_sim = 0; sim_word = ''
for k, v in capacity_dict.items():
dist = words_sim(e[1], k)
if dist and dist > max_sim:
max_sim = dist
sim_word = k
if max_sim < threshold: continue
if sim_word not in sim_dict: sim_dict[sim_word] = 0
sim_dict[sim_word] += max_sim
sorted_sim_dict = sorted(sim_dict.items(), key=lambda d:d[1], reverse=True)
if sorted_sim_dict:
label = sorted_sim_dict[0][0]
else:
continue
if question not in qes2label:
qes2label[question] = []
qesset.add(question)
if label not in label2id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compile(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营")), ('排版美工', re.compile(u"排版|美工")), ('SQL调优', re.compile(u"SQL|调优")),
('数值策划', re.compile(u"数值|策划")), ('求职应聘', re.compile(u"求职|应聘")), ('广告算法', re.compile(u"广告算法")),
('选题策划', re.compile(u"选题|策划")), ('游戏运营', re.compile(u"游戏运营")), ('需求分析', re.compile(u"需求分析")),
('文案编辑', re.compile(u"文案|编辑")), ('运营', re.compile(u"运营")), ('推荐算法', re.compile(u"推荐算法|推荐")),
('宣传推广', re.compile(u"宣传|推广")), ('电子商务', re.compile(u"电子|商务")), ('沟通能力', re.compile(u"沟通能力|沟通")),
('物料制作', re.compile(u"物料|制作")), ('交互设计', re.compile(u"交互|设计")), ('APP', re.compile(u"APP")),
('爬虫', re.compile(u"爬虫")), ('渠道增长', re.compile(u"渠道增长")), ('资源谈判', re.compile(u"资源谈判|谈判")),
('数据采集', re.compile(u"数据采集")), ('产品', re.compile(u"产品")), ('机器学习', re.compile(u"机器学习|深度学习|人工智能")),
('视频编解码', re.compile(u"视频|编解码")), ('游戏策划', re.compile(u"游戏策划")),]
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
'''
for e in labels:
if e[1] not in label2qes: label2qes[e[1]] = set()
label2qes[e[1]].add(question)
if e[1] not in label_cnt: label_cnt[e[1]] = 0
label_cnt[e[1]] += 1
'''
for e1, e2 in re_patten:
if e2.search(question):
res.append(e1 + '\t' + question + '\n'); break
aa=e2.search(question)
a=1
'''
sorted_label2qes = sorted(label2qes.items(), key=lambda d:len(d[1]), reverse=True)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for e in sorted_label2qes:
if len(e[1]) < 1000: continue
for e1 in e[1]: res.append(e[0] + '\t' + e1 + '\n')
'''
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
a=1
if __name__ == '__main__':
#min_edit_distance('求职', '求职应聘')
#get_sample('./data/q1.res', './data/sen_class_corp666.md')
#get_fasttext_sample('./data/q1.res', './data/fasttext.train')
#get_ft_data('./data/sen_class_corp666.md', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val')
patt = re.compile(r"活动|策划"); aa=patt.search("活动着")
get_sample_new('./data/q1.res', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val') | d2[j - 1]:
d = 0
else:
d = 1
dp[i][j] = min(dp[i - 1][j - 1] + d, dp[i][j - 1] + 1, dp[i - 1][j] + 1)
return dp[m][n]
def words_sim(word1, word2):
w1 = char_cut(word1); w2 = set(char_cut(word2 | conditional_block |
get_samples.py | #!/usr/bin/env
#coding:utf8
import re, json, random, sys, jieba
from utils import char_cut
re_cdata=re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.I) # 匹配 CDATA
re_script=re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I) # Script
re_style=re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I) # tyle
re_br=re.compile('<br\s*?/?>') # 处理换行
re_h=re.compile('</?\w+[^>]*>') # ML标签
re_comment=re.compile('<!--[^>]*-->') # HTML注释
blank_line=re.compile('\n+') # 去掉多余的空行
def filterHtmlTag(htmlstr):
s = re_cdata.sub('', htmlstr) # 去掉 CDATA
s = re_script.sub('', s) # 去掉SCRIPT
s = re_style.sub('', s) # 去掉style
s = re_br.sub('\n', s) # 将br转换为换行
s = re_h.sub('', s) # 去掉HTML 标签
s = re_comment.sub('', s) # 去掉HTML注释
s = blank_line.sub('\n', s) #去掉多余的空行
return s
p = re.compile(r"<p>(.+?)</p>")
l = re.compile(r"[\"\d+\",\"(.+)\"]")
capacity_txt = [e.strip().split(',') for e in open('./data/capacity_point.csv', 'r', encoding='utf8').readlines()[1:]]
capacity_dict = {}
for e1 in capacity_txt:
for e in e1:
if e == '0': continue
capacity_dict[e] = 0
ca = set(capacity_dict.keys())
intents = set(json.loads(open('./data/intents.txt', 'r', encoding='utf8').readlines()[0]))
diff = intents ^ ca
def min_edit_distance(word1, word2):
m, n = len(word1), len(word2)
if m == 0: return n
if n == 0: return m
dp = [[0] * (n + 1) for _ in range(m + 1)] # 初始化dp和边界
for i in range(1, m + 1): dp[i][0] = i
for j in range(1, n + 1): dp[0][j] = j
for i in range(1, m + 1): # 计算dp
for j in range(1, n + 1):
a=word1[i - 1];b=word2[j - 1]
if word1[i - 1] == word2[j - 1]:
d = 0
else:
d = 1
dp[i][j] = min(dp[i - 1][j - 1] + d, dp[i][j - 1] + 1, dp[i - 1][j] + 1)
return dp[m][n]
def words_sim(word1, word2):
w1 = char_cut(word1); w2 = set(char_cut(word2))
intersection = set(w1).intersection(w2)
union = set(w1).union(set(w2))
if len(intersection) == 0:
return None
dice_dist = 2 * len(intersection) / len(union)
#edit_distance = min_edit_distance(word1, word2)
return dice_dist #/ (edit_distance + 1e-8)
def get_sample(src_file, des_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5
txt = open(src_file, 'r', encoding='utf8').readlines()
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0: continue
if question in qesset: continue
sim_dict = {}
for e in labels:
max_sim = 0; sim_word = ''
for k, v in capacity_dict.items():
dist = words_sim(e[1], k)
if dist and dist > max_sim:
max_sim = dist
sim_word = k
if max_sim < threshold: continue
if sim_word not in sim_dict: sim_dict[sim_word] = 0
sim_dict[sim_word] += max_sim
sorted_sim_dict = sorted(sim_dict.items(), key=lambda d:d[1], reverse=True)
if sorted_sim_dict:
label = sorted_sim_dict[0][0]
else:
continue
if question not in qes2label:
qes2label[question] = []
qesset.add(question)
if label not in label2id:
label2id[label] = index
index += 1
if label not in label_cnt: label_cnt[label] = 0
label_cnt[label] += 1
qes2label[question].append(label)
if label not in label2qes:
label2qes[label] = []
label2qes[label].append(question)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for k, v in capacity_dict.items():
if k in label_cnt:
capacity_dict[k] = label_cnt[k]
label_num = 0; sample_num = 0
with open(des_file, 'w', encoding='utf8') as f:
for k, v in label2qes.items():
if k not in capacity_dict and len(v) < 1000: continue
f.write('## intent:' + k + '\n'); label_num += 1
v = list(set(v))
for ele in v:
#f.write('- ' + ' '.join(char_cut(ele)) + '\n')
f.write('- ' + ele + '\n')
sample_num += 1
f.write('\n')
print('label_num = %d, sample_num = %d' % (label_num, sample_num))
a=1
def get_fasttext_sample(src_file, des_file):
label_set = set(); qes_set = set()
txt = open(src_file, 'r', encoding='utf8').readlines()
with open(des_file, 'w', encoding='utf8') as f:
for e in txt:
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
if len(labels) == 0 or question.strip() == '': continue
qes_set.add(question)
line_sample = []
for e in labels:
line_sample.append('__label__' + e[1]); label_set.add(e[1])
for e in char_cut(question):
line_sample.append(e)
f.write(' '.join(line_sample) + '\n')
print("label number: {}, question number: {}".format(len(label_set), len(qes_set)))
b=1
def get_ft_data(src_file, train_file, test_file, val_file):
res = []
item_regex = re.compile(r'\s*[-\*+]\s*(.+)')
txt = open(src_file, 'r', encoding='utf8').readlines()
for line in txt:
if '## intent:' in line:
label = line.strip().split(':')[-1]
else:
match = re.match(item_regex, line)
if match:
item = match.group(1)
#seg_item = ' '.join(list(jieba.cut(item)))
seg_item = item #' '.join(char_cut(item))
#res.append('__label__' + label + ' ' + seg_item + '\n')
res.append(label + '\t' + seg_item + '\n')
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
def get_sample_new(src_file, train_file, test_file, val_file):
qes2label = {}; label2id = {}; index = 0; label2qes = {}; label_cnt = {}; qesset = set(); idcnt = 1; threshold = 0.5; res = []
txt = open(src_file, 'r', encoding='utf8').readlines()
re_patten = [('活动策划', re.compile(u"活动|策划")), ('视频识别', re.compile(u"视频识别")), ('项目管理', re.compile(u"项目|管理")),
('图像算法', re.compile(u"图像算法")), ('视频算法', re.compile(u"视频算法")), ('入职准备', re.compile(u"入职|入职准备")),
('视频流转码', re.compile(u"视频流|转码")), ('用户运营', re.compile(u"用户|运营")), ('数据挖掘', re.compile(u"数据挖掘|挖掘")),
('用户研究', re.compile(u"用户研究")), ('数据库索引', re.compile(u"数据库|索引")), ('社交', re.compile(u"社交")),
('音频编解码', re.compile(u"音频|编解码")), ('数据分析', re.compile(u"数据|分析")), ('流媒体封装', re.compile(u"流媒体|封装")),
('图像识别', re.compile(u"图像识别")), ('游戏', re.compile(u"游戏")), ('计算广告', re.compile(u"计算广告")),
('高并发', re.compile(u"高并发|并发")), ('面试辅导', re.compile(u"面试|辅导")), ('技术', re.compile(u"技术")),
('手机游戏', re.compile(u"手机|游戏")), ('需求评估', re.compile(u"需求评估")), ('全栈', re.compile(u"全栈")),
('游戏制作人', re.compile(u"游戏制作人|制作人")), ('创意创新', re.compile(u"创意|创新")), ('协调能力', re.compile(u"协调能力|协调")),
('数据运营', re.compile(u"数据运营")), ('排版美工', re.compile(u"排版|美工")), ('SQL调优', re.compile(u"SQL|调优")),
('数值策划', re.compile(u"数值|策划")), ('求职应聘', re.compile(u"求职|应聘")), ('广告算法', re.compile(u"广告算法")),
('选题策划', re.compile(u"选题|策划")), ('游戏运营', re.compile(u"游戏运营")), ('需求分析', re.compile(u"需求分析")),
('文案编辑', re.compile(u"文案|编辑")), ('运营', re.compile(u"运营")), ('推荐算法', re.compile(u"推荐算法|推荐")),
('宣传推广', re.compile(u"宣传|推广")), ('电子商务', re.compile(u"电子|商务")), ('沟通能力', re.compile(u"沟通能力|沟通")),
('物料制作', re.compile(u"物料|制作")), ('交互设计', re.compile(u"交互|设计")), ('APP', re.compile(u"APP")),
('爬虫', re.compile(u"爬虫")), ('渠道增长', re.compile(u"渠道增长")), ('资源谈判', re.compile(u"资源谈判|谈判")),
('数据采集', re.compile(u"数据采集")), ('产品', re.compile(u"产品")), ('机器学习', re.compile(u"机器学习|深度学习|人工智能")),
('视频编解码', re.compile(u"视频|编解码")), ('游戏策划', re.compile(u"游戏策划")),]
for e in txt:
sys.stdout.write('Handle progress: ' + str(idcnt) + ' / ' + str(len(txt)) + '\n'); sys.stdout.flush(); idcnt += 1
split_text = e.split('\t')
question = filterHtmlTag(split_text[2])
labels = json.loads((split_text[4]))
'''
for e in labels:
if e[1] not in label2qes: label2qes[e[1]] = set()
label2qes[e[1]].add(question)
if e[1] not in label_cnt: label_cnt[e[1]] = 0
label_cnt[e[1]] += 1
'''
for e1, e2 in re_patten:
if e2.search(question):
res.append(e1 + '\t' + question + '\n'); break
aa=e2.search(question)
a=1
'''
sorted_label2qes = sorted(label2qes.items(), key=lambda d:len(d[1]), reverse=True)
sorted_label_cnt = sorted(label_cnt.items(), key=lambda d:d[1], reverse=True)
for e in sorted_label2qes:
if len(e[1]) < 1000: continue
for e1 in e[1]: res.append(e[0] + '\t' + e1 + '\n')
'''
random.shuffle(res)
with open(train_file, 'w', encoding='utf8') as f1:
for e in res[:int(len(res) * 0.6)]: f1.write(e)
with open(test_file, 'w', encoding='utf8') as f2:
for e in res[int(len(res) * 0.6):int(len(res) * 0.8)]: f2.write(e)
with open(val_file, 'w', encoding='utf8') as f3:
for e in res[int(len(res) * 0.8):]: f3.write(e)
a=1
if __name__ == '__main__':
| #get_fasttext_sample('./data/q1.res', './data/fasttext.train')
#get_ft_data('./data/sen_class_corp666.md', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val')
patt = re.compile(r"活动|策划"); aa=patt.search("活动着")
get_sample_new('./data/q1.res', './data/sen_class.train', './data/sen_class.test', './data/sen_class.val') | #min_edit_distance('求职', '求职应聘')
#get_sample('./data/q1.res', './data/sen_class_corp666.md')
| random_line_split |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of owner API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Recieve account can be specified separately and must be allpy to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
///
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalec Paublic Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
|
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// that means it's not mqs so need to print it
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive an tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - it is mean some of the data can be gone
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note. Wallet does a minor role here,
/// The marketplace workflow and managed by QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient Pk
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
/// Utility method to conver Slate into the Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
}
| {
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
} | identifier_body |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of owner API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Recieve account can be specified separately and must be allpy to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
///
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalec Paublic Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(), | pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// that means it's not mqs so need to print it
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive an tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - it is mean some of the data can be gone
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note. Wallet does a minor role here,
/// The marketplace workflow and managed by QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient Pk
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
/// Utility method to conver Slate into the Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
} | })
}
/// Build a coinbase transaction | random_line_split |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of owner API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Recieve account can be specified separately and must be allpy to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
///
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn | () -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalec Paublic Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// that means it's not mqs so need to print it
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
}
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive an tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - it is mean some of the data can be gone
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note. Wallet does a minor role here,
/// The marketplace workflow and managed by QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient Pk
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
/// Utility method to conver Slate into the Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
}
| check_version | identifier_name |
foreign.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic implementation of owner API functions
use crate::api_impl::owner::check_ttl;
use crate::api_impl::owner_swap;
use crate::grin_core::core::amount_to_hr_string;
use crate::grin_keychain::Keychain;
use crate::grin_util::secp::key::SecretKey;
use crate::grin_util::Mutex;
use crate::internal::selection;
use crate::internal::{tx, updater};
use crate::proof::crypto::Hex;
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProofAddressType;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate_versions::SlateVersion;
use crate::Context;
use crate::{
BlockFees, CbData, Error, ErrorKind, NodeClient, Slate, SlatePurpose, TxLogEntryType,
VersionInfo, VersionedSlate, WalletBackend, WalletInst, WalletLCProvider,
};
use ed25519_dalek::PublicKey as DalekPublicKey;
use grin_wallet_util::OnionV3Address;
use std::sync::Arc;
use std::sync::RwLock;
use strum::IntoEnumIterator;
const FOREIGN_API_VERSION: u16 = 2;
const USER_MESSAGE_MAX_LEN: usize = 256;
lazy_static! {
/// Recieve account can be specified separately and must be allpy to ALL receive operations
static ref RECV_ACCOUNT: RwLock<Option<String>> = RwLock::new(None);
}
/// get current receive account name
pub fn get_receive_account() -> Option<String> {
RECV_ACCOUNT.read().unwrap().clone()
}
/// get tor proof address
pub fn get_proof_address<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
) -> Result<String, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let provable_address = proofaddress::payment_proof_address(&keychain, ProofAddressType::Onion)
.map_err(|e| {
ErrorKind::PaymentProofAddress(format!(
"Error occurred in getting payment proof address, {}",
e
))
})?;
Ok(provable_address.public_key)
}
///
pub fn set_receive_account(account: String) {
RECV_ACCOUNT.write().unwrap().replace(account.to_string());
}
/// Return the version info
pub fn check_version() -> Result<VersionInfo, Error> {
// Proof address will be the onion address (Dalec Paublic Key). It is exactly what we need
Ok(VersionInfo {
foreign_api_version: FOREIGN_API_VERSION,
supported_slate_versions: SlateVersion::iter().collect(),
})
}
/// Build a coinbase transaction
pub fn build_coinbase<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
block_fees: &BlockFees,
test_mode: bool,
) -> Result<CbData, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
updater::build_coinbase(&mut *w, keychain_mask, block_fees, test_mode)
}
/// verify slate messages
pub fn verify_slate_messages(slate: &Slate) -> Result<(), Error> {
slate.verify_messages()
}
/// Receive a tx as recipient
/// Note: key_id & output_amounts needed for secure claims, mwc713.
pub fn receive_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
address: Option<String>,
key_id_opt: Option<&str>,
output_amounts: Option<Vec<u64>>,
dest_acct_name: Option<&str>,
message: Option<String>,
use_test_rng: bool,
refresh_from_node: bool,
) -> Result<(Slate, Context), Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let display_from = address.clone().unwrap_or("http listener".to_string());
let slate_message = &slate.participant_data[0].message;
let address_for_logging = address.clone().unwrap_or("http".to_string());
// that means it's not mqs so need to print it
if slate_message.is_some() {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs. Message: [\"{}\"]",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false),
slate_message.clone().unwrap()
)
.to_string()
);
} else {
println!(
"{}",
format!(
"slate [{}] received from [{}] for [{}] MWCs.",
slate.id.to_string(),
display_from,
amount_to_hr_string(slate.amount, false)
)
.to_string()
);
}
debug!("foreign just received_tx just got slate = {:?}", slate);
let mut ret_slate = slate.clone();
check_ttl(w, &ret_slate, refresh_from_node)?;
let mut dest_acct_name = dest_acct_name.map(|s| s.to_string());
if dest_acct_name.is_none() {
dest_acct_name = get_receive_account();
}
let parent_key_id = match dest_acct_name {
Some(d) => {
let pm = w.get_acct_path(d.to_owned())?;
match pm {
Some(p) => p.path,
None => w.parent_key_id(),
}
}
None => w.parent_key_id(),
};
// Don't do this multiple times
let tx = updater::retrieve_txs(
&mut *w,
keychain_mask,
None,
Some(ret_slate.id),
Some(&parent_key_id),
use_test_rng,
None,
None,
)?;
for t in &tx {
if t.tx_type == TxLogEntryType::TxReceived {
return Err(ErrorKind::TransactionAlreadyReceived(ret_slate.id.to_string()).into());
}
if let Some(offset) = t.kernel_offset {
let offset_skey = slate.tx.offset.secret_key()?;
let keychain = w.keychain(keychain_mask)?;
let offset_commit = keychain.secp().commit(0, offset_skey)?;
if offset == offset_commit {
return Err(ErrorKind::TransactionWithSameOffsetAlreadyReceived(
offset_commit.to_hex(),
)
.into());
}
}
}
let message = match message {
Some(mut m) => {
m.truncate(USER_MESSAGE_MAX_LEN);
Some(m)
}
None => None,
};
let num_outputs = match &output_amounts {
Some(v) => v.len(),
None => 1,
};
let height = w.last_confirmed_height()?;
// Note: key_id & output_amounts needed for secure claims, mwc713.
let mut context = tx::add_output_to_slate(
&mut *w,
keychain_mask,
&mut ret_slate,
height,
Some(address_for_logging),
key_id_opt,
output_amounts,
&parent_key_id,
1,
message,
false,
use_test_rng,
num_outputs,
)?;
let keychain = w.keychain(keychain_mask)?;
if slate.compact_slate |
tx::update_message(&mut *w, keychain_mask, &ret_slate)?;
let excess = ret_slate.calc_excess(Some(&keychain))?;
if let Some(ref mut p) = ret_slate.payment_proof {
if p.sender_address
.public_key
.eq(&p.receiver_address.public_key)
{
debug!("file proof, replace the receiver address with its address");
let sec_key = proofaddress::payment_proof_address_secret(&keychain, None)?;
let onion_address = OnionV3Address::from_private(&sec_key.0)?;
let dalek_pubkey = onion_address.to_ov3_str();
p.receiver_address = ProvableAddress::from_str(&dalek_pubkey)?;
}
let sig = tx::create_payment_proof_signature(
ret_slate.amount,
&excess,
p.sender_address.clone(),
p.receiver_address.clone(),
proofaddress::payment_proof_address_secret(&keychain, None)?,
)?;
p.receiver_signature = Some(sig);
}
Ok((ret_slate, context))
}
/// Receive an tx that this wallet has issued
pub fn finalize_invoice_tx<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
refresh_from_node: bool,
use_test_rng: bool,
) -> Result<Slate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let mut sl = slate.clone();
check_ttl(w, &sl, refresh_from_node)?;
// Participant id 0 for mwc713 compatibility
let context = w.get_private_context(keychain_mask, sl.id.as_bytes(), 0)?;
if slate.compact_slate {
// Add our contribution to the offset
sl.adjust_offset(&w.keychain(keychain_mask)?, &context)?;
// Slate can be 'compact' - it is mean some of the data can be gone
let mut temp_ctx = context.clone();
temp_ctx.sec_key = context.initial_sec_key.clone();
temp_ctx.sec_nonce = context.initial_sec_nonce.clone();
selection::repopulate_tx(
&mut *w,
keychain_mask,
&mut sl,
&temp_ctx,
false,
use_test_rng,
)?;
}
// Participant id 0 for mwc713 compatibility
tx::complete_tx(&mut *w, keychain_mask, &mut sl, 0, &context)?;
tx::update_stored_tx(&mut *w, keychain_mask, &context, &sl, true)?;
tx::update_message(&mut *w, keychain_mask, &sl)?;
{
let mut batch = w.batch(keychain_mask)?;
// Participant id 0 for mwc713 compatibility
batch.delete_private_context(sl.id.as_bytes(), 0)?;
batch.commit()?;
}
Ok(sl)
}
/// Process the incoming swap message received from TOR
pub fn receive_swap_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<(), Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
owner_swap::swap_income_message(wallet_inst, keychain_mask, &message, None).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(())
}
/// Process swap marketplace message. Please note. Wallet does a minor role here,
/// The marketplace workflow and managed by QT wallet.
pub fn marketplace_message<'a, L, C, K>(
wallet_inst: Arc<Mutex<Box<dyn WalletInst<'a, L, C, K>>>>,
keychain_mask: Option<&SecretKey>,
message: &String,
) -> Result<String, Error>
where
L: WalletLCProvider<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let response =
owner_swap::marketplace_message(wallet_inst, keychain_mask, &message).map_err(|e| {
ErrorKind::SwapError(format!(
"Error occurred in receiving the swap message by TOR, {}",
e
))
})?;
Ok(response)
}
/// Utility method to decrypt the slate pack for receive operation.
/// Returns: slate, content, sender PK, recipient Pk
pub fn decrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
encrypted_slate: VersionedSlate,
address_index: Option<u32>,
) -> Result<
(
Slate,
SlatePurpose,
Option<DalekPublicKey>,
Option<DalekPublicKey>,
),
Error,
>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let keychain = w.keychain(keychain_mask)?;
let sec_key = proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)
.map_err(|e| {
ErrorKind::SlatepackDecodeError(format!("Unable to build key to decrypt, {}", e))
})?;
let sp = encrypted_slate.into_slatepack(&sec_key)?;
let sender = sp.get_sender();
let recipient = sp.get_recipient();
let content = sp.get_content();
let slate = sp.to_result_slate();
Ok((slate, content, sender, recipient))
}
/// Utility method to conver Slate into the Versioned Slate.
pub fn encrypt_slate<'a, T: ?Sized, C, K>(
w: &mut T,
keychain_mask: Option<&SecretKey>,
slate: &Slate,
version: Option<SlateVersion>,
content: SlatePurpose,
slatepack_recipient: Option<DalekPublicKey>,
address_index: Option<u32>,
use_test_rng: bool,
) -> Result<VersionedSlate, Error>
where
T: WalletBackend<'a, C, K>,
C: NodeClient + 'a,
K: Keychain + 'a,
{
let slatepack_format = slatepack_recipient.is_some() || version == Some(SlateVersion::SP);
if slatepack_format {
// Can be not encrypted slate binary if slatepack_recipient is_none
let (slatepack_secret, slatepack_pk) = {
let keychain = w.keychain(keychain_mask)?;
let slatepack_secret =
proofaddress::payment_proof_address_dalek_secret(&keychain, address_index)?;
let slatepack_pk = DalekPublicKey::from(&slatepack_secret);
(slatepack_secret, slatepack_pk)
};
Ok(VersionedSlate::into_version(
slate.clone(),
version.unwrap_or(SlateVersion::SP),
content,
slatepack_pk,
slatepack_recipient,
&slatepack_secret,
use_test_rng,
)?)
} else {
// Plain slate format
let version = version.unwrap_or(slate.lowest_version());
Ok(
VersionedSlate::into_version_plain(slate.clone(), version).map_err(|e| {
ErrorKind::SlatepackEncodeError(format!("Unable to build a slate, {}", e))
})?,
)
}
}
| {
// Add our contribution to the offset
ret_slate.adjust_offset(&keychain, &mut context)?;
} | conditional_block |
inventory.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: inventory.proto
package cisco_ios_xr_installmgr_admin_oper_install_software_inventory_active_inventories_inventory
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Inventory_KEYS struct {
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory_KEYS) Reset() { *m = Inventory_KEYS{} }
func (m *Inventory_KEYS) String() string { return proto.CompactTextString(m) }
func (*Inventory_KEYS) ProtoMessage() {}
func (*Inventory_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{0}
}
func (m *Inventory_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory_KEYS.Unmarshal(m, b)
}
func (m *Inventory_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory_KEYS.Marshal(b, m, deterministic)
}
func (m *Inventory_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory_KEYS.Merge(m, src)
}
func (m *Inventory_KEYS) XXX_Size() int {
return xxx_messageInfo_Inventory_KEYS.Size(m)
}
func (m *Inventory_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory_KEYS proto.InternalMessageInfo
func (m *Inventory_KEYS) GetNodeName() string {
if m != nil {
return m.NodeName
}
return ""
}
type PkgGroup struct {
DeviceName string `protobuf:"bytes,1,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgGroup) Reset() { *m = PkgGroup{} }
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32 |
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x95, 0xb6, 0xd0, 0xcd, 0xac, 0x4a, 0xc1, 0x42, 0x28, 0x88, 0x03, 0x51, 0x0e, 0x28,
0x12, 0x22, 0x87, 0x2d, 0x7f, 0xc4, 0x8d, 0x03, 0x1c, 0x2a, 0x24, 0x84, 0x02, 0x17, 0xb8, 0x58,
0xde, 0x64, 0x9a, 0x9a, 0xc6, 0x1e, 0xcb, 0x71, 0x02, 0x39, 0x21, 0x9e, 0x8b, 0xb7, 0xe0, 0x89,
0x90, 0x9d, 0x6c, 0x56, 0x7d, 0x80, 0xbd, 0x65, 0x7e, 0xdf, 0x17, 0xcf, 0x7c, 0xa3, 0x81, 0x73,
0xa9, 0x07, 0xd4, 0x8e, 0xec, 0x58, 0x18, 0x4b, 0x8e, 0xd8, 0xf7, 0x4a, 0x76, 0x15, 0x71, 0x49,
0x1d, 0xff, 0x65, 0xb9, 0xd4, 0x9d, 0x13, 0x6d, 0xab, 0x1a, 0xcb, 0x45, 0xad, 0xa4, 0xe6, 0x64,
0xd0, 0x16, 0x33, 0x2d, 0x3a, 0xba, 0x72, 0x3f, 0x85, 0x45, 0xbe, 0x7f, 0x44, 0x54, 0x4e, 0x0e,
0x58, 0xec, 0x80, 0xc4, 0x6e, 0xf9, 0x1e, 0xb3, 0x17, 0x70, 0x6f, 0x29, 0xf8, 0xc7, 0x0f, 0xdf,
0xbe, 0xb0, 0x27, 0x10, 0x6b, 0xaa, 0x91, 0x6b, 0xa1, 0x30, 0x89, 0xd2, 0x28, 0x8f, 0xcb, 0x95,
0x07, 0x9f, 0x84, 0xc2, 0xec, 0x1d, 0xc4, 0xe6, 0xa6, 0xe1, 0x8d, 0xa5, 0xde, 0xb0, 0xa7, 0xb0,
0xae, 0x71, 0x90, 0xd5, 0x2d, 0x2f, 0x4c, 0xc8, 0xbb, 0x19, 0x83, 0x93, 0xa0, 0x1c, 0x05, 0x25,
0x7c, 0x67, 0xff, 0x22, 0x58, 0xf9, 0x27, 0xa4, 0xbe, 0x22, 0xf6, 0x1b, 0x4e, 0x8d, 0xa8, 0x6e,
0x44, 0x33, 0xfd, 0xbd, 0xde, 0x60, 0x71, 0xb8, 0xac, 0xc5, 0x32, 0x79, 0xb9, 0xeb, 0xca, 0x12,
0x38, 0x1d, 0xd0, 0x76, 0x92, 0xf4, 0x3c, 0xe4, 0xae, 0x64, 0xcf, 0xe1, 0xc1, 0xb6, 0x97, 0x6d,
0x1d, 0x06, 0xb5, 0x4a, 0x38, 0xef, 0x39, 0x0e, 0x9e, 0xfb, 0x41, 0xb8, 0xdc, 0xf3, 0xec, 0xef,
0x11, 0xc4, 0x4b, 0x1f, 0xf6, 0x10, 0xee, 0x28, 0xf1, 0x83, 0x6c, 0xb2, 0x49, 0xa3, 0xfc, 0xac,
0x9c, 0x8a, 0x40, 0xa5, 0x26, 0x9b, 0x5c, 0xcc, 0xd4, 0x17, 0xec, 0x19, 0x9c, 0x6f, 0x89, 0x1c,
0x97, 0x4a, 0x34, 0xf3, 0x1e, 0x5f, 0x86, 0x26, 0x67, 0x1e, 0x5f, 0x7a, 0x1a, 0x56, 0xf9, 0x27,
0x82, 0xb8, 0x25, 0x51, 0x73, 0x23, 0xdc, 0x75, 0xf2, 0x2a, 0x3d, 0xce, 0xd7, 0x9b, 0xfa, 0xd0,
0xcb, 0xf2, 0xd1, 0xcb, 0x95, 0x6f, 0xfb, 0x59, 0xb8, 0xeb, 0xe5, 0x32, 0xdc, 0x68, 0x30, 0x79,
0x9d, 0x46, 0xf9, 0xc9, 0x74, 0x19, 0x5f, 0x47, 0x83, 0xec, 0x2d, 0x3c, 0xee, 0xb0, 0xea, 0x2d,
0xf2, 0x9a, 0x94, 0x90, 0x9a, 0x5b, 0xea, 0x1d, 0xda, 0x29, 0xd2, 0x9b, 0x10, 0xe9, 0xd1, 0x64,
0x78, 0x1f, 0xf4, 0x32, 0xc8, 0x3e, 0xdb, 0xf6, 0x6e, 0x38, 0xf3, 0x8b, 0xff, 0x01, 0x00, 0x00,
0xff, 0xff, 0xc8, 0x1f, 0xbd, 0xbe, 0xf9, 0x02, 0x00, 0x00,
}
| {
if m != nil {
return m.Minor
}
return 0
} | identifier_body |
inventory.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: inventory.proto
package cisco_ios_xr_installmgr_admin_oper_install_software_inventory_active_inventories_inventory
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Inventory_KEYS struct {
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` | func (m *Inventory_KEYS) String() string { return proto.CompactTextString(m) }
func (*Inventory_KEYS) ProtoMessage() {}
func (*Inventory_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{0}
}
func (m *Inventory_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory_KEYS.Unmarshal(m, b)
}
func (m *Inventory_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory_KEYS.Marshal(b, m, deterministic)
}
func (m *Inventory_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory_KEYS.Merge(m, src)
}
func (m *Inventory_KEYS) XXX_Size() int {
return xxx_messageInfo_Inventory_KEYS.Size(m)
}
func (m *Inventory_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory_KEYS proto.InternalMessageInfo
func (m *Inventory_KEYS) GetNodeName() string {
if m != nil {
return m.NodeName
}
return ""
}
type PkgGroup struct {
DeviceName string `protobuf:"bytes,1,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgGroup) Reset() { *m = PkgGroup{} }
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32 {
if m != nil {
return m.Minor
}
return 0
}
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x95, 0xb6, 0xd0, 0xcd, 0xac, 0x4a, 0xc1, 0x42, 0x28, 0x88, 0x03, 0x51, 0x0e, 0x28,
0x12, 0x22, 0x87, 0x2d, 0x7f, 0xc4, 0x8d, 0x03, 0x1c, 0x2a, 0x24, 0x84, 0x02, 0x17, 0xb8, 0x58,
0xde, 0x64, 0x9a, 0x9a, 0xc6, 0x1e, 0xcb, 0x71, 0x02, 0x39, 0x21, 0x9e, 0x8b, 0xb7, 0xe0, 0x89,
0x90, 0x9d, 0x6c, 0x56, 0x7d, 0x80, 0xbd, 0x65, 0x7e, 0xdf, 0x17, 0xcf, 0x7c, 0xa3, 0x81, 0x73,
0xa9, 0x07, 0xd4, 0x8e, 0xec, 0x58, 0x18, 0x4b, 0x8e, 0xd8, 0xf7, 0x4a, 0x76, 0x15, 0x71, 0x49,
0x1d, 0xff, 0x65, 0xb9, 0xd4, 0x9d, 0x13, 0x6d, 0xab, 0x1a, 0xcb, 0x45, 0xad, 0xa4, 0xe6, 0x64,
0xd0, 0x16, 0x33, 0x2d, 0x3a, 0xba, 0x72, 0x3f, 0x85, 0x45, 0xbe, 0x7f, 0x44, 0x54, 0x4e, 0x0e,
0x58, 0xec, 0x80, 0xc4, 0x6e, 0xf9, 0x1e, 0xb3, 0x17, 0x70, 0x6f, 0x29, 0xf8, 0xc7, 0x0f, 0xdf,
0xbe, 0xb0, 0x27, 0x10, 0x6b, 0xaa, 0x91, 0x6b, 0xa1, 0x30, 0x89, 0xd2, 0x28, 0x8f, 0xcb, 0x95,
0x07, 0x9f, 0x84, 0xc2, 0xec, 0x1d, 0xc4, 0xe6, 0xa6, 0xe1, 0x8d, 0xa5, 0xde, 0xb0, 0xa7, 0xb0,
0xae, 0x71, 0x90, 0xd5, 0x2d, 0x2f, 0x4c, 0xc8, 0xbb, 0x19, 0x83, 0x93, 0xa0, 0x1c, 0x05, 0x25,
0x7c, 0x67, 0xff, 0x22, 0x58, 0xf9, 0x27, 0xa4, 0xbe, 0x22, 0xf6, 0x1b, 0x4e, 0x8d, 0xa8, 0x6e,
0x44, 0x33, 0xfd, 0xbd, 0xde, 0x60, 0x71, 0xb8, 0xac, 0xc5, 0x32, 0x79, 0xb9, 0xeb, 0xca, 0x12,
0x38, 0x1d, 0xd0, 0x76, 0x92, 0xf4, 0x3c, 0xe4, 0xae, 0x64, 0xcf, 0xe1, 0xc1, 0xb6, 0x97, 0x6d,
0x1d, 0x06, 0xb5, 0x4a, 0x38, 0xef, 0x39, 0x0e, 0x9e, 0xfb, 0x41, 0xb8, 0xdc, 0xf3, 0xec, 0xef,
0x11, 0xc4, 0x4b, 0x1f, 0xf6, 0x10, 0xee, 0x28, 0xf1, 0x83, 0x6c, 0xb2, 0x49, 0xa3, 0xfc, 0xac,
0x9c, 0x8a, 0x40, 0xa5, 0x26, 0x9b, 0x5c, 0xcc, 0xd4, 0x17, 0xec, 0x19, 0x9c, 0x6f, 0x89, 0x1c,
0x97, 0x4a, 0x34, 0xf3, 0x1e, 0x5f, 0x86, 0x26, 0x67, 0x1e, 0x5f, 0x7a, 0x1a, 0x56, 0xf9, 0x27,
0x82, 0xb8, 0x25, 0x51, 0x73, 0x23, 0xdc, 0x75, 0xf2, 0x2a, 0x3d, 0xce, 0xd7, 0x9b, 0xfa, 0xd0,
0xcb, 0xf2, 0xd1, 0xcb, 0x95, 0x6f, 0xfb, 0x59, 0xb8, 0xeb, 0xe5, 0x32, 0xdc, 0x68, 0x30, 0x79,
0x9d, 0x46, 0xf9, 0xc9, 0x74, 0x19, 0x5f, 0x47, 0x83, 0xec, 0x2d, 0x3c, 0xee, 0xb0, 0xea, 0x2d,
0xf2, 0x9a, 0x94, 0x90, 0x9a, 0x5b, 0xea, 0x1d, 0xda, 0x29, 0xd2, 0x9b, 0x10, 0xe9, 0xd1, 0x64,
0x78, 0x1f, 0xf4, 0x32, 0xc8, 0x3e, 0xdb, 0xf6, 0x6e, 0x38, 0xf3, 0x8b, 0xff, 0x01, 0x00, 0x00,
0xff, 0xff, 0xc8, 0x1f, 0xbd, 0xbe, 0xf9, 0x02, 0x00, 0x00,
} | XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory_KEYS) Reset() { *m = Inventory_KEYS{} } | random_line_split |
inventory.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: inventory.proto
package cisco_ios_xr_installmgr_admin_oper_install_software_inventory_active_inventories_inventory
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Inventory_KEYS struct {
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory_KEYS) Reset() { *m = Inventory_KEYS{} }
func (m *Inventory_KEYS) String() string { return proto.CompactTextString(m) }
func (*Inventory_KEYS) ProtoMessage() {}
func (*Inventory_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{0}
}
func (m *Inventory_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory_KEYS.Unmarshal(m, b)
}
func (m *Inventory_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory_KEYS.Marshal(b, m, deterministic)
}
func (m *Inventory_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory_KEYS.Merge(m, src)
}
func (m *Inventory_KEYS) XXX_Size() int {
return xxx_messageInfo_Inventory_KEYS.Size(m)
}
func (m *Inventory_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory_KEYS proto.InternalMessageInfo
func (m *Inventory_KEYS) GetNodeName() string {
if m != nil {
return m.NodeName
}
return ""
}
type PkgGroup struct {
DeviceName string `protobuf:"bytes,1,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgGroup) Reset() { *m = PkgGroup{} }
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil |
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32 {
if m != nil {
return m.Minor
}
return 0
}
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x95, 0xb6, 0xd0, 0xcd, 0xac, 0x4a, 0xc1, 0x42, 0x28, 0x88, 0x03, 0x51, 0x0e, 0x28,
0x12, 0x22, 0x87, 0x2d, 0x7f, 0xc4, 0x8d, 0x03, 0x1c, 0x2a, 0x24, 0x84, 0x02, 0x17, 0xb8, 0x58,
0xde, 0x64, 0x9a, 0x9a, 0xc6, 0x1e, 0xcb, 0x71, 0x02, 0x39, 0x21, 0x9e, 0x8b, 0xb7, 0xe0, 0x89,
0x90, 0x9d, 0x6c, 0x56, 0x7d, 0x80, 0xbd, 0x65, 0x7e, 0xdf, 0x17, 0xcf, 0x7c, 0xa3, 0x81, 0x73,
0xa9, 0x07, 0xd4, 0x8e, 0xec, 0x58, 0x18, 0x4b, 0x8e, 0xd8, 0xf7, 0x4a, 0x76, 0x15, 0x71, 0x49,
0x1d, 0xff, 0x65, 0xb9, 0xd4, 0x9d, 0x13, 0x6d, 0xab, 0x1a, 0xcb, 0x45, 0xad, 0xa4, 0xe6, 0x64,
0xd0, 0x16, 0x33, 0x2d, 0x3a, 0xba, 0x72, 0x3f, 0x85, 0x45, 0xbe, 0x7f, 0x44, 0x54, 0x4e, 0x0e,
0x58, 0xec, 0x80, 0xc4, 0x6e, 0xf9, 0x1e, 0xb3, 0x17, 0x70, 0x6f, 0x29, 0xf8, 0xc7, 0x0f, 0xdf,
0xbe, 0xb0, 0x27, 0x10, 0x6b, 0xaa, 0x91, 0x6b, 0xa1, 0x30, 0x89, 0xd2, 0x28, 0x8f, 0xcb, 0x95,
0x07, 0x9f, 0x84, 0xc2, 0xec, 0x1d, 0xc4, 0xe6, 0xa6, 0xe1, 0x8d, 0xa5, 0xde, 0xb0, 0xa7, 0xb0,
0xae, 0x71, 0x90, 0xd5, 0x2d, 0x2f, 0x4c, 0xc8, 0xbb, 0x19, 0x83, 0x93, 0xa0, 0x1c, 0x05, 0x25,
0x7c, 0x67, 0xff, 0x22, 0x58, 0xf9, 0x27, 0xa4, 0xbe, 0x22, 0xf6, 0x1b, 0x4e, 0x8d, 0xa8, 0x6e,
0x44, 0x33, 0xfd, 0xbd, 0xde, 0x60, 0x71, 0xb8, 0xac, 0xc5, 0x32, 0x79, 0xb9, 0xeb, 0xca, 0x12,
0x38, 0x1d, 0xd0, 0x76, 0x92, 0xf4, 0x3c, 0xe4, 0xae, 0x64, 0xcf, 0xe1, 0xc1, 0xb6, 0x97, 0x6d,
0x1d, 0x06, 0xb5, 0x4a, 0x38, 0xef, 0x39, 0x0e, 0x9e, 0xfb, 0x41, 0xb8, 0xdc, 0xf3, 0xec, 0xef,
0x11, 0xc4, 0x4b, 0x1f, 0xf6, 0x10, 0xee, 0x28, 0xf1, 0x83, 0x6c, 0xb2, 0x49, 0xa3, 0xfc, 0xac,
0x9c, 0x8a, 0x40, 0xa5, 0x26, 0x9b, 0x5c, 0xcc, 0xd4, 0x17, 0xec, 0x19, 0x9c, 0x6f, 0x89, 0x1c,
0x97, 0x4a, 0x34, 0xf3, 0x1e, 0x5f, 0x86, 0x26, 0x67, 0x1e, 0x5f, 0x7a, 0x1a, 0x56, 0xf9, 0x27,
0x82, 0xb8, 0x25, 0x51, 0x73, 0x23, 0xdc, 0x75, 0xf2, 0x2a, 0x3d, 0xce, 0xd7, 0x9b, 0xfa, 0xd0,
0xcb, 0xf2, 0xd1, 0xcb, 0x95, 0x6f, 0xfb, 0x59, 0xb8, 0xeb, 0xe5, 0x32, 0xdc, 0x68, 0x30, 0x79,
0x9d, 0x46, 0xf9, 0xc9, 0x74, 0x19, 0x5f, 0x47, 0x83, 0xec, 0x2d, 0x3c, 0xee, 0xb0, 0xea, 0x2d,
0xf2, 0x9a, 0x94, 0x90, 0x9a, 0x5b, 0xea, 0x1d, 0xda, 0x29, 0xd2, 0x9b, 0x10, 0xe9, 0xd1, 0x64,
0x78, 0x1f, 0xf4, 0x32, 0xc8, 0x3e, 0xdb, 0xf6, 0x6e, 0x38, 0xf3, 0x8b, 0xff, 0x01, 0x00, 0x00,
0xff, 0xff, 0xc8, 0x1f, 0xbd, 0xbe, 0xf9, 0x02, 0x00, 0x00,
}
| {
return m.Version
} | conditional_block |
inventory.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: inventory.proto
package cisco_ios_xr_installmgr_admin_oper_install_software_inventory_active_inventories_inventory
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Inventory_KEYS struct {
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory_KEYS) Reset() { *m = Inventory_KEYS{} }
func (m *Inventory_KEYS) String() string { return proto.CompactTextString(m) }
func (*Inventory_KEYS) ProtoMessage() {}
func (*Inventory_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{0}
}
func (m *Inventory_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory_KEYS.Unmarshal(m, b)
}
func (m *Inventory_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory_KEYS.Marshal(b, m, deterministic)
}
func (m *Inventory_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory_KEYS.Merge(m, src)
}
func (m *Inventory_KEYS) XXX_Size() int {
return xxx_messageInfo_Inventory_KEYS.Size(m)
}
func (m *Inventory_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory_KEYS proto.InternalMessageInfo
func (m *Inventory_KEYS) GetNodeName() string {
if m != nil {
return m.NodeName
}
return ""
}
type PkgGroup struct {
DeviceName string `protobuf:"bytes,1,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgGroup) Reset() { *m = PkgGroup{} }
func (m *PkgGroup) String() string { return proto.CompactTextString(m) }
func (*PkgGroup) ProtoMessage() {}
func (*PkgGroup) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{1}
}
func (m *PkgGroup) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgGroup.Unmarshal(m, b)
}
func (m *PkgGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgGroup.Marshal(b, m, deterministic)
}
func (m *PkgGroup) | (src proto.Message) {
xxx_messageInfo_PkgGroup.Merge(m, src)
}
func (m *PkgGroup) XXX_Size() int {
return xxx_messageInfo_PkgGroup.Size(m)
}
func (m *PkgGroup) XXX_DiscardUnknown() {
xxx_messageInfo_PkgGroup.DiscardUnknown(m)
}
var xxx_messageInfo_PkgGroup proto.InternalMessageInfo
func (m *PkgGroup) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *PkgGroup) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type PkgInfo struct {
Package *PkgGroup `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
BuildInformation string `protobuf:"bytes,3,opt,name=build_information,json=buildInformation,proto3" json:"build_information,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PkgInfo) Reset() { *m = PkgInfo{} }
func (m *PkgInfo) String() string { return proto.CompactTextString(m) }
func (*PkgInfo) ProtoMessage() {}
func (*PkgInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{2}
}
func (m *PkgInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PkgInfo.Unmarshal(m, b)
}
func (m *PkgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PkgInfo.Marshal(b, m, deterministic)
}
func (m *PkgInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PkgInfo.Merge(m, src)
}
func (m *PkgInfo) XXX_Size() int {
return xxx_messageInfo_PkgInfo.Size(m)
}
func (m *PkgInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PkgInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PkgInfo proto.InternalMessageInfo
func (m *PkgInfo) GetPackage() *PkgGroup {
if m != nil {
return m.Package
}
return nil
}
func (m *PkgInfo) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *PkgInfo) GetBuildInformation() string {
if m != nil {
return m.BuildInformation
}
return ""
}
type Inventory struct {
Major uint32 `protobuf:"varint,50,opt,name=major,proto3" json:"major,omitempty"`
Minor uint32 `protobuf:"varint,51,opt,name=minor,proto3" json:"minor,omitempty"`
BootImageName string `protobuf:"bytes,52,opt,name=boot_image_name,json=bootImageName,proto3" json:"boot_image_name,omitempty"`
LoadPath []*PkgInfo `protobuf:"bytes,53,rep,name=load_path,json=loadPath,proto3" json:"load_path,omitempty"`
NodeType uint64 `protobuf:"varint,54,opt,name=node_type,json=nodeType,proto3" json:"node_type,omitempty"`
SecureDomainRouterName string `protobuf:"bytes,55,opt,name=secure_domain_router_name,json=secureDomainRouterName,proto3" json:"secure_domain_router_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Inventory) Reset() { *m = Inventory{} }
func (m *Inventory) String() string { return proto.CompactTextString(m) }
func (*Inventory) ProtoMessage() {}
func (*Inventory) Descriptor() ([]byte, []int) {
return fileDescriptor_7173caedb7c6ae96, []int{3}
}
func (m *Inventory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Inventory.Unmarshal(m, b)
}
func (m *Inventory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Inventory.Marshal(b, m, deterministic)
}
func (m *Inventory) XXX_Merge(src proto.Message) {
xxx_messageInfo_Inventory.Merge(m, src)
}
func (m *Inventory) XXX_Size() int {
return xxx_messageInfo_Inventory.Size(m)
}
func (m *Inventory) XXX_DiscardUnknown() {
xxx_messageInfo_Inventory.DiscardUnknown(m)
}
var xxx_messageInfo_Inventory proto.InternalMessageInfo
func (m *Inventory) GetMajor() uint32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Inventory) GetMinor() uint32 {
if m != nil {
return m.Minor
}
return 0
}
func (m *Inventory) GetBootImageName() string {
if m != nil {
return m.BootImageName
}
return ""
}
func (m *Inventory) GetLoadPath() []*PkgInfo {
if m != nil {
return m.LoadPath
}
return nil
}
func (m *Inventory) GetNodeType() uint64 {
if m != nil {
return m.NodeType
}
return 0
}
func (m *Inventory) GetSecureDomainRouterName() string {
if m != nil {
return m.SecureDomainRouterName
}
return ""
}
func init() {
proto.RegisterType((*Inventory_KEYS)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory_KEYS")
proto.RegisterType((*PkgGroup)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_group")
proto.RegisterType((*PkgInfo)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.pkg_info")
proto.RegisterType((*Inventory)(nil), "cisco_ios_xr_installmgr_admin_oper.install.software_inventory.active.inventories.inventory.inventory")
}
func init() { proto.RegisterFile("inventory.proto", fileDescriptor_7173caedb7c6ae96) }
var fileDescriptor_7173caedb7c6ae96 = []byte{
// 394 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x95, 0xb6, 0xd0, 0xcd, 0xac, 0x4a, 0xc1, 0x42, 0x28, 0x88, 0x03, 0x51, 0x0e, 0x28,
0x12, 0x22, 0x87, 0x2d, 0x7f, 0xc4, 0x8d, 0x03, 0x1c, 0x2a, 0x24, 0x84, 0x02, 0x17, 0xb8, 0x58,
0xde, 0x64, 0x9a, 0x9a, 0xc6, 0x1e, 0xcb, 0x71, 0x02, 0x39, 0x21, 0x9e, 0x8b, 0xb7, 0xe0, 0x89,
0x90, 0x9d, 0x6c, 0x56, 0x7d, 0x80, 0xbd, 0x65, 0x7e, 0xdf, 0x17, 0xcf, 0x7c, 0xa3, 0x81, 0x73,
0xa9, 0x07, 0xd4, 0x8e, 0xec, 0x58, 0x18, 0x4b, 0x8e, 0xd8, 0xf7, 0x4a, 0x76, 0x15, 0x71, 0x49,
0x1d, 0xff, 0x65, 0xb9, 0xd4, 0x9d, 0x13, 0x6d, 0xab, 0x1a, 0xcb, 0x45, 0xad, 0xa4, 0xe6, 0x64,
0xd0, 0x16, 0x33, 0x2d, 0x3a, 0xba, 0x72, 0x3f, 0x85, 0x45, 0xbe, 0x7f, 0x44, 0x54, 0x4e, 0x0e,
0x58, 0xec, 0x80, 0xc4, 0x6e, 0xf9, 0x1e, 0xb3, 0x17, 0x70, 0x6f, 0x29, 0xf8, 0xc7, 0x0f, 0xdf,
0xbe, 0xb0, 0x27, 0x10, 0x6b, 0xaa, 0x91, 0x6b, 0xa1, 0x30, 0x89, 0xd2, 0x28, 0x8f, 0xcb, 0x95,
0x07, 0x9f, 0x84, 0xc2, 0xec, 0x1d, 0xc4, 0xe6, 0xa6, 0xe1, 0x8d, 0xa5, 0xde, 0xb0, 0xa7, 0xb0,
0xae, 0x71, 0x90, 0xd5, 0x2d, 0x2f, 0x4c, 0xc8, 0xbb, 0x19, 0x83, 0x93, 0xa0, 0x1c, 0x05, 0x25,
0x7c, 0x67, 0xff, 0x22, 0x58, 0xf9, 0x27, 0xa4, 0xbe, 0x22, 0xf6, 0x1b, 0x4e, 0x8d, 0xa8, 0x6e,
0x44, 0x33, 0xfd, 0xbd, 0xde, 0x60, 0x71, 0xb8, 0xac, 0xc5, 0x32, 0x79, 0xb9, 0xeb, 0xca, 0x12,
0x38, 0x1d, 0xd0, 0x76, 0x92, 0xf4, 0x3c, 0xe4, 0xae, 0x64, 0xcf, 0xe1, 0xc1, 0xb6, 0x97, 0x6d,
0x1d, 0x06, 0xb5, 0x4a, 0x38, 0xef, 0x39, 0x0e, 0x9e, 0xfb, 0x41, 0xb8, 0xdc, 0xf3, 0xec, 0xef,
0x11, 0xc4, 0x4b, 0x1f, 0xf6, 0x10, 0xee, 0x28, 0xf1, 0x83, 0x6c, 0xb2, 0x49, 0xa3, 0xfc, 0xac,
0x9c, 0x8a, 0x40, 0xa5, 0x26, 0x9b, 0x5c, 0xcc, 0xd4, 0x17, 0xec, 0x19, 0x9c, 0x6f, 0x89, 0x1c,
0x97, 0x4a, 0x34, 0xf3, 0x1e, 0x5f, 0x86, 0x26, 0x67, 0x1e, 0x5f, 0x7a, 0x1a, 0x56, 0xf9, 0x27,
0x82, 0xb8, 0x25, 0x51, 0x73, 0x23, 0xdc, 0x75, 0xf2, 0x2a, 0x3d, 0xce, 0xd7, 0x9b, 0xfa, 0xd0,
0xcb, 0xf2, 0xd1, 0xcb, 0x95, 0x6f, 0xfb, 0x59, 0xb8, 0xeb, 0xe5, 0x32, 0xdc, 0x68, 0x30, 0x79,
0x9d, 0x46, 0xf9, 0xc9, 0x74, 0x19, 0x5f, 0x47, 0x83, 0xec, 0x2d, 0x3c, 0xee, 0xb0, 0xea, 0x2d,
0xf2, 0x9a, 0x94, 0x90, 0x9a, 0x5b, 0xea, 0x1d, 0xda, 0x29, 0xd2, 0x9b, 0x10, 0xe9, 0xd1, 0x64,
0x78, 0x1f, 0xf4, 0x32, 0xc8, 0x3e, 0xdb, 0xf6, 0x6e, 0x38, 0xf3, 0x8b, 0xff, 0x01, 0x00, 0x00,
0xff, 0xff, 0xc8, 0x1f, 0xbd, 0xbe, 0xf9, 0x02, 0x00, 0x00,
}
| XXX_Merge | identifier_name |
Serigne.py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # Matlab-style plotting
import seaborn as sns
import warnings
from scipy import stats
from scipy.stats import norm, skew #for some statistics
from subprocess import check_output
color = sns.color_palette()
sns.set_style('darkgrid')
############ Build Environment ############
############################################
def ignore_warn(*args, **kwargs):
pass
warnings.warn = ignore_warn # ignore annoying warning (from sklearn and seaborn)
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points
# print(check_output(["ls", "../data"]).decode("utf8")) # check the files available in the directory
############################################
#Now let's import and put the train and test datasets in pandas dataframe
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
##display the first five rows of the train dataset.
# print(train.head(5))
##display the first five rows of the test dataset.
# print(test.head(5))
############ Inro ############
############################################
#check the numbers of samples and features
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
print('\n\n')
print("The train ID :\n {} ".format(np.array(train_ID)))
print("The test ID :\n {} ".format(np.array(test_ID)))
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
#check again the data size after dropping the 'Id' variable
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDART
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data ,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy fuction log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
|
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidth = 1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the area of each street connected to the house property most likely have a similar area to other houses in its neighborhood ,
# we can fill in missing values by the median LotFrontage of the neighborhood.
# Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero for having no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA .
# Since the house with 'NoSewa' is in the training set, this feature won't help in predictive modelling.
# We can then safely remove it.
all_data = all_data.drop(['Utilities'], axis=1)
# Functional : data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# KitchenQual: Only one NA value, and same as Electrical,
# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# Exterior1st and Exterior2nd :
# Again Both Exterior 1 & 2 have only one missing value. We will just substitute in the most common string
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# SaleType : Fill in again with most frequent which is "WD"
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# MSSubClass : Na most likely means No building class. We can replace missing values with None
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
########## Is there any remaining missing value ? ##########
#Check remaining missing values if any
print('\n\n\n\n')
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
############################################################
############## More features engeneering ############
##### Transforming some numerical variables that are really categorical #####
#MSSubClass=The building class
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#Changing OverallCond into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
#Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
# Label Encoding some categorical variables that may contain information in their ordering set
# --------------->>>>>>>> LabelEncoder
# --------------->>>>>>>> WHY ?!?!?!?!
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# shape
print('Shape all_data: {}'.format(all_data.shape))
######### Adding one more important feature ##########
# Since area related features are very important to determine house prices,
# we add one more feature which is the total area of basement, first and second floor areas of each house
# Adding total sqfootage feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
############## Skewed features ############
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
print(skewness.head(10))
##### Box Cox Transformation of (highly) skewed features #####
# --------------->>>>>>>> WHAT ?!?!?!?!
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
#all_data[skewed_features] = np.log1p(all_data[skewed_features])
# print('\n\n\n\t\t\tSKEW:')
# sk = pd.DataFrame({'Skew' :all_data[feat]})
# print(all_data.head(10))
# Getting dummy categorical features
all_data = pd.get_dummies(all_data)
print(all_data.shape)
train = all_data[:ntrain]
test = all_data[ntrain:]
############################################################
############## Modeling ############
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb | # #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show() | random_line_split |
Serigne.py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # Matlab-style plotting
import seaborn as sns
import warnings
from scipy import stats
from scipy.stats import norm, skew #for some statistics
from subprocess import check_output
color = sns.color_palette()
sns.set_style('darkgrid')
############ Build Environment ############
############################################
def ignore_warn(*args, **kwargs):
|
warnings.warn = ignore_warn # ignore annoying warning (from sklearn and seaborn)
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points
# print(check_output(["ls", "../data"]).decode("utf8")) # check the files available in the directory
############################################
#Now let's import and put the train and test datasets in pandas dataframe
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
##display the first five rows of the train dataset.
# print(train.head(5))
##display the first five rows of the test dataset.
# print(test.head(5))
############ Inro ############
############################################
#check the numbers of samples and features
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
print('\n\n')
print("The train ID :\n {} ".format(np.array(train_ID)))
print("The test ID :\n {} ".format(np.array(test_ID)))
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
#check again the data size after dropping the 'Id' variable
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDART
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data ,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy fuction log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidth = 1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the area of each street connected to the house property most likely have a similar area to other houses in its neighborhood ,
# we can fill in missing values by the median LotFrontage of the neighborhood.
# Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero for having no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA .
# Since the house with 'NoSewa' is in the training set, this feature won't help in predictive modelling.
# We can then safely remove it.
all_data = all_data.drop(['Utilities'], axis=1)
# Functional : data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# KitchenQual: Only one NA value, and same as Electrical,
# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# Exterior1st and Exterior2nd :
# Again Both Exterior 1 & 2 have only one missing value. We will just substitute in the most common string
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# SaleType : Fill in again with most frequent which is "WD"
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# MSSubClass : Na most likely means No building class. We can replace missing values with None
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
########## Is there any remaining missing value ? ##########
#Check remaining missing values if any
print('\n\n\n\n')
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
############################################################
############## More features engeneering ############
##### Transforming some numerical variables that are really categorical #####
#MSSubClass=The building class
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#Changing OverallCond into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
#Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
# Label Encoding some categorical variables that may contain information in their ordering set
# --------------->>>>>>>> LabelEncoder
# --------------->>>>>>>> WHY ?!?!?!?!
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# shape
print('Shape all_data: {}'.format(all_data.shape))
######### Adding one more important feature ##########
# Since area related features are very important to determine house prices,
# we add one more feature which is the total area of basement, first and second floor areas of each house
# Adding total sqfootage feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
############## Skewed features ############
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
print(skewness.head(10))
##### Box Cox Transformation of (highly) skewed features #####
# --------------->>>>>>>> WHAT ?!?!?!?!
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
#all_data[skewed_features] = np.log1p(all_data[skewed_features])
# print('\n\n\n\t\t\tSKEW:')
# sk = pd.DataFrame({'Skew' :all_data[feat]})
# print(all_data.head(10))
# Getting dummy categorical features
all_data = pd.get_dummies(all_data)
print(all_data.shape)
train = all_data[:ntrain]
test = all_data[ntrain:]
############################################################
############## Modeling ############
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
| pass | identifier_body |
Serigne.py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # Matlab-style plotting
import seaborn as sns
import warnings
from scipy import stats
from scipy.stats import norm, skew #for some statistics
from subprocess import check_output
color = sns.color_palette()
sns.set_style('darkgrid')
############ Build Environment ############
############################################
def ignore_warn(*args, **kwargs):
pass
warnings.warn = ignore_warn # ignore annoying warning (from sklearn and seaborn)
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points
# print(check_output(["ls", "../data"]).decode("utf8")) # check the files available in the directory
############################################
#Now let's import and put the train and test datasets in pandas dataframe
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
##display the first five rows of the train dataset.
# print(train.head(5))
##display the first five rows of the test dataset.
# print(test.head(5))
############ Inro ############
############################################
#check the numbers of samples and features
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
print('\n\n')
print("The train ID :\n {} ".format(np.array(train_ID)))
print("The test ID :\n {} ".format(np.array(test_ID)))
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
#check again the data size after dropping the 'Id' variable
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDART
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data ,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy fuction log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidth = 1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the area of each street connected to the house property most likely have a similar area to other houses in its neighborhood ,
# we can fill in missing values by the median LotFrontage of the neighborhood.
# Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero for having no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
|
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA .
# Since the house with 'NoSewa' is in the training set, this feature won't help in predictive modelling.
# We can then safely remove it.
all_data = all_data.drop(['Utilities'], axis=1)
# Functional : data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# KitchenQual: Only one NA value, and same as Electrical,
# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# Exterior1st and Exterior2nd :
# Again Both Exterior 1 & 2 have only one missing value. We will just substitute in the most common string
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# SaleType : Fill in again with most frequent which is "WD"
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# MSSubClass : Na most likely means No building class. We can replace missing values with None
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
########## Is there any remaining missing value ? ##########
#Check remaining missing values if any
print('\n\n\n\n')
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
############################################################
############## More features engeneering ############
##### Transforming some numerical variables that are really categorical #####
#MSSubClass=The building class
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#Changing OverallCond into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
#Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
# Label Encoding some categorical variables that may contain information in their ordering set
# --------------->>>>>>>> LabelEncoder
# --------------->>>>>>>> WHY ?!?!?!?!
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# shape
print('Shape all_data: {}'.format(all_data.shape))
######### Adding one more important feature ##########
# Since area related features are very important to determine house prices,
# we add one more feature which is the total area of basement, first and second floor areas of each house
# Adding total sqfootage feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
############## Skewed features ############
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
print(skewness.head(10))
##### Box Cox Transformation of (highly) skewed features #####
# --------------->>>>>>>> WHAT ?!?!?!?!
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
#all_data[skewed_features] = np.log1p(all_data[skewed_features])
# print('\n\n\n\t\t\tSKEW:')
# sk = pd.DataFrame({'Skew' :all_data[feat]})
# print(all_data.head(10))
# Getting dummy categorical features
all_data = pd.get_dummies(all_data)
print(all_data.shape)
train = all_data[:ntrain]
test = all_data[ntrain:]
############################################################
############## Modeling ############
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
| all_data[col] = all_data[col].fillna(0) | conditional_block |
Serigne.py | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # Matlab-style plotting
import seaborn as sns
import warnings
from scipy import stats
from scipy.stats import norm, skew #for some statistics
from subprocess import check_output
color = sns.color_palette()
sns.set_style('darkgrid')
############ Build Environment ############
############################################
def | (*args, **kwargs):
pass
warnings.warn = ignore_warn # ignore annoying warning (from sklearn and seaborn)
pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) # Limiting floats output to 3 decimal points
# print(check_output(["ls", "../data"]).decode("utf8")) # check the files available in the directory
############################################
#Now let's import and put the train and test datasets in pandas dataframe
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
##display the first five rows of the train dataset.
# print(train.head(5))
##display the first five rows of the test dataset.
# print(test.head(5))
############ Inro ############
############################################
#check the numbers of samples and features
print("The train data size before dropping Id feature is : {} ".format(train.shape))
print("The test data size before dropping Id feature is : {} ".format(test.shape))
#Save the 'Id' column
train_ID = train['Id']
test_ID = test['Id']
print('\n\n')
print("The train ID :\n {} ".format(np.array(train_ID)))
print("The test ID :\n {} ".format(np.array(test_ID)))
#Now drop the 'Id' colum since it's unnecessary for the prediction process.
train.drop("Id", axis = 1, inplace = True)
test.drop("Id", axis = 1, inplace = True)
#check again the data size after dropping the 'Id' variable
print("\nThe train data size after dropping Id feature is : {} ".format(train.shape))
print("The test data size after dropping Id feature is : {} ".format(test.shape))
############################################
############ Data Processing ############
################################################
############## Outliers ############
# fig, ax = plt.subplots()
# ax.scatter(x = train['GrLivArea'], y = train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
#Deleting outliers
train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index)
#Check the graphic again
# fig, ax = plt.subplots() # GRAPH 1
# ax.scatter(train['GrLivArea'], train['SalePrice'])
# plt.ylabel('SalePrice', fontsize=13)
# plt.xlabel('GrLivArea', fontsize=13)
############## Target variable #############
# Uncomment to PLOT
# # SalePrice is the variable we need to predict. So let's do some analysis on this variable first.
# sns.distplot(train['SalePrice'] , fit=norm) # GRAPH 2
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice']) # STANDART
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure() # GRAPH 3
# res = stats.probplot(train['SalePrice'], plot=plt)
# # The target variable is right skewed. As (linear) models love normally distributed data ,
# # we need to transform this variable and make it more normally distributed.
################################################################################
############## Log-transformation of the target variable ############
# Uncomment to PLOT
# #We use the numpy fuction log1p which applies log(1+x) to all elements of the column
# train["SalePrice"] = np.log1p(train["SalePrice"])
# #Check the new distribution
# sns.distplot(train['SalePrice'] , fit=norm)
# # Get the fitted parameters used by the function
# (mu, sigma) = norm.fit(train['SalePrice'])
# print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
# #Now plot the distribution
# plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
# loc='best')
# plt.ylabel('Frequency')
# plt.title('SalePrice distribution')
# #Get also the QQ-plot
# fig = plt.figure()
# res = stats.probplot(train['SalePrice'], plot=plt)
# plt.show()
############################################################
############## Features engineering ############
# let's first concatenate the train and test data in the same dataframe
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print("\nall_data size is : {}".format(all_data.shape))
##### Missing Data. #####
# Uncomment to PLOT
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
print(missing_data.head(20))
# f, ax = plt.subplots(figsize=(15, 12)) # GRAPH 4
# plt.xticks(rotation='90')
# sns.barplot(x=all_data_na.index, y=all_data_na)
# plt.xlabel('Features', fontsize=15)
# plt.ylabel('Percent of missing values', fontsize=15)
# plt.title('Percent missing data by feature', fontsize=15)
# plt.show()
############################################################
############## Data Correlation ############
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True, linewidth = 1)
plt.show()
############################################################
############## Imputing missing values ############
# --------------->>>>>>>> groupby() and mode()
all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
# LotFrontage : Since the area of each street connected to the house property most likely have a similar area to other houses in its neighborhood ,
# we can fill in missing values by the median LotFrontage of the neighborhood.
# Group by neighborhood and fill in missing value by the median LotFrontage of all the neighborhood
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))
# GarageType, GarageFinish, GarageQual and GarageCond : Replacing missing data with None
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
# GarageYrBlt, GarageArea and GarageCars : Replacing missing data with 0 (Since No garage = no cars in such garage.)
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
# BsmtFinSF1, BsmtFinSF2, BsmtUnfSF, TotalBsmtSF, BsmtFullBath and BsmtHalfBath :
# missing values are likely zero for having no basement
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1 and BsmtFinType2 :
# For all these categorical basement-related features, NaN means that there is no basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
# MasVnrArea and MasVnrType :
# NA most likely means no masonry veneer for these houses. We can fill 0 for the area and None for the type.
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)
##### !!!!! #####
# MSZoning (The general zoning classification) :
# 'RL' is by far the most common value. So we can fill in missing values with 'RL'
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode()[0])
# Utilities : For this categorical feature all records are "AllPub", except for one "NoSeWa" and 2 NA .
# Since the house with 'NoSewa' is in the training set, this feature won't help in predictive modelling.
# We can then safely remove it.
all_data = all_data.drop(['Utilities'], axis=1)
# Functional : data description says NA means typical
all_data["Functional"] = all_data["Functional"].fillna("Typ")
# Electrical : It has one NA value. Since this feature has mostly 'SBrkr', we can set that for the missing value.
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode()[0])
# KitchenQual: Only one NA value, and same as Electrical,
# we set 'TA' (which is the most frequent) for the missing value in KitchenQual.
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode()[0])
# Exterior1st and Exterior2nd :
# Again Both Exterior 1 & 2 have only one missing value. We will just substitute in the most common string
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode()[0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode()[0])
# SaleType : Fill in again with most frequent which is "WD"
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode()[0])
# MSSubClass : Na most likely means No building class. We can replace missing values with None
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")
########## Is there any remaining missing value ? ##########
#Check remaining missing values if any
print('\n\n\n\n')
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()
############################################################
############## More features engeneering ############
##### Transforming some numerical variables that are really categorical #####
#MSSubClass=The building class
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#Changing OverallCond into a categorical variable
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
#Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)
# Label Encoding some categorical variables that may contain information in their ordering set
# --------------->>>>>>>> LabelEncoder
# --------------->>>>>>>> WHY ?!?!?!?!
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
# process columns, apply LabelEncoder to categorical features
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
# shape
print('Shape all_data: {}'.format(all_data.shape))
######### Adding one more important feature ##########
# Since area related features are very important to determine house prices,
# we add one more feature which is the total area of basement, first and second floor areas of each house
# Adding total sqfootage feature
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']
############## Skewed features ############
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check the skew of all numerical features
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
print(skewness.head(10))
##### Box Cox Transformation of (highly) skewed features #####
# --------------->>>>>>>> WHAT ?!?!?!?!
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#all_data[feat] += 1
all_data[feat] = boxcox1p(all_data[feat], lam)
#all_data[skewed_features] = np.log1p(all_data[skewed_features])
# print('\n\n\n\t\t\tSKEW:')
# sk = pd.DataFrame({'Skew' :all_data[feat]})
# print(all_data.head(10))
# Getting dummy categorical features
all_data = pd.get_dummies(all_data)
print(all_data.shape)
train = all_data[:ntrain]
test = all_data[ntrain:]
############################################################
############## Modeling ############
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
| ignore_warn | identifier_name |
main.py | # coding=utf-8
# Martin Svonava
# program sluzi na parsovanie HTML rozvrhov na UMBcke
# a tvorbu XMLka ktore sa da dalej spracovavat
# >>>> Struktura dat <<<<
# [0] Nazov hodiny
# [1] Vyucujuci
# [2] Ucebna
# [3] Den
# [4] Zaciatok
# [5] Trvanie
# [6] Nazov Triedy pre ktoru rozvrh plati
from operator import attrgetter
import ODTCreator
import SchoolClass
import itertools
import subprocess
import os
import requests
from bs4 import BeautifulSoup
from lxml import etree
# URL Kde sa nachadzaju rozvrhy
url = "http://www.pdf.umb.sk/~jsedliak/Public/"
# "konstanty" ktore rozhoduju o tom ake XMLka sa generuju
XML_ONLY = 0
ODT_ONLY = 1
BOTH_XML_ODT = 2
def add_days_of_week_xml(root):
pon = etree.Element('pondelok')
uto = etree.Element('utorok')
ste = etree.Element('streda')
stv = etree.Element('stvrtok')
pia = etree.Element('piatok')
root.append(pon)
root.append(uto)
root.append(ste)
root.append(stv)
root.append(pia)
def add_class_to_xml(root, clazz):
if clazz[6] is not None:
print "Spracuvam " + clazz[6]
day = None
# Keby chceme zmenit clazz[3] nepojde to lebo tuple sa neda zmenit
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
return title['colspan']
def get_class_start(title):
# title obsahuje nieco ako "* streda : 14-15 *" - potrebujem dostat len prve cislo
# pretoze aka dlha hodina bude viem z ineho atributu (pozri funkciu get_lessons_of_class, colspan)
# toto mi vrati pole obsahujuce ['streda', '14-15'], vezmem prvy index cize cisla 14-15
zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
# 1 alebo 15 to je v poriadku
# 1-2 a 9-10 -> v oboch pripadoch chcem len prve cislo (dlzka 3 a 4)
# 12-13 -> tu chcem prve dve cisla (dlzka 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
return zac_hod[0:2]
return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
# neni to bohvieako pekne ale budiz, v nultom indexe je nazov skoly, v prvom Triedy
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
ciste_trka = ciste_trka[1:-1] # Vyhodime to trko ktore to obsahuje vsetko, to nepotrebujem
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
# vsetky hodiny v ramci toho dna
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
# podla bgcolor viem ci je hodina alebo volnahodina
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
# Ked je dve a viac predmetov v tom istom case a tom istom dni, tak ta druha alebo tretia
# je mimo hlavneho <tr> tagu v ktorom sa nachadza aj nazov dna, a preto treba hladat
# dalsie hodiny mimo tr tagu.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
# Ak aj ucebna alebo meno vyucujuceho chyba, dlzka vsetkych zoznamov bude tak ci tak rovnaka
# avsak v pripade "Nadpis", ten bude vzdy len jeden, preto musime pouzit "izip_longest"
# ktory zo zoznamu kratsej dlzky, spravi dlhsi a doplni tam "None". keby to nespravim, kazdy
# zoznam skrati na jednu polozku a to by nam chybali hodiny...
# No a navyse je potrebne odstranit predmet ktory ma v nazve tri hviezdy pretoze ten ma v sebe
# data o dalsich hodinach a tie sa nedaju normalne sparsovat, teda ich odstranim. Keby to neodstranim
# hodiny sa mozu posunut a byt v nespravnych dnoch a mat zle ucebne
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Zobere vsetky linky a pozrie sa ci link obsahuje "rozvrh_tr", aby sa spracovali len rozvrhy tried
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
# vyhodi sa prvy link ktory neobsahuje konkretnu triedu, ale sablonu pre vsetky triedy
urls = urls[1:-1]
return urls
def make_folder():
# spravime si zlozku na rozvrhy
try:
os.makedirs('rozvrhy')
except OSError:
pass # ak uz zlozka existuje da error, ten ignorujeme, chceme zapisat do zlozky
def g | generate_xml):
make_folder()
global pondelok, utorok, streda, stvrtok, piatok, rozvrh, trieda_nazov
if generate_xml == XML_ONLY or generate_xml == BOTH_XML_ODT:
# cele_rozvrhy_tried = [get_lessons_of_class("http://www.pdf.umb.sk/~jsedliak/Public/rozvrh_tr2815.htm")]
cele_rozvrhy_tried = []
for url_to_process in get_urls_to_process():
cele_rozvrhy_tried.append(get_lessons_of_class(url_to_process))
for rozvrh_jednej_triedy in cele_rozvrhy_tried:
trieda_nazov = rozvrh_jednej_triedy[0][6] # 6ty index obsahuje meno triedy ktorej patri rozvrh
rozvrh = etree.Element("rozvrh")
rozvrh.attrib['Trieda'] = trieda_nazov
add_days_of_week_xml(rozvrh)
vyuc_hodiny = []
for jedna_hodina_rozvrhu in rozvrh_jednej_triedy:
add_class_to_xml(rozvrh, jedna_hodina_rozvrhu)
vyuc_hodiny.append(SchoolClass.make_class(jedna_hodina_rozvrhu[0],
jedna_hodina_rozvrhu[1],
jedna_hodina_rozvrhu[2],
jedna_hodina_rozvrhu[3],
jedna_hodina_rozvrhu[4],
jedna_hodina_rozvrhu[5]))
pondelok = []
utorok = []
streda = []
stvrtok = []
piatok = []
for objekt_hodina in vyuc_hodiny:
if objekt_hodina.den == 'pondelok':
pondelok.append(objekt_hodina)
if objekt_hodina.den == 'utorok':
utorok.append(objekt_hodina)
if objekt_hodina.den == 'streda':
streda.append(objekt_hodina)
if objekt_hodina.den == u'štvrtok':
stvrtok.append(objekt_hodina)
if objekt_hodina.den == 'piatok':
piatok.append(objekt_hodina)
pondelok.sort(key=attrgetter('zaciatok'))
utorok.sort(key=attrgetter('zaciatok'))
streda.sort(key=attrgetter('zaciatok'))
stvrtok.sort(key=attrgetter('zaciatok'))
piatok.sort(key=attrgetter('zaciatok'))
f = open('rozvrhy/' + trieda_nazov + '.xml', 'w')
f.write(etree.tostring(rozvrh, pretty_print=True))
f.close()
if generate_xml == ODT_ONLY or generate_xml == BOTH_XML_ODT:
zoznam_dni = [pondelok, utorok, streda, stvrtok, piatok]
for den in zoznam_dni:
for objekt_hodina in den:
ODTCreator.add_values(objekt_hodina.hodina,
objekt_hodina.vyuc,
objekt_hodina.ucebna,
objekt_hodina.den,
objekt_hodina.zaciatok,
objekt_hodina.trvanie)
ODTCreator.align_cells('pondelok')
ODTCreator.align_cells('utorok')
ODTCreator.align_cells('streda')
ODTCreator.align_cells('stvrtok')
ODTCreator.align_cells('piatok')
# Won't work in Windows
# subprocess.call(['./packNrun.sh'])
generate_xmls(XML_ONLY)
| enerate_xmls( | identifier_name |
main.py | # coding=utf-8
# Martin Svonava
# program sluzi na parsovanie HTML rozvrhov na UMBcke
# a tvorbu XMLka ktore sa da dalej spracovavat
# >>>> Struktura dat <<<<
# [0] Nazov hodiny
# [1] Vyucujuci
# [2] Ucebna
# [3] Den
# [4] Zaciatok
# [5] Trvanie
# [6] Nazov Triedy pre ktoru rozvrh plati
from operator import attrgetter
import ODTCreator
import SchoolClass
import itertools
import subprocess
import os
import requests
from bs4 import BeautifulSoup
from lxml import etree
# URL Kde sa nachadzaju rozvrhy
url = "http://www.pdf.umb.sk/~jsedliak/Public/"
# "konstanty" ktore rozhoduju o tom ake XMLka sa generuju
XML_ONLY = 0
ODT_ONLY = 1
BOTH_XML_ODT = 2
def add_days_of_week_xml(root):
pon = etree.Element('pondelok')
uto = etree.Element('utorok')
ste = etree.Element('streda')
stv = etree.Element('stvrtok')
pia = etree.Element('piatok')
root.append(pon)
root.append(uto)
root.append(ste)
root.append(stv)
root.append(pia)
def add_class_to_xml(root, clazz):
if clazz[6] is not None:
print "Spracuvam " + clazz[6]
day = None
# Keby chceme zmenit clazz[3] nepojde to lebo tuple sa neda zmenit
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
r |
def get_class_start(title):
# title obsahuje nieco ako "* streda : 14-15 *" - potrebujem dostat len prve cislo
# pretoze aka dlha hodina bude viem z ineho atributu (pozri funkciu get_lessons_of_class, colspan)
# toto mi vrati pole obsahujuce ['streda', '14-15'], vezmem prvy index cize cisla 14-15
zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
# 1 alebo 15 to je v poriadku
# 1-2 a 9-10 -> v oboch pripadoch chcem len prve cislo (dlzka 3 a 4)
# 12-13 -> tu chcem prve dve cisla (dlzka 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
return zac_hod[0:2]
return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
# neni to bohvieako pekne ale budiz, v nultom indexe je nazov skoly, v prvom Triedy
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
ciste_trka = ciste_trka[1:-1] # Vyhodime to trko ktore to obsahuje vsetko, to nepotrebujem
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
# vsetky hodiny v ramci toho dna
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
# podla bgcolor viem ci je hodina alebo volnahodina
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
# Ked je dve a viac predmetov v tom istom case a tom istom dni, tak ta druha alebo tretia
# je mimo hlavneho <tr> tagu v ktorom sa nachadza aj nazov dna, a preto treba hladat
# dalsie hodiny mimo tr tagu.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
# Ak aj ucebna alebo meno vyucujuceho chyba, dlzka vsetkych zoznamov bude tak ci tak rovnaka
# avsak v pripade "Nadpis", ten bude vzdy len jeden, preto musime pouzit "izip_longest"
# ktory zo zoznamu kratsej dlzky, spravi dlhsi a doplni tam "None". keby to nespravim, kazdy
# zoznam skrati na jednu polozku a to by nam chybali hodiny...
# No a navyse je potrebne odstranit predmet ktory ma v nazve tri hviezdy pretoze ten ma v sebe
# data o dalsich hodinach a tie sa nedaju normalne sparsovat, teda ich odstranim. Keby to neodstranim
# hodiny sa mozu posunut a byt v nespravnych dnoch a mat zle ucebne
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Zobere vsetky linky a pozrie sa ci link obsahuje "rozvrh_tr", aby sa spracovali len rozvrhy tried
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
# vyhodi sa prvy link ktory neobsahuje konkretnu triedu, ale sablonu pre vsetky triedy
urls = urls[1:-1]
return urls
def make_folder():
# spravime si zlozku na rozvrhy
try:
os.makedirs('rozvrhy')
except OSError:
pass # ak uz zlozka existuje da error, ten ignorujeme, chceme zapisat do zlozky
def generate_xmls(generate_xml):
make_folder()
global pondelok, utorok, streda, stvrtok, piatok, rozvrh, trieda_nazov
if generate_xml == XML_ONLY or generate_xml == BOTH_XML_ODT:
# cele_rozvrhy_tried = [get_lessons_of_class("http://www.pdf.umb.sk/~jsedliak/Public/rozvrh_tr2815.htm")]
cele_rozvrhy_tried = []
for url_to_process in get_urls_to_process():
cele_rozvrhy_tried.append(get_lessons_of_class(url_to_process))
for rozvrh_jednej_triedy in cele_rozvrhy_tried:
trieda_nazov = rozvrh_jednej_triedy[0][6] # 6ty index obsahuje meno triedy ktorej patri rozvrh
rozvrh = etree.Element("rozvrh")
rozvrh.attrib['Trieda'] = trieda_nazov
add_days_of_week_xml(rozvrh)
vyuc_hodiny = []
for jedna_hodina_rozvrhu in rozvrh_jednej_triedy:
add_class_to_xml(rozvrh, jedna_hodina_rozvrhu)
vyuc_hodiny.append(SchoolClass.make_class(jedna_hodina_rozvrhu[0],
jedna_hodina_rozvrhu[1],
jedna_hodina_rozvrhu[2],
jedna_hodina_rozvrhu[3],
jedna_hodina_rozvrhu[4],
jedna_hodina_rozvrhu[5]))
pondelok = []
utorok = []
streda = []
stvrtok = []
piatok = []
for objekt_hodina in vyuc_hodiny:
if objekt_hodina.den == 'pondelok':
pondelok.append(objekt_hodina)
if objekt_hodina.den == 'utorok':
utorok.append(objekt_hodina)
if objekt_hodina.den == 'streda':
streda.append(objekt_hodina)
if objekt_hodina.den == u'štvrtok':
stvrtok.append(objekt_hodina)
if objekt_hodina.den == 'piatok':
piatok.append(objekt_hodina)
pondelok.sort(key=attrgetter('zaciatok'))
utorok.sort(key=attrgetter('zaciatok'))
streda.sort(key=attrgetter('zaciatok'))
stvrtok.sort(key=attrgetter('zaciatok'))
piatok.sort(key=attrgetter('zaciatok'))
f = open('rozvrhy/' + trieda_nazov + '.xml', 'w')
f.write(etree.tostring(rozvrh, pretty_print=True))
f.close()
if generate_xml == ODT_ONLY or generate_xml == BOTH_XML_ODT:
zoznam_dni = [pondelok, utorok, streda, stvrtok, piatok]
for den in zoznam_dni:
for objekt_hodina in den:
ODTCreator.add_values(objekt_hodina.hodina,
objekt_hodina.vyuc,
objekt_hodina.ucebna,
objekt_hodina.den,
objekt_hodina.zaciatok,
objekt_hodina.trvanie)
ODTCreator.align_cells('pondelok')
ODTCreator.align_cells('utorok')
ODTCreator.align_cells('streda')
ODTCreator.align_cells('stvrtok')
ODTCreator.align_cells('piatok')
# Won't work in Windows
# subprocess.call(['./packNrun.sh'])
generate_xmls(XML_ONLY)
| eturn title['colspan']
| identifier_body |
main.py | # coding=utf-8
# Martin Svonava
# program sluzi na parsovanie HTML rozvrhov na UMBcke
# a tvorbu XMLka ktore sa da dalej spracovavat
# >>>> Struktura dat <<<<
# [0] Nazov hodiny
# [1] Vyucujuci
# [2] Ucebna
# [3] Den
# [4] Zaciatok
# [5] Trvanie
# [6] Nazov Triedy pre ktoru rozvrh plati
from operator import attrgetter
import ODTCreator
import SchoolClass
import itertools
import subprocess
import os
import requests
from bs4 import BeautifulSoup
from lxml import etree
# URL Kde sa nachadzaju rozvrhy
url = "http://www.pdf.umb.sk/~jsedliak/Public/"
# "konstanty" ktore rozhoduju o tom ake XMLka sa generuju
XML_ONLY = 0
ODT_ONLY = 1
BOTH_XML_ODT = 2
def add_days_of_week_xml(root):
pon = etree.Element('pondelok')
uto = etree.Element('utorok')
ste = etree.Element('streda')
stv = etree.Element('stvrtok')
pia = etree.Element('piatok')
root.append(pon)
root.append(uto)
root.append(ste)
root.append(stv)
root.append(pia)
def add_class_to_xml(root, clazz):
if clazz[6] is not None:
print "Spracuvam " + clazz[6]
day = None
# Keby chceme zmenit clazz[3] nepojde to lebo tuple sa neda zmenit
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
return title['colspan']
def get_class_start(title): | zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
# 1 alebo 15 to je v poriadku
# 1-2 a 9-10 -> v oboch pripadoch chcem len prve cislo (dlzka 3 a 4)
# 12-13 -> tu chcem prve dve cisla (dlzka 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
return zac_hod[0:2]
return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
# neni to bohvieako pekne ale budiz, v nultom indexe je nazov skoly, v prvom Triedy
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
ciste_trka = ciste_trka[1:-1] # Vyhodime to trko ktore to obsahuje vsetko, to nepotrebujem
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
# vsetky hodiny v ramci toho dna
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
# podla bgcolor viem ci je hodina alebo volnahodina
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
# Ked je dve a viac predmetov v tom istom case a tom istom dni, tak ta druha alebo tretia
# je mimo hlavneho <tr> tagu v ktorom sa nachadza aj nazov dna, a preto treba hladat
# dalsie hodiny mimo tr tagu.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
# Ak aj ucebna alebo meno vyucujuceho chyba, dlzka vsetkych zoznamov bude tak ci tak rovnaka
# avsak v pripade "Nadpis", ten bude vzdy len jeden, preto musime pouzit "izip_longest"
# ktory zo zoznamu kratsej dlzky, spravi dlhsi a doplni tam "None". keby to nespravim, kazdy
# zoznam skrati na jednu polozku a to by nam chybali hodiny...
# No a navyse je potrebne odstranit predmet ktory ma v nazve tri hviezdy pretoze ten ma v sebe
# data o dalsich hodinach a tie sa nedaju normalne sparsovat, teda ich odstranim. Keby to neodstranim
# hodiny sa mozu posunut a byt v nespravnych dnoch a mat zle ucebne
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Zobere vsetky linky a pozrie sa ci link obsahuje "rozvrh_tr", aby sa spracovali len rozvrhy tried
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
# vyhodi sa prvy link ktory neobsahuje konkretnu triedu, ale sablonu pre vsetky triedy
urls = urls[1:-1]
return urls
def make_folder():
# spravime si zlozku na rozvrhy
try:
os.makedirs('rozvrhy')
except OSError:
pass # ak uz zlozka existuje da error, ten ignorujeme, chceme zapisat do zlozky
def generate_xmls(generate_xml):
make_folder()
global pondelok, utorok, streda, stvrtok, piatok, rozvrh, trieda_nazov
if generate_xml == XML_ONLY or generate_xml == BOTH_XML_ODT:
# cele_rozvrhy_tried = [get_lessons_of_class("http://www.pdf.umb.sk/~jsedliak/Public/rozvrh_tr2815.htm")]
cele_rozvrhy_tried = []
for url_to_process in get_urls_to_process():
cele_rozvrhy_tried.append(get_lessons_of_class(url_to_process))
for rozvrh_jednej_triedy in cele_rozvrhy_tried:
trieda_nazov = rozvrh_jednej_triedy[0][6] # 6ty index obsahuje meno triedy ktorej patri rozvrh
rozvrh = etree.Element("rozvrh")
rozvrh.attrib['Trieda'] = trieda_nazov
add_days_of_week_xml(rozvrh)
vyuc_hodiny = []
for jedna_hodina_rozvrhu in rozvrh_jednej_triedy:
add_class_to_xml(rozvrh, jedna_hodina_rozvrhu)
vyuc_hodiny.append(SchoolClass.make_class(jedna_hodina_rozvrhu[0],
jedna_hodina_rozvrhu[1],
jedna_hodina_rozvrhu[2],
jedna_hodina_rozvrhu[3],
jedna_hodina_rozvrhu[4],
jedna_hodina_rozvrhu[5]))
pondelok = []
utorok = []
streda = []
stvrtok = []
piatok = []
for objekt_hodina in vyuc_hodiny:
if objekt_hodina.den == 'pondelok':
pondelok.append(objekt_hodina)
if objekt_hodina.den == 'utorok':
utorok.append(objekt_hodina)
if objekt_hodina.den == 'streda':
streda.append(objekt_hodina)
if objekt_hodina.den == u'štvrtok':
stvrtok.append(objekt_hodina)
if objekt_hodina.den == 'piatok':
piatok.append(objekt_hodina)
pondelok.sort(key=attrgetter('zaciatok'))
utorok.sort(key=attrgetter('zaciatok'))
streda.sort(key=attrgetter('zaciatok'))
stvrtok.sort(key=attrgetter('zaciatok'))
piatok.sort(key=attrgetter('zaciatok'))
f = open('rozvrhy/' + trieda_nazov + '.xml', 'w')
f.write(etree.tostring(rozvrh, pretty_print=True))
f.close()
if generate_xml == ODT_ONLY or generate_xml == BOTH_XML_ODT:
zoznam_dni = [pondelok, utorok, streda, stvrtok, piatok]
for den in zoznam_dni:
for objekt_hodina in den:
ODTCreator.add_values(objekt_hodina.hodina,
objekt_hodina.vyuc,
objekt_hodina.ucebna,
objekt_hodina.den,
objekt_hodina.zaciatok,
objekt_hodina.trvanie)
ODTCreator.align_cells('pondelok')
ODTCreator.align_cells('utorok')
ODTCreator.align_cells('streda')
ODTCreator.align_cells('stvrtok')
ODTCreator.align_cells('piatok')
# Won't work in Windows
# subprocess.call(['./packNrun.sh'])
generate_xmls(XML_ONLY) | # title obsahuje nieco ako "* streda : 14-15 *" - potrebujem dostat len prve cislo
# pretoze aka dlha hodina bude viem z ineho atributu (pozri funkciu get_lessons_of_class, colspan)
# toto mi vrati pole obsahujuce ['streda', '14-15'], vezmem prvy index cize cisla 14-15 | random_line_split |
main.py | # coding=utf-8
# Martin Svonava
# program sluzi na parsovanie HTML rozvrhov na UMBcke
# a tvorbu XMLka ktore sa da dalej spracovavat
# >>>> Struktura dat <<<<
# [0] Nazov hodiny
# [1] Vyucujuci
# [2] Ucebna
# [3] Den
# [4] Zaciatok
# [5] Trvanie
# [6] Nazov Triedy pre ktoru rozvrh plati
from operator import attrgetter
import ODTCreator
import SchoolClass
import itertools
import subprocess
import os
import requests
from bs4 import BeautifulSoup
from lxml import etree
# URL Kde sa nachadzaju rozvrhy
url = "http://www.pdf.umb.sk/~jsedliak/Public/"
# "konstanty" ktore rozhoduju o tom ake XMLka sa generuju
XML_ONLY = 0
ODT_ONLY = 1
BOTH_XML_ODT = 2
def add_days_of_week_xml(root):
pon = etree.Element('pondelok')
uto = etree.Element('utorok')
ste = etree.Element('streda')
stv = etree.Element('stvrtok')
pia = etree.Element('piatok')
root.append(pon)
root.append(uto)
root.append(ste)
root.append(stv)
root.append(pia)
def add_class_to_xml(root, clazz):
if clazz[6] is not None:
print "Spracuvam " + clazz[6]
day = None
# Keby chceme zmenit clazz[3] nepojde to lebo tuple sa neda zmenit
den_string = clazz[3]
if not clazz[3] is None:
if den_string == u'štvrtok':
den_string = 'stvrtok'
day = root.find(den_string)
else:
if clazz[0] is not None:
print "Nepodarilo sa ziskat den pre " + clazz[0] + ". Rozvrh moze byt nekompletny alebo hodiny posunute"
elif clazz[0] is None and clazz[6] is not None:
print "***** Prazdny rozvrh *****" + clazz[6]
else:
print "***** Rozvrhu chyba nadpis *****"
if day is not None:
if not clazz[0] is None:
hodina = etree.Element('hodina')
hodina.text = clazz[0]
day.append(hodina)
if not clazz[1] is None:
vyucujuci = etree.Element('vyucujuci')
vyucujuci.text = clazz[1]
day.append(vyucujuci)
if not clazz[2] is None:
ucebna = etree.Element('ucebna')
ucebna.text = clazz[2]
day.append(ucebna)
if not clazz[4] is None:
zaciatok = etree.Element('zaciatok')
zaciatok.text = clazz[4]
day.append(zaciatok)
if not clazz[5] is None:
trvanie = etree.Element('trvanie')
trvanie.text = clazz[5]
day.append(trvanie)
def get_class_length(title):
return title['colspan']
def get_class_start(title):
# title obsahuje nieco ako "* streda : 14-15 *" - potrebujem dostat len prve cislo
# pretoze aka dlha hodina bude viem z ineho atributu (pozri funkciu get_lessons_of_class, colspan)
# toto mi vrati pole obsahujuce ['streda', '14-15'], vezmem prvy index cize cisla 14-15
zac_hod = title['title'].partition('*')[-1].rpartition('*')[0].replace(" ", "").split(':')[1]
# 1 alebo 15 to je v poriadku
# 1-2 a 9-10 -> v oboch pripadoch chcem len prve cislo (dlzka 3 a 4)
# 12-13 -> tu chcem prve dve cisla (dlzka 5)
if len(zac_hod) == 3 or len(zac_hod) == 4:
return zac_hod[0:1]
if len(zac_hod) == 5:
r | return zac_hod
def get_lessons_of_class(url_class):
global hlavicka_dni
print "Ziskavam rozvrh z URL " + url_class
src = requests.get(url_class)
txt = src.text
bsoup = BeautifulSoup(txt, "html.parser")
predmety = []
ucitelia = []
ucebne = []
dni = []
zacina_hod = []
poc_hod = []
# neni to bohvieako pekne ale budiz, v nultom indexe je nazov skoly, v prvom Triedy
nadpis = [bsoup.find_all(("div", {'class': 'Nadpis'}))[1].text]
for predmet in bsoup.find_all("font", {'class': 'Predmet'}):
if predmet.text == '':
predmety.append('chyba nazov predmetu')
else:
predmety.append(predmet.text)
for ucitel in bsoup.find_all("font", {'class': 'Vyucujuci'}):
if ucitel.text == '':
ucitelia.append('chyba ucitel')
else:
ucitelia.append(ucitel.text)
for ucebna in bsoup.find_all("font", {'class': 'Ucebna'}):
if ucebna.text == '':
ucebne.append('chyba ucebna')
else:
ucebne.append(ucebna.text)
ciste_trka = bsoup.find_all("tr", {'class': False})
ciste_trka = ciste_trka[1:-1] # Vyhodime to trko ktore to obsahuje vsetko, to nepotrebujem
for trko in ciste_trka:
if trko != '\n' and trko.find("td", {'class': 'HlavickaDni'}) is not None:
hlavicka_dni = trko.find("td", {'class': 'HlavickaDni'})['title']
# vsetky hodiny v ramci toho dna
hodiny = trko.find("td", {'class': 'HlavickaDni'}).parent.find_all("td")
# podla bgcolor viem ci je hodina alebo volnahodina
for hodinaInfo in hodiny:
if hodinaInfo.has_attr('bgcolor') or hodinaInfo.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaInfo))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaInfo))
# Ked je dve a viac predmetov v tom istom case a tom istom dni, tak ta druha alebo tretia
# je mimo hlavneho <tr> tagu v ktorom sa nachadza aj nazov dna, a preto treba hladat
# dalsie hodiny mimo tr tagu.
elif trko != '\n' and trko.find_all("td") is not None:
for hodinaMimoTr in trko.find_all("td"):
if hodinaMimoTr.has_attr('bgcolor') or hodinaMimoTr.attrs['class'][0] == 'Hod':
poc_hod.append(get_class_length(hodinaMimoTr))
dni.append(hlavicka_dni)
zacina_hod.append(get_class_start(hodinaMimoTr))
# Ak aj ucebna alebo meno vyucujuceho chyba, dlzka vsetkych zoznamov bude tak ci tak rovnaka
# avsak v pripade "Nadpis", ten bude vzdy len jeden, preto musime pouzit "izip_longest"
# ktory zo zoznamu kratsej dlzky, spravi dlhsi a doplni tam "None". keby to nespravim, kazdy
# zoznam skrati na jednu polozku a to by nam chybali hodiny...
# No a navyse je potrebne odstranit predmet ktory ma v nazve tri hviezdy pretoze ten ma v sebe
# data o dalsich hodinach a tie sa nedaju normalne sparsovat, teda ich odstranim. Keby to neodstranim
# hodiny sa mozu posunut a byt v nespravnych dnoch a mat zle ucebne
if u'***' in predmety:
predmety.remove(u'***')
print "Predmet obsahoval dalsie hodiny ktore sa neda spracovat kvoli nespravnym datam"
moj_list = list(itertools.izip_longest(predmety, ucitelia, ucebne, dni, zacina_hod, poc_hod, nadpis))
return moj_list
# Zobere vsetky linky a pozrie sa ci link obsahuje "rozvrh_tr", aby sa spracovali len rozvrhy tried
def remove_non_class_timetables(soup_links):
modified_urls = []
for link in soup_links.find_all("a"):
to_append = link.text
if "rozvrh_tr" in to_append:
final_url = url + to_append
modified_urls.append(final_url)
return modified_urls
def get_urls_to_process():
source = requests.get(url)
text = source.text
soup = BeautifulSoup(text, "html.parser")
urls = remove_non_class_timetables(soup)
# vyhodi sa prvy link ktory neobsahuje konkretnu triedu, ale sablonu pre vsetky triedy
urls = urls[1:-1]
return urls
def make_folder():
# spravime si zlozku na rozvrhy
try:
os.makedirs('rozvrhy')
except OSError:
pass # ak uz zlozka existuje da error, ten ignorujeme, chceme zapisat do zlozky
def generate_xmls(generate_xml):
make_folder()
global pondelok, utorok, streda, stvrtok, piatok, rozvrh, trieda_nazov
if generate_xml == XML_ONLY or generate_xml == BOTH_XML_ODT:
# cele_rozvrhy_tried = [get_lessons_of_class("http://www.pdf.umb.sk/~jsedliak/Public/rozvrh_tr2815.htm")]
cele_rozvrhy_tried = []
for url_to_process in get_urls_to_process():
cele_rozvrhy_tried.append(get_lessons_of_class(url_to_process))
for rozvrh_jednej_triedy in cele_rozvrhy_tried:
trieda_nazov = rozvrh_jednej_triedy[0][6] # 6ty index obsahuje meno triedy ktorej patri rozvrh
rozvrh = etree.Element("rozvrh")
rozvrh.attrib['Trieda'] = trieda_nazov
add_days_of_week_xml(rozvrh)
vyuc_hodiny = []
for jedna_hodina_rozvrhu in rozvrh_jednej_triedy:
add_class_to_xml(rozvrh, jedna_hodina_rozvrhu)
vyuc_hodiny.append(SchoolClass.make_class(jedna_hodina_rozvrhu[0],
jedna_hodina_rozvrhu[1],
jedna_hodina_rozvrhu[2],
jedna_hodina_rozvrhu[3],
jedna_hodina_rozvrhu[4],
jedna_hodina_rozvrhu[5]))
pondelok = []
utorok = []
streda = []
stvrtok = []
piatok = []
for objekt_hodina in vyuc_hodiny:
if objekt_hodina.den == 'pondelok':
pondelok.append(objekt_hodina)
if objekt_hodina.den == 'utorok':
utorok.append(objekt_hodina)
if objekt_hodina.den == 'streda':
streda.append(objekt_hodina)
if objekt_hodina.den == u'štvrtok':
stvrtok.append(objekt_hodina)
if objekt_hodina.den == 'piatok':
piatok.append(objekt_hodina)
pondelok.sort(key=attrgetter('zaciatok'))
utorok.sort(key=attrgetter('zaciatok'))
streda.sort(key=attrgetter('zaciatok'))
stvrtok.sort(key=attrgetter('zaciatok'))
piatok.sort(key=attrgetter('zaciatok'))
f = open('rozvrhy/' + trieda_nazov + '.xml', 'w')
f.write(etree.tostring(rozvrh, pretty_print=True))
f.close()
if generate_xml == ODT_ONLY or generate_xml == BOTH_XML_ODT:
zoznam_dni = [pondelok, utorok, streda, stvrtok, piatok]
for den in zoznam_dni:
for objekt_hodina in den:
ODTCreator.add_values(objekt_hodina.hodina,
objekt_hodina.vyuc,
objekt_hodina.ucebna,
objekt_hodina.den,
objekt_hodina.zaciatok,
objekt_hodina.trvanie)
ODTCreator.align_cells('pondelok')
ODTCreator.align_cells('utorok')
ODTCreator.align_cells('streda')
ODTCreator.align_cells('stvrtok')
ODTCreator.align_cells('piatok')
# Won't work in Windows
# subprocess.call(['./packNrun.sh'])
generate_xmls(XML_ONLY)
| eturn zac_hod[0:2]
| conditional_block |
renderer.rs | use crate::fft::*;
use crate::Opt;
use anyhow::Result;
use rustfft::num_traits::Zero;
use std::{fs::File, io::Read, path::PathBuf, slice};
use wgpu::util::DeviceExt;
use winit::{event::*, window::Window};
#[repr(transparent)]
#[derive(Copy, Clone)]
struct PodComplex(FftSample);
unsafe impl bytemuck::Zeroable for PodComplex {}
/// Safety: Complex<f32> is a repr(C) struct of two f32, and has alignment 4.
unsafe impl bytemuck::Pod for PodComplex {}
// PodComplex is casted to vec2 and requires alignment 8 when sent to the GPU.
// This is not a problem as long as the start position within the Buffer is aligned.
type PodVec = Vec<PodComplex>;
type PodSlice = [PodComplex];
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces ??? complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface, | sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
pub fn update(&mut self, spectrum: &FftSlice) {
self.render_parameters = GpuRenderParameters {
screen_wx: self.size.width,
screen_hy: self.size.height,
..self.render_parameters
};
self.queue.write_buffer(
&self.render_parameters_buffer,
0,
bytemuck::cast_slice(slice::from_ref(&self.render_parameters)),
);
self.fft_vec.copy_from_slice(fft_as_pod(spectrum));
self.queue
.write_buffer(&self.fft_vec_buffer, 0, bytemuck::cast_slice(&self.fft_vec));
}
pub fn render(&mut self) {
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline); // 2.
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..6, 0..1); // 3.
}
// submit will accept anything that implements IntoIter
self.queue.submit(std::iter::once(encoder.finish()));
}
} | device,
queue, | random_line_split |
renderer.rs | use crate::fft::*;
use crate::Opt;
use anyhow::Result;
use rustfft::num_traits::Zero;
use std::{fs::File, io::Read, path::PathBuf, slice};
use wgpu::util::DeviceExt;
use winit::{event::*, window::Window};
#[repr(transparent)]
#[derive(Copy, Clone)]
struct PodComplex(FftSample);
unsafe impl bytemuck::Zeroable for PodComplex {}
/// Safety: Complex<f32> is a repr(C) struct of two f32, and has alignment 4.
unsafe impl bytemuck::Pod for PodComplex {}
// PodComplex is casted to vec2 and requires alignment 8 when sent to the GPU.
// This is not a problem as long as the start position within the Buffer is aligned.
type PodVec = Vec<PodComplex>;
type PodSlice = [PodComplex];
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces ??? complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
pub fn update(&mut self, spectrum: &FftSlice) |
pub fn render(&mut self) {
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline); // 2.
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..6, 0..1); // 3.
}
// submit will accept anything that implements IntoIter
self.queue.submit(std::iter::once(encoder.finish()));
}
}
| {
self.render_parameters = GpuRenderParameters {
screen_wx: self.size.width,
screen_hy: self.size.height,
..self.render_parameters
};
self.queue.write_buffer(
&self.render_parameters_buffer,
0,
bytemuck::cast_slice(slice::from_ref(&self.render_parameters)),
);
self.fft_vec.copy_from_slice(fft_as_pod(spectrum));
self.queue
.write_buffer(&self.fft_vec_buffer, 0, bytemuck::cast_slice(&self.fft_vec));
} | identifier_body |
renderer.rs | use crate::fft::*;
use crate::Opt;
use anyhow::Result;
use rustfft::num_traits::Zero;
use std::{fs::File, io::Read, path::PathBuf, slice};
use wgpu::util::DeviceExt;
use winit::{event::*, window::Window};
#[repr(transparent)]
#[derive(Copy, Clone)]
struct PodComplex(FftSample);
unsafe impl bytemuck::Zeroable for PodComplex {}
/// Safety: Complex<f32> is a repr(C) struct of two f32, and has alignment 4.
unsafe impl bytemuck::Pod for PodComplex {}
// PodComplex is casted to vec2 and requires alignment 8 when sent to the GPU.
// This is not a problem as long as the start position within the Buffer is aligned.
type PodVec = Vec<PodComplex>;
type PodSlice = [PodComplex];
fn fft_as_pod(my_slice: &FftSlice) -> &PodSlice {
unsafe { std::slice::from_raw_parts(my_slice.as_ptr() as *const _, my_slice.len()) }
}
/// Sent to GPU. Controls FFT layout and options.
#[repr(C)]
#[derive(Copy, Clone)]
struct GpuRenderParameters {
/// Screen size.
screen_wx: u32,
screen_hy: u32,
/// Samples per second.
sample_rate: u32,
/// Number of FFT bins between 0 and Nyquist inclusive.
/// Equals nsamp/2 + 1.
fft_out_size: u32,
}
unsafe impl bytemuck::Zeroable for GpuRenderParameters {}
unsafe impl bytemuck::Pod for GpuRenderParameters {}
/// The longest allowed FFT is ???.
/// The real FFT produces ??? complex bins.
fn fft_out_size(fft_input_size: usize) -> usize {
fft_input_size / 2 + 1
}
// Docs: https://sotrh.github.io/learn-wgpu/beginner/tutorial2-swapchain/
// Code: https://github.com/sotrh/learn-wgpu/blob/master/code/beginner/tutorial2-swapchain/src/main.rs
// - https://github.com/sotrh/learn-wgpu/blob/3a46a215/code/beginner/tutorial2-swapchain/src/main.rs
pub struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
render_parameters: GpuRenderParameters,
fft_vec: PodVec,
render_parameters_buffer: wgpu::Buffer,
fft_vec_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
fn load_from_file(fname: &str) -> Result<String> {
let mut buf: Vec<u8> = vec![];
File::open(PathBuf::from(fname))?.read_to_end(&mut buf)?;
Ok(String::from_utf8(buf)?)
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window, opt: &Opt, sample_rate: u32) -> anyhow::Result<State> {
let size = window.inner_size();
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
},
None, // Trace path
)
.await
.unwrap();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo, // TODO change to Mailbox?
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
let vs_src = load_from_file("shaders/shader.vert")?;
let fs_src = load_from_file("shaders/shader.frag")?;
let mut compiler = shaderc::Compiler::new().unwrap();
let vs_spirv = compiler.compile_into_spirv(
&vs_src,
shaderc::ShaderKind::Vertex,
"shader.vert",
"main",
None,
)?;
let fs_spirv = compiler.compile_into_spirv(
&fs_src,
shaderc::ShaderKind::Fragment,
"shader.frag",
"main",
None,
)?;
let vs_module =
device.create_shader_module(wgpu::util::make_spirv(&vs_spirv.as_binary_u8()));
let fs_module =
device.create_shader_module(wgpu::util::make_spirv(&fs_spirv.as_binary_u8()));
// # FFT SSBO
let fft_out_size = fft_out_size(opt.fft_size);
let render_parameters = GpuRenderParameters {
screen_wx: size.width,
screen_hy: size.height,
fft_out_size: fft_out_size as u32,
sample_rate,
};
let fft_vec: PodVec = vec![PodComplex(FftSample::zero()); fft_out_size];
let render_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT layout (size)"),
contents: bytemuck::cast_slice(slice::from_ref(&render_parameters)),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let fft_vec_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("FFT data"),
contents: bytemuck::cast_slice(&fft_vec),
usage: wgpu::BufferUsage::STORAGE | wgpu::BufferUsage::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
readonly: true,
min_binding_size: None,
},
count: None,
},
],
label: Some("bind_group_layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(render_param_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(fft_vec_buffer.slice(..)),
},
],
label: Some("bind_group"),
});
// # Shader pipeline
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main", // 1.
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
// 2.
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList, // 1.
depth_stencil_state: None, // 2.
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint16, // 3.
vertex_buffers: &[], // 4.
},
sample_count: 1, // 5.
sample_mask: !0, // 6.
alpha_to_coverage_enabled: false, // 7.
});
Ok(State {
surface,
device,
queue,
sc_desc,
swap_chain,
size,
render_pipeline,
render_parameters,
fft_vec,
render_parameters_buffer: render_param_buffer,
fft_vec_buffer,
bind_group,
})
}
pub fn | (&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
pub fn input(&mut self, event: &WindowEvent) -> bool {
false
}
pub fn update(&mut self, spectrum: &FftSlice) {
self.render_parameters = GpuRenderParameters {
screen_wx: self.size.width,
screen_hy: self.size.height,
..self.render_parameters
};
self.queue.write_buffer(
&self.render_parameters_buffer,
0,
bytemuck::cast_slice(slice::from_ref(&self.render_parameters)),
);
self.fft_vec.copy_from_slice(fft_as_pod(spectrum));
self.queue
.write_buffer(&self.fft_vec_buffer, 0, bytemuck::cast_slice(&self.fft_vec));
}
pub fn render(&mut self) {
let frame = self
.swap_chain
.get_current_frame()
.expect("Timeout getting texture")
.output;
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline); // 2.
render_pass.set_bind_group(0, &self.bind_group, &[]);
render_pass.draw(0..6, 0..1); // 3.
}
// submit will accept anything that implements IntoIter
self.queue.submit(std::iter::once(encoder.finish()));
}
}
| resize | identifier_name |
error.rs | //! Errorand Result types.
use crate::database::Database;
use crate::types::Type;
use std::any::type_name;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
use std::io;
#[allow(unused_macros)]
macro_rules! decode_err {
($s:literal, $($args:tt)*) => {
crate::Error::Decode(format!($s, $($args)*).into())
};
($expr:expr) => {
crate::Error::decode($expr)
};
}
/// A specialized `Result` type for rbatis_core.
pub type Result<T> = std::result::Result<T, Error>;
/// A generic error that represents all the ways a method can fail inside of rbatis_core.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
/// Default Error
E(String),
/// Error communicating with the database.
Io(io::Error),
/// Connection URL was malformed.
UrlParse(url::ParseError),
/// An error was returned by the database.
Database(Box<dyn DatabaseError>),
/// No row was returned during [`query::Map::fetch_one`] or `QueryAs::fetch_one`.
///
/// [`query::Map::fetch_one`]: crate::query::Map::fetch_one
RowNotFound,
/// Column was not found by name in a Row (during [`Row::get`]).
///
/// [`Row::get`]: crate::row::Row::get
ColumnNotFound(Box<str>),
/// Column index was out of bounds (e.g., asking for column 4 in a 2-column row).
ColumnIndexOutOfBounds { index: usize, len: usize },
/// Unexpected or invalid data was encountered. This would indicate that we received
/// data that we were not expecting or it was in a format we did not understand. This
/// generally means either there is a programming error in a rbatis_core driver or
/// something with the connection or the database database itself is corrupted.
///
/// Context is provided by the included error message.
Protocol(Box<str>),
/// A [`Pool::acquire`] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
pub(crate) fn mismatched_types<DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
impl From<async_native_tls::Error> for Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
| #[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [std::error::Error::downcast]
/// which returns `Result`. This was a deliberate design decision in favor of
/// brevity as in almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast] instead.
pub fn downcast<T: DatabaseError>(self: Box<Self>) -> Box<T> {
self.try_downcast().unwrap_or_else(|e| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
e
)
})
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `Err(self)` if the downcast fails (the types do not match).
pub fn try_downcast<T: DatabaseError>(
self: Box<Self>,
) -> std::result::Result<Box<T>, Box<Self>> {
if self.as_ref_err().is::<T>() {
Ok(self
.into_box_err()
.downcast()
.expect("type mismatch between DatabaseError::as_ref_err() and into_box_err()"))
} else {
Err(self)
}
}
}
/// Used by the `protocol_error!()` macro for a lazily evaluated conversion to
/// `crate::Error::Protocol` so we can use the macro with `.ok_or()` without Clippy complaining.
pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }
}
);
pub(crate) struct TlsError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! tls_err {
($($args:tt)*) => { crate::error::TlsError { args: format_args!($($args)*)} };
}
/// An unexpected `NULL` was encountered during decoding.
///
/// Returned from `Row::get` if the value from the database is `NULL`
/// and you are not decoding into an `Option`.
#[derive(Debug, Clone, Copy)]
pub struct UnexpectedNullError;
impl Display for UnexpectedNullError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("unexpected null; try decoding as an `Option`")
}
}
impl StdError for UnexpectedNullError {}
impl Clone for Error {
fn clone(&self) -> Self {
Error::from(self.to_string())
}
fn clone_from(&mut self, source: &Self) {
*self = Self::from(source.to_string());
}
}
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
use serde::de::{Visitor};
// This is what #[derive(Serialize)] would generate.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_str())
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = String;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string")
}
fn visit_string<E>(self, v: String) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v)
}
fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v.to_string())
}
}
impl<'de> Deserialize<'de> for Error {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = deserializer.deserialize_string(ErrorVisitor)?;
return Ok(Error::from(r));
}
}
#[test]
fn test_json_error(){
let e=Error::from("fuck");
let s= serde_json::to_string(&e).unwrap();
println!("{}",s.as_str());
let new_e:Error=serde_json::from_str(s.as_str()).unwrap();
} | #[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
| random_line_split |
error.rs | //! Errorand Result types.
use crate::database::Database;
use crate::types::Type;
use std::any::type_name;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
use std::io;
#[allow(unused_macros)]
macro_rules! decode_err {
($s:literal, $($args:tt)*) => {
crate::Error::Decode(format!($s, $($args)*).into())
};
($expr:expr) => {
crate::Error::decode($expr)
};
}
/// A specialized `Result` type for rbatis_core.
pub type Result<T> = std::result::Result<T, Error>;
/// A generic error that represents all the ways a method can fail inside of rbatis_core.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
/// Default Error
E(String),
/// Error communicating with the database.
Io(io::Error),
/// Connection URL was malformed.
UrlParse(url::ParseError),
/// An error was returned by the database.
Database(Box<dyn DatabaseError>),
/// No row was returned during [`query::Map::fetch_one`] or `QueryAs::fetch_one`.
///
/// [`query::Map::fetch_one`]: crate::query::Map::fetch_one
RowNotFound,
/// Column was not found by name in a Row (during [`Row::get`]).
///
/// [`Row::get`]: crate::row::Row::get
ColumnNotFound(Box<str>),
/// Column index was out of bounds (e.g., asking for column 4 in a 2-column row).
ColumnIndexOutOfBounds { index: usize, len: usize },
/// Unexpected or invalid data was encountered. This would indicate that we received
/// data that we were not expecting or it was in a format we did not understand. This
/// generally means either there is a programming error in a rbatis_core driver or
/// something with the connection or the database database itself is corrupted.
///
/// Context is provided by the included error message.
Protocol(Box<str>),
/// A [`Pool::acquire`] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
pub(crate) fn mismatched_types<DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
impl From<async_native_tls::Error> for Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [std::error::Error::downcast]
/// which returns `Result`. This was a deliberate design decision in favor of
/// brevity as in almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast] instead.
pub fn downcast<T: DatabaseError>(self: Box<Self>) -> Box<T> {
self.try_downcast().unwrap_or_else(|e| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
e
)
})
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `Err(self)` if the downcast fails (the types do not match).
pub fn try_downcast<T: DatabaseError>(
self: Box<Self>,
) -> std::result::Result<Box<T>, Box<Self>> {
if self.as_ref_err().is::<T>() {
Ok(self
.into_box_err()
.downcast()
.expect("type mismatch between DatabaseError::as_ref_err() and into_box_err()"))
} else {
Err(self)
}
}
}
/// Used by the `protocol_error!()` macro for a lazily evaluated conversion to
/// `crate::Error::Protocol` so we can use the macro with `.ok_or()` without Clippy complaining.
pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }
}
);
pub(crate) struct TlsError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! tls_err {
($($args:tt)*) => { crate::error::TlsError { args: format_args!($($args)*)} };
}
/// An unexpected `NULL` was encountered during decoding.
///
/// Returned from `Row::get` if the value from the database is `NULL`
/// and you are not decoding into an `Option`.
#[derive(Debug, Clone, Copy)]
pub struct UnexpectedNullError;
impl Display for UnexpectedNullError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("unexpected null; try decoding as an `Option`")
}
}
impl StdError for UnexpectedNullError {}
impl Clone for Error {
fn clone(&self) -> Self {
Error::from(self.to_string())
}
fn clone_from(&mut self, source: &Self) {
*self = Self::from(source.to_string());
}
}
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
use serde::de::{Visitor};
// This is what #[derive(Serialize)] would generate.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_str())
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = String;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string")
}
fn visit_string<E>(self, v: String) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v)
}
fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
|
}
impl<'de> Deserialize<'de> for Error {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = deserializer.deserialize_string(ErrorVisitor)?;
return Ok(Error::from(r));
}
}
#[test]
fn test_json_error(){
let e=Error::from("fuck");
let s= serde_json::to_string(&e).unwrap();
println!("{}",s.as_str());
let new_e:Error=serde_json::from_str(s.as_str()).unwrap();
} | {
Ok(v.to_string())
} | identifier_body |
error.rs | //! Errorand Result types.
use crate::database::Database;
use crate::types::Type;
use std::any::type_name;
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display};
use std::io;
#[allow(unused_macros)]
macro_rules! decode_err {
($s:literal, $($args:tt)*) => {
crate::Error::Decode(format!($s, $($args)*).into())
};
($expr:expr) => {
crate::Error::decode($expr)
};
}
/// A specialized `Result` type for rbatis_core.
pub type Result<T> = std::result::Result<T, Error>;
/// A generic error that represents all the ways a method can fail inside of rbatis_core.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
/// Default Error
E(String),
/// Error communicating with the database.
Io(io::Error),
/// Connection URL was malformed.
UrlParse(url::ParseError),
/// An error was returned by the database.
Database(Box<dyn DatabaseError>),
/// No row was returned during [`query::Map::fetch_one`] or `QueryAs::fetch_one`.
///
/// [`query::Map::fetch_one`]: crate::query::Map::fetch_one
RowNotFound,
/// Column was not found by name in a Row (during [`Row::get`]).
///
/// [`Row::get`]: crate::row::Row::get
ColumnNotFound(Box<str>),
/// Column index was out of bounds (e.g., asking for column 4 in a 2-column row).
ColumnIndexOutOfBounds { index: usize, len: usize },
/// Unexpected or invalid data was encountered. This would indicate that we received
/// data that we were not expecting or it was in a format we did not understand. This
/// generally means either there is a programming error in a rbatis_core driver or
/// something with the connection or the database database itself is corrupted.
///
/// Context is provided by the included error message.
Protocol(Box<str>),
/// A [`Pool::acquire`] timed out due to connections not becoming available or
/// because another task encountered too many errors while trying to open a new connection.
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
PoolTimedOut(Option<Box<dyn StdError + Send + Sync>>),
/// [`Pool::close`] was called while we were waiting in [`Pool::acquire`].
///
/// [`Pool::acquire`]: crate::pool::Pool::acquire
/// [`Pool::close`]: crate::pool::Pool::close
PoolClosed,
/// An error occurred while attempting to setup TLS.
/// This should only be returned from an explicit ask for TLS.
Tls(Box<dyn StdError + Send + Sync>),
/// An error occurred decoding data received from the database.
Decode(Box<dyn StdError + Send + Sync>),
}
impl Error {
#[allow(dead_code)]
pub(crate) fn decode<E>(err: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
Error::Decode(err.into())
}
#[allow(dead_code)]
pub(crate) fn | <DB: Database, T>(expected: DB::TypeInfo) -> Self
where
T: Type<DB>,
{
let ty_name = type_name::<T>();
return decode_err!(
"mismatched types; Rust type `{}` (as SQL type {}) is not compatible with SQL type {}",
ty_name,
T::type_info(),
expected
);
}
}
impl StdError for Error {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
Error::Io(error) => Some(error),
Error::UrlParse(error) => Some(error),
Error::PoolTimedOut(Some(error)) => Some(&**error),
Error::Decode(error) => Some(&**error),
Error::Tls(error) => Some(&**error),
Error::Database(error) => Some(error.as_ref_err()),
_ => None,
}
}
}
impl Display for Error {
// IntellijRust does not understand that [non_exhaustive] applies only for downstream crates
// noinspection RsMatchCheck
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::E(error) => write!(f, "{}", error),
Error::Io(error) => write!(f, "{}", error),
Error::UrlParse(error) => write!(f, "{}", error),
Error::Decode(error) => write!(f, "{}", error),
Error::Database(error) => Display::fmt(error, f),
Error::RowNotFound => f.write_str("found no row when we expected at least one"),
Error::ColumnNotFound(ref name) => {
write!(f, "no column found with the name {:?}", name)
}
Error::ColumnIndexOutOfBounds { index, len } => write!(
f,
"column index out of bounds: there are {} columns but the index is {}",
len, index
),
Error::Protocol(ref err) => f.write_str(err),
Error::PoolTimedOut(Some(ref err)) => {
write!(f, "timed out while waiting for an open connection: {}", err)
}
Error::PoolTimedOut(None) => {
write!(f, "timed out while waiting for an open connection")
}
Error::PoolClosed => f.write_str("attempted to acquire a connection on a closed pool"),
Error::Tls(ref err) => write!(f, "error during TLS upgrade: {}", err),
}
}
}
impl From<io::Error> for Error {
#[inline]
fn from(err: io::Error) -> Self {
Error::Io(err)
}
}
impl From<io::ErrorKind> for Error {
#[inline]
fn from(err: io::ErrorKind) -> Self {
Error::Io(err.into())
}
}
impl From<url::ParseError> for Error {
#[inline]
fn from(err: url::ParseError) -> Self {
Error::UrlParse(err)
}
}
impl From<ProtocolError<'_>> for Error {
#[inline]
fn from(err: ProtocolError) -> Self {
Error::Protocol(err.args.to_string().into_boxed_str())
}
}
impl From<UnexpectedNullError> for Error {
#[inline]
fn from(err: UnexpectedNullError) -> Self {
Error::Decode(err.into())
}
}
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
impl From<async_native_tls::Error> for Error {
#[inline]
fn from(err: async_native_tls::Error) -> Self {
Error::Tls(err.into())
}
}
impl From<TlsError<'_>> for Error {
#[inline]
fn from(err: TlsError<'_>) -> Self {
Error::Tls(err.args.to_string().into())
}
}
impl From<&str> for Error {
fn from(arg: &str) -> Self {
return Error::E(arg.to_string());
}
}
impl From<std::string::String> for Error {
fn from(arg: String) -> Self {
return Error::E(arg);
}
}
/// An error that was returned by the database.
pub trait DatabaseError: StdError + Send + Sync + 'static {
/// The primary, human-readable error message.
fn message(&self) -> &str;
/// The (SQLSTATE) code for the error.
fn code(&self) -> Option<&str> {
None
}
fn details(&self) -> Option<&str> {
None
}
fn hint(&self) -> Option<&str> {
None
}
fn table_name(&self) -> Option<&str> {
None
}
fn column_name(&self) -> Option<&str> {
None
}
fn constraint_name(&self) -> Option<&str> {
None
}
#[doc(hidden)]
fn as_ref_err(&self) -> &(dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn as_mut_err(&mut self) -> &mut (dyn StdError + Send + Sync + 'static);
#[doc(hidden)]
fn into_box_err(self: Box<Self>) -> Box<dyn StdError + Send + Sync + 'static>;
}
impl dyn DatabaseError {
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [StdError::downcast_ref]
/// which returns `Option`. This was a deliberate design decision in favor of brevity as in
/// almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast_ref] instead.
pub fn downcast_ref<T: DatabaseError>(&self) -> &T {
self.try_downcast_ref::<T>().unwrap_or_else(|| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
})
}
/// Downcast this `&dyn DatabaseError` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `None` if the downcast fails (the types do not match)
pub fn try_downcast_ref<T: DatabaseError>(&self) -> Option<&T> {
self.as_ref_err().downcast_ref()
}
/// Only meant for internal use so no `try_` variant is currently provided
#[allow(dead_code)]
pub(crate) fn downcast_mut<T: DatabaseError>(&mut self) -> &mut T {
// tried to express this as the following:
//
// if let Some(e) = self.as_mut_err().downcast_mut() { return e; }
//
// however it didn't like using `self` again in the panic format
if self.as_ref_err().is::<T>() {
return self.as_mut_err().downcast_mut().unwrap();
}
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
self
)
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// ### Panics
/// If the type does not match; this is in contrast with [std::error::Error::downcast]
/// which returns `Result`. This was a deliberate design decision in favor of
/// brevity as in almost all cases you should know which database error type you're expecting.
///
/// In any other cases, use [Self::try_downcast] instead.
pub fn downcast<T: DatabaseError>(self: Box<Self>) -> Box<T> {
self.try_downcast().unwrap_or_else(|e| {
panic!(
"downcasting to wrong DatabaseError type; original error: {:?}",
e
)
})
}
/// Downcast this `Box<dyn DatabaseError>` to a specific database error type:
///
/// * [PgError][crate::postgres::PgError] (if the `postgres` feature is active)
/// * [MySqlError][crate::mysql::MySqlError] (if the `mysql` feature is active)
/// * [SqliteError][crate::sqlite::SqliteError] (if the `sqlite` feature is active)
///
/// In a generic context you can use the [crate::database::Database::Error] associated type.
///
/// Returns `Err(self)` if the downcast fails (the types do not match).
pub fn try_downcast<T: DatabaseError>(
self: Box<Self>,
) -> std::result::Result<Box<T>, Box<Self>> {
if self.as_ref_err().is::<T>() {
Ok(self
.into_box_err()
.downcast()
.expect("type mismatch between DatabaseError::as_ref_err() and into_box_err()"))
} else {
Err(self)
}
}
}
/// Used by the `protocol_error!()` macro for a lazily evaluated conversion to
/// `crate::Error::Protocol` so we can use the macro with `.ok_or()` without Clippy complaining.
pub(crate) struct ProtocolError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! protocol_err (
($($args:tt)*) => {
$crate::error::ProtocolError { args: format_args!($($args)*) }
}
);
pub(crate) struct TlsError<'a> {
pub args: fmt::Arguments<'a>,
}
#[allow(unused_macros)]
macro_rules! tls_err {
($($args:tt)*) => { crate::error::TlsError { args: format_args!($($args)*)} };
}
/// An unexpected `NULL` was encountered during decoding.
///
/// Returned from `Row::get` if the value from the database is `NULL`
/// and you are not decoding into an `Option`.
#[derive(Debug, Clone, Copy)]
pub struct UnexpectedNullError;
impl Display for UnexpectedNullError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("unexpected null; try decoding as an `Option`")
}
}
impl StdError for UnexpectedNullError {}
impl Clone for Error {
fn clone(&self) -> Self {
Error::from(self.to_string())
}
fn clone_from(&mut self, source: &Self) {
*self = Self::from(source.to_string());
}
}
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
use serde::de::{Visitor};
// This is what #[derive(Serialize)] would generate.
impl Serialize for Error {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.to_string().as_str())
}
}
struct ErrorVisitor;
impl<'de> Visitor<'de> for ErrorVisitor {
type Value = String;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a string")
}
fn visit_string<E>(self, v: String) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v)
}
fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
where
E: std::error::Error,
{
Ok(v.to_string())
}
}
impl<'de> Deserialize<'de> for Error {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let r = deserializer.deserialize_string(ErrorVisitor)?;
return Ok(Error::from(r));
}
}
#[test]
fn test_json_error(){
let e=Error::from("fuck");
let s= serde_json::to_string(&e).unwrap();
println!("{}",s.as_str());
let new_e:Error=serde_json::from_str(s.as_str()).unwrap();
} | mismatched_types | identifier_name |
emitter.rs | extern crate xml;
use polyfill;
use std::str::FromStr;
use std::collections::BTreeMap;
/// Parse a float with appropriate panic message on failure.
macro_rules! parse_float {
($s:expr) => (f64::from_str($s).expect("Failed to parse float"))
}
pub struct Color {
r: u8, g: u8, b: u8
}
impl Color {
pub fn black() -> Color { Color { r: 0, g: 0, b: 0 } }
// pub fn white() -> Color { Color { r: u8::max_value(), g: u8::max_value(), b: u8::max_value() } }
pub fn parse(hexcode: &str) -> Color {
let mut chars = hexcode.chars();
// Skip over the leading '#'
{
let hash = chars.next().expect("Empty string passed as color hexcode");
assert_eq!(hash, '#');
}
// Grab 2 chars, parse those as hexadecimal
let mut next_component = || {
let a = chars.next().expect("Ran out of chars while parsing color hexcode");
let b = chars.next().expect("Ran out of chars while parsing color hexcode");
let ab = String::from_utf8(vec![a as u8, b as u8]).expect("What");
u8::from_str_radix(&ab, 16).expect("Invalid hex number in color string")
};
let r = next_component();
let g = next_component();
let b = next_component();
Color { r:r, g:g, b:b }
}
pub fn emit(&self) -> String {
format!(
"cocos2d::Color4F(cocos2d::Color3B({},{},{}))",
self.r, self.g, self.b
)
}
}
trait Shape {
/// Should generate code to draw the shape on a cocos2dx DrawNode
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>);
/// Should generate code for edge segments, and an encompassing shape
///
/// TODO probably also make those generated shapes have certain categories
/// and the encompassing should be a sensor.
fn emit_physics(&self, id: &str, physicsbody: &str);
}
pub struct Polygon {
verts: Vec<[f64; 2]>,
triangles: Vec<[usize; 3]>,
color: Color
}
impl Shape for Polygon {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Triangles for {}", id);
for ref t in &self.triangles {
println!(
"{}->drawTriangle(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.verts[t[0]][0], self.verts[t[0]][1],
self.verts[t[1]][0], self.verts[t[1]][1],
self.verts[t[2]][0], self.verts[t[2]][1],
color.unwrap_or(&self.color.emit())
);
}
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
if self.verts.len() <= 1 {
println!("// {} does not have enough vertices for a polygon", id);
return;
}
let mut verts = self.verts.iter();
let mut vert_a = verts.next();
let mut vert_b = verts.next();
let first_point = vert_a.unwrap();
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
while let (Some(a), Some(b)) = (vert_a, vert_b) {
emit_shape(&a, &b);
vert_a = vert_b;
vert_b = verts.next();
}
if self.verts.len() > 2 {
emit_shape(&vert_a.unwrap(), &first_point);
}
}
}
pub struct Circle {
cx: f64,
cy: f64,
r: f64,
color: Color
}
impl Shape for Circle {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Circle for {}", id);
// arguments: center, radius, angle, segments, color
println!(
"{}->drawSolidCircle(Vec2({:.10}f,{:.10}f), {:.10}f, 0.0f, 20, {});",
Emitter::varname(id, drawnode),
self.cx, self.cy, self.r,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, _id: &str, _physicsbody: &str) {
panic!("Can't do physics for circle yet (no use case)");
}
}
pub struct Rect {
x: f64,
y: f64,
w: f64,
h: f64,
color: Color
}
impl Shape for Rect {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Rect for {}", id);
// arguments: origin, destination, color
println!(
"{}->drawSolidRect(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.x, self.y,
self.x+self.w, self.y+self.h,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
// bottom-left to bottom-right
emit_shape(&[self.x, self.y], &[self.x+self.w, self.y]);
// bottom-right to top-right
emit_shape(&[self.x+self.w, self.y], &[self.x+self.w, self.y+self.h]);
// top-right to top-left
emit_shape(&[self.x+self.w, self.y+self.h], &[self.x, self.y+self.h]);
// top-left to bottom-left
emit_shape(&[self.x, self.y+self.h], &[self.x, self.y]);
}
}
/// The emitter itself. Holds onto shapes relative to their id.
pub struct Emitter {
origin: Option<[f64; 2]>,
shapes: BTreeMap<String, Box<Shape>>
}
impl Emitter {
pub fn new() -> Emitter {
Emitter {
origin: None,
shapes: BTreeMap::new()
}
}
///
/// Emit a single shape to stdout.
/// Returns true if a shape under `id` was found and emitted.
/// Returns false if there was no shape under `id`.
///
pub fn emit(&self, id: &str, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) -> bool {
match self.shapes.get(id) {
Some(shape) =>{
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
true
}
None => false
}
}
///
/// Emit all shapes to stdout.
///
pub fn emit_all(&self, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) {
for (ref id, ref shape) in &self.shapes {
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
}
}
///
/// How many shapes we've added.
///
pub fn len(&self) -> usize {
self.shapes.len()
}
///
/// Add a shape from xml attributes.
///
pub fn add_shape(&mut self, id: &str, tag: &str, attributes: &Vec<xml::attribute::OwnedAttribute>) {
let new_shape: Box<Shape> = match tag {
"path" => Box::new(self.parse_polygon(attributes)),
"circle" => Box::new(self.parse_circle(attributes)),
"rect" => Box::new(self.parse_rect(attributes)),
_ => return
};
self.shapes.insert(id.to_owned(), new_shape);
}
///
/// Parse a polygon with vertex positions based on `self.origin`.
///
pub fn parse_polygon(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Polygon {
let ref d = attributes.iter().find(|ref a| a.name.local_name == "d")
.expect("Path lacking 'd' attribute")
.value;
// Fill a vec with all verts
let mut current_pos = [0.0, 0.0];
let mut verts = Vec::with_capacity(d.len() / 5);
{
let mut n = 0;
let mut exprs = d.split(' ');
while let Some(expr) = exprs.next() {
if expr == "m" {
if n == 0 {
// Starting point is origin
let expr = exprs.next().unwrap();
let mut xy = expr.split(',');
let sx = xy.next().expect("Invalid pair");
let sy = xy.next().expect("Invalid pair");
self.assign_position_from_origin(&mut current_pos, sx, sy);
verts.push(current_pos);
continue;
}
else { panic!("'m' showing up more than once???") }
}
else if expr == "z" {
break
}
else if !expr.contains(',') {
panic!("Unsupported expression: {}", expr);
}
let mut xy = expr.split(',');
let x = xy.next().expect("Invalid pair");
let y = xy.next().expect("Invalid pair");
current_pos[0] += parse_float!(x);
current_pos[1] -= parse_float!(y);
verts.push(current_pos);
n = n + 1;
}
}
let ref style = attributes.iter().find(|ref a| a.name.local_name == "style")
.expect("Path lacking 'style' attribute")
.value;
Polygon {
triangles: polyfill::triangle_indices(&verts, polyfill::TANGENTIAL),
verts: verts,
color: Emitter::parse_color_from_style(style).unwrap_or(Color::black())
}
}
///
/// Parse a circle with center point based on `self.origin`.
///
pub fn parse_circle(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Circle {
// cx cy r color
let mut params = (None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"cx" => params.0 = Some(attr.value.clone()),
"cy" => params.1 = Some(attr.value.clone()),
"r" => params.2 = Some(attr.value.clone()),
"style" => params.3 = Some(attr.value.clone()),
_ => {}
}
if let (Some(cx), Some(cy), Some(r), Some(style)) = params {
let mut cxy = [0.0, 0.0];
self.assign_position_from_origin(&mut cxy, &cx, &cy);
return Circle {
cx: cxy[0], cy: cxy[1], r: parse_float!(&r),
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
};
}
}
panic!("Invalid circle");
}
///
/// Parse a rect with origin at the bottom right (??)
///
pub fn | (&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Rect {
// x, y, w, h, style
let mut params = (None, None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"x" => params.0 = Some(attr.value.clone()),
"y" => params.1 = Some(attr.value.clone()),
"width" => params.2 = Some(attr.value.clone()),
"height" => params.3 = Some(attr.value.clone()),
"style" => params.4 = Some(attr.value.clone()),
_ => {}
}
if let (Some(x), Some(y), Some(w), Some(h), Some(style)) = params {
let mut xy = [0.0, 0.0];
self.assign_position_from_origin(&mut xy, &x, &y);
let fw = parse_float!(&w);
let fh = parse_float!(&h);
return Rect {
x: xy[0], y: xy[1] - fh,
w: fw, h: fh,
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
}
}
}
panic!("Invalid rect");
}
// =========== Private helper functions ===========
fn parse_color_from_style(style: &str) -> Option<Color> {
let exprs = style.split(';');
for ref expr in exprs {
let mut keyval = expr.split(':');
let key = keyval.next().expect("Invalid style entry");
if key == "fill" {
let val = keyval.next().expect("Invalid style entry");
return Some(Color::parse(val));
}
}
None
}
fn varname(id: &str, drawnode: &str) -> String {
if drawnode.contains("{id}") {
drawnode.to_owned().replace("{id}", id)
}
else {
drawnode.to_owned()
}
}
fn assign_position_from_origin(&mut self, current_pos: &mut [f64; 2], sx: &str, sy: &str) {
let x = parse_float!(sx);
let y = -parse_float!(sy);
// Set this point to origin if we have none already,
// otherwise offset by this starting point.
match self.origin {
Some(origin) => *current_pos = [x - origin[0], y - origin[1]],
None => self.origin = Some([x, y])
}
}
}
| parse_rect | identifier_name |
emitter.rs | extern crate xml;
use polyfill;
use std::str::FromStr;
use std::collections::BTreeMap;
/// Parse a float with appropriate panic message on failure.
macro_rules! parse_float {
($s:expr) => (f64::from_str($s).expect("Failed to parse float"))
}
pub struct Color {
r: u8, g: u8, b: u8
}
impl Color {
pub fn black() -> Color { Color { r: 0, g: 0, b: 0 } }
// pub fn white() -> Color { Color { r: u8::max_value(), g: u8::max_value(), b: u8::max_value() } }
pub fn parse(hexcode: &str) -> Color {
let mut chars = hexcode.chars();
// Skip over the leading '#'
{
let hash = chars.next().expect("Empty string passed as color hexcode");
assert_eq!(hash, '#');
}
// Grab 2 chars, parse those as hexadecimal
let mut next_component = || {
let a = chars.next().expect("Ran out of chars while parsing color hexcode");
let b = chars.next().expect("Ran out of chars while parsing color hexcode");
let ab = String::from_utf8(vec![a as u8, b as u8]).expect("What");
u8::from_str_radix(&ab, 16).expect("Invalid hex number in color string")
};
let r = next_component();
let g = next_component();
let b = next_component();
Color { r:r, g:g, b:b }
}
pub fn emit(&self) -> String {
format!(
"cocos2d::Color4F(cocos2d::Color3B({},{},{}))",
self.r, self.g, self.b
)
}
}
trait Shape {
/// Should generate code to draw the shape on a cocos2dx DrawNode
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>);
/// Should generate code for edge segments, and an encompassing shape
///
/// TODO probably also make those generated shapes have certain categories
/// and the encompassing should be a sensor.
fn emit_physics(&self, id: &str, physicsbody: &str);
}
pub struct Polygon {
verts: Vec<[f64; 2]>,
triangles: Vec<[usize; 3]>,
color: Color
}
impl Shape for Polygon {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Triangles for {}", id);
for ref t in &self.triangles {
println!(
"{}->drawTriangle(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.verts[t[0]][0], self.verts[t[0]][1],
self.verts[t[1]][0], self.verts[t[1]][1],
self.verts[t[2]][0], self.verts[t[2]][1],
color.unwrap_or(&self.color.emit())
);
}
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
if self.verts.len() <= 1 {
println!("// {} does not have enough vertices for a polygon", id);
return;
}
let mut verts = self.verts.iter();
let mut vert_a = verts.next();
let mut vert_b = verts.next();
let first_point = vert_a.unwrap();
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
while let (Some(a), Some(b)) = (vert_a, vert_b) {
emit_shape(&a, &b);
vert_a = vert_b;
vert_b = verts.next();
}
if self.verts.len() > 2 {
emit_shape(&vert_a.unwrap(), &first_point);
}
}
}
pub struct Circle {
cx: f64,
cy: f64,
r: f64,
color: Color
}
impl Shape for Circle {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Circle for {}", id);
// arguments: center, radius, angle, segments, color
println!(
"{}->drawSolidCircle(Vec2({:.10}f,{:.10}f), {:.10}f, 0.0f, 20, {});",
Emitter::varname(id, drawnode),
self.cx, self.cy, self.r,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, _id: &str, _physicsbody: &str) {
panic!("Can't do physics for circle yet (no use case)");
}
}
pub struct Rect {
x: f64,
y: f64,
w: f64,
h: f64,
color: Color
}
impl Shape for Rect {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Rect for {}", id);
// arguments: origin, destination, color
println!(
"{}->drawSolidRect(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.x, self.y,
self.x+self.w, self.y+self.h,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
// bottom-left to bottom-right
emit_shape(&[self.x, self.y], &[self.x+self.w, self.y]);
// bottom-right to top-right
emit_shape(&[self.x+self.w, self.y], &[self.x+self.w, self.y+self.h]);
// top-right to top-left
emit_shape(&[self.x+self.w, self.y+self.h], &[self.x, self.y+self.h]);
// top-left to bottom-left
emit_shape(&[self.x, self.y+self.h], &[self.x, self.y]);
}
}
/// The emitter itself. Holds onto shapes relative to their id.
pub struct Emitter {
origin: Option<[f64; 2]>,
shapes: BTreeMap<String, Box<Shape>>
}
impl Emitter {
pub fn new() -> Emitter {
Emitter {
origin: None,
shapes: BTreeMap::new()
}
}
///
/// Emit a single shape to stdout.
/// Returns true if a shape under `id` was found and emitted.
/// Returns false if there was no shape under `id`.
///
pub fn emit(&self, id: &str, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) -> bool {
match self.shapes.get(id) {
Some(shape) =>{
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
true
}
None => false
}
}
///
/// Emit all shapes to stdout.
///
pub fn emit_all(&self, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) {
for (ref id, ref shape) in &self.shapes {
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
}
}
///
/// How many shapes we've added.
///
pub fn len(&self) -> usize {
self.shapes.len()
}
///
/// Add a shape from xml attributes.
///
pub fn add_shape(&mut self, id: &str, tag: &str, attributes: &Vec<xml::attribute::OwnedAttribute>) {
let new_shape: Box<Shape> = match tag {
"path" => Box::new(self.parse_polygon(attributes)),
"circle" => Box::new(self.parse_circle(attributes)),
"rect" => Box::new(self.parse_rect(attributes)),
_ => return
};
self.shapes.insert(id.to_owned(), new_shape);
}
///
/// Parse a polygon with vertex positions based on `self.origin`.
///
pub fn parse_polygon(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Polygon {
let ref d = attributes.iter().find(|ref a| a.name.local_name == "d")
.expect("Path lacking 'd' attribute")
.value;
// Fill a vec with all verts
let mut current_pos = [0.0, 0.0];
let mut verts = Vec::with_capacity(d.len() / 5);
{
let mut n = 0;
let mut exprs = d.split(' ');
while let Some(expr) = exprs.next() {
if expr == "m" {
if n == 0 {
// Starting point is origin
let expr = exprs.next().unwrap();
let mut xy = expr.split(',');
let sx = xy.next().expect("Invalid pair");
let sy = xy.next().expect("Invalid pair");
self.assign_position_from_origin(&mut current_pos, sx, sy);
verts.push(current_pos);
continue;
}
else { panic!("'m' showing up more than once???") }
}
else if expr == "z" {
break
}
else if !expr.contains(',') {
panic!("Unsupported expression: {}", expr);
}
let mut xy = expr.split(',');
let x = xy.next().expect("Invalid pair");
let y = xy.next().expect("Invalid pair");
current_pos[0] += parse_float!(x);
current_pos[1] -= parse_float!(y);
verts.push(current_pos);
n = n + 1;
}
}
let ref style = attributes.iter().find(|ref a| a.name.local_name == "style")
.expect("Path lacking 'style' attribute")
.value;
Polygon {
triangles: polyfill::triangle_indices(&verts, polyfill::TANGENTIAL),
verts: verts,
color: Emitter::parse_color_from_style(style).unwrap_or(Color::black())
}
}
///
/// Parse a circle with center point based on `self.origin`.
///
pub fn parse_circle(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Circle {
// cx cy r color
let mut params = (None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"cx" => params.0 = Some(attr.value.clone()),
"cy" => params.1 = Some(attr.value.clone()),
"r" => params.2 = Some(attr.value.clone()),
"style" => params.3 = Some(attr.value.clone()),
_ => {}
}
if let (Some(cx), Some(cy), Some(r), Some(style)) = params {
let mut cxy = [0.0, 0.0];
self.assign_position_from_origin(&mut cxy, &cx, &cy);
return Circle {
cx: cxy[0], cy: cxy[1], r: parse_float!(&r),
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
};
}
}
panic!("Invalid circle");
}
/// |
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"x" => params.0 = Some(attr.value.clone()),
"y" => params.1 = Some(attr.value.clone()),
"width" => params.2 = Some(attr.value.clone()),
"height" => params.3 = Some(attr.value.clone()),
"style" => params.4 = Some(attr.value.clone()),
_ => {}
}
if let (Some(x), Some(y), Some(w), Some(h), Some(style)) = params {
let mut xy = [0.0, 0.0];
self.assign_position_from_origin(&mut xy, &x, &y);
let fw = parse_float!(&w);
let fh = parse_float!(&h);
return Rect {
x: xy[0], y: xy[1] - fh,
w: fw, h: fh,
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
}
}
}
panic!("Invalid rect");
}
// =========== Private helper functions ===========
fn parse_color_from_style(style: &str) -> Option<Color> {
let exprs = style.split(';');
for ref expr in exprs {
let mut keyval = expr.split(':');
let key = keyval.next().expect("Invalid style entry");
if key == "fill" {
let val = keyval.next().expect("Invalid style entry");
return Some(Color::parse(val));
}
}
None
}
fn varname(id: &str, drawnode: &str) -> String {
if drawnode.contains("{id}") {
drawnode.to_owned().replace("{id}", id)
}
else {
drawnode.to_owned()
}
}
fn assign_position_from_origin(&mut self, current_pos: &mut [f64; 2], sx: &str, sy: &str) {
let x = parse_float!(sx);
let y = -parse_float!(sy);
// Set this point to origin if we have none already,
// otherwise offset by this starting point.
match self.origin {
Some(origin) => *current_pos = [x - origin[0], y - origin[1]],
None => self.origin = Some([x, y])
}
}
} | /// Parse a rect with origin at the bottom right (??)
///
pub fn parse_rect(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Rect {
// x, y, w, h, style
let mut params = (None, None, None, None, None); | random_line_split |
emitter.rs | extern crate xml;
use polyfill;
use std::str::FromStr;
use std::collections::BTreeMap;
/// Parse a float with appropriate panic message on failure.
macro_rules! parse_float {
($s:expr) => (f64::from_str($s).expect("Failed to parse float"))
}
pub struct Color {
r: u8, g: u8, b: u8
}
impl Color {
pub fn black() -> Color { Color { r: 0, g: 0, b: 0 } }
// pub fn white() -> Color { Color { r: u8::max_value(), g: u8::max_value(), b: u8::max_value() } }
pub fn parse(hexcode: &str) -> Color {
let mut chars = hexcode.chars();
// Skip over the leading '#'
{
let hash = chars.next().expect("Empty string passed as color hexcode");
assert_eq!(hash, '#');
}
// Grab 2 chars, parse those as hexadecimal
let mut next_component = || {
let a = chars.next().expect("Ran out of chars while parsing color hexcode");
let b = chars.next().expect("Ran out of chars while parsing color hexcode");
let ab = String::from_utf8(vec![a as u8, b as u8]).expect("What");
u8::from_str_radix(&ab, 16).expect("Invalid hex number in color string")
};
let r = next_component();
let g = next_component();
let b = next_component();
Color { r:r, g:g, b:b }
}
pub fn emit(&self) -> String {
format!(
"cocos2d::Color4F(cocos2d::Color3B({},{},{}))",
self.r, self.g, self.b
)
}
}
trait Shape {
/// Should generate code to draw the shape on a cocos2dx DrawNode
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>);
/// Should generate code for edge segments, and an encompassing shape
///
/// TODO probably also make those generated shapes have certain categories
/// and the encompassing should be a sensor.
fn emit_physics(&self, id: &str, physicsbody: &str);
}
pub struct Polygon {
verts: Vec<[f64; 2]>,
triangles: Vec<[usize; 3]>,
color: Color
}
impl Shape for Polygon {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Triangles for {}", id);
for ref t in &self.triangles {
println!(
"{}->drawTriangle(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.verts[t[0]][0], self.verts[t[0]][1],
self.verts[t[1]][0], self.verts[t[1]][1],
self.verts[t[2]][0], self.verts[t[2]][1],
color.unwrap_or(&self.color.emit())
);
}
}
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
if self.verts.len() <= 1 {
println!("// {} does not have enough vertices for a polygon", id);
return;
}
let mut verts = self.verts.iter();
let mut vert_a = verts.next();
let mut vert_b = verts.next();
let first_point = vert_a.unwrap();
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
while let (Some(a), Some(b)) = (vert_a, vert_b) {
emit_shape(&a, &b);
vert_a = vert_b;
vert_b = verts.next();
}
if self.verts.len() > 2 {
emit_shape(&vert_a.unwrap(), &first_point);
}
}
}
pub struct Circle {
cx: f64,
cy: f64,
r: f64,
color: Color
}
impl Shape for Circle {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) {
println!("// Circle for {}", id);
// arguments: center, radius, angle, segments, color
println!(
"{}->drawSolidCircle(Vec2({:.10}f,{:.10}f), {:.10}f, 0.0f, 20, {});",
Emitter::varname(id, drawnode),
self.cx, self.cy, self.r,
color.unwrap_or(&self.color.emit())
);
}
fn emit_physics(&self, _id: &str, _physicsbody: &str) {
panic!("Can't do physics for circle yet (no use case)");
}
}
pub struct Rect {
x: f64,
y: f64,
w: f64,
h: f64,
color: Color
}
impl Shape for Rect {
fn emit_graphics(&self, id: &str, drawnode: &str, color: Option<&str>) |
fn emit_physics(&self, id: &str, physicsbody: &str) {
println!("// Physics for {}", id);
let emit_shape = |a: &[f64; 2], b: &[f64; 2]|
println!(
"{}->addShape(PhysicsShapeEdgeSegment::create(Vec2({:.10}f, {:.10}f), Vec2({:.10}f, {:.10}f)));",
Emitter::varname(id, physicsbody),
a[0], a[1],
b[0], b[1]
);
// bottom-left to bottom-right
emit_shape(&[self.x, self.y], &[self.x+self.w, self.y]);
// bottom-right to top-right
emit_shape(&[self.x+self.w, self.y], &[self.x+self.w, self.y+self.h]);
// top-right to top-left
emit_shape(&[self.x+self.w, self.y+self.h], &[self.x, self.y+self.h]);
// top-left to bottom-left
emit_shape(&[self.x, self.y+self.h], &[self.x, self.y]);
}
}
/// The emitter itself. Holds onto shapes relative to their id.
pub struct Emitter {
origin: Option<[f64; 2]>,
shapes: BTreeMap<String, Box<Shape>>
}
impl Emitter {
pub fn new() -> Emitter {
Emitter {
origin: None,
shapes: BTreeMap::new()
}
}
///
/// Emit a single shape to stdout.
/// Returns true if a shape under `id` was found and emitted.
/// Returns false if there was no shape under `id`.
///
pub fn emit(&self, id: &str, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) -> bool {
match self.shapes.get(id) {
Some(shape) =>{
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
true
}
None => false
}
}
///
/// Emit all shapes to stdout.
///
pub fn emit_all(&self, drawnode: Option<&str>, physicsbody: Option<&str>, color: Option<&str>) {
for (ref id, ref shape) in &self.shapes {
if let Some(dn) = drawnode {
shape.emit_graphics(id, dn, color);
}
if let Some(pb) = physicsbody {
shape.emit_physics(id, pb);
}
}
}
///
/// How many shapes we've added.
///
pub fn len(&self) -> usize {
self.shapes.len()
}
///
/// Add a shape from xml attributes.
///
pub fn add_shape(&mut self, id: &str, tag: &str, attributes: &Vec<xml::attribute::OwnedAttribute>) {
let new_shape: Box<Shape> = match tag {
"path" => Box::new(self.parse_polygon(attributes)),
"circle" => Box::new(self.parse_circle(attributes)),
"rect" => Box::new(self.parse_rect(attributes)),
_ => return
};
self.shapes.insert(id.to_owned(), new_shape);
}
///
/// Parse a polygon with vertex positions based on `self.origin`.
///
pub fn parse_polygon(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Polygon {
let ref d = attributes.iter().find(|ref a| a.name.local_name == "d")
.expect("Path lacking 'd' attribute")
.value;
// Fill a vec with all verts
let mut current_pos = [0.0, 0.0];
let mut verts = Vec::with_capacity(d.len() / 5);
{
let mut n = 0;
let mut exprs = d.split(' ');
while let Some(expr) = exprs.next() {
if expr == "m" {
if n == 0 {
// Starting point is origin
let expr = exprs.next().unwrap();
let mut xy = expr.split(',');
let sx = xy.next().expect("Invalid pair");
let sy = xy.next().expect("Invalid pair");
self.assign_position_from_origin(&mut current_pos, sx, sy);
verts.push(current_pos);
continue;
}
else { panic!("'m' showing up more than once???") }
}
else if expr == "z" {
break
}
else if !expr.contains(',') {
panic!("Unsupported expression: {}", expr);
}
let mut xy = expr.split(',');
let x = xy.next().expect("Invalid pair");
let y = xy.next().expect("Invalid pair");
current_pos[0] += parse_float!(x);
current_pos[1] -= parse_float!(y);
verts.push(current_pos);
n = n + 1;
}
}
let ref style = attributes.iter().find(|ref a| a.name.local_name == "style")
.expect("Path lacking 'style' attribute")
.value;
Polygon {
triangles: polyfill::triangle_indices(&verts, polyfill::TANGENTIAL),
verts: verts,
color: Emitter::parse_color_from_style(style).unwrap_or(Color::black())
}
}
///
/// Parse a circle with center point based on `self.origin`.
///
pub fn parse_circle(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Circle {
// cx cy r color
let mut params = (None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"cx" => params.0 = Some(attr.value.clone()),
"cy" => params.1 = Some(attr.value.clone()),
"r" => params.2 = Some(attr.value.clone()),
"style" => params.3 = Some(attr.value.clone()),
_ => {}
}
if let (Some(cx), Some(cy), Some(r), Some(style)) = params {
let mut cxy = [0.0, 0.0];
self.assign_position_from_origin(&mut cxy, &cx, &cy);
return Circle {
cx: cxy[0], cy: cxy[1], r: parse_float!(&r),
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
};
}
}
panic!("Invalid circle");
}
///
/// Parse a rect with origin at the bottom right (??)
///
pub fn parse_rect(&mut self, attributes: &Vec<xml::attribute::OwnedAttribute>) -> Rect {
// x, y, w, h, style
let mut params = (None, None, None, None, None);
for ref attr in attributes {
let name: &str = &attr.name.local_name;
match name {
"x" => params.0 = Some(attr.value.clone()),
"y" => params.1 = Some(attr.value.clone()),
"width" => params.2 = Some(attr.value.clone()),
"height" => params.3 = Some(attr.value.clone()),
"style" => params.4 = Some(attr.value.clone()),
_ => {}
}
if let (Some(x), Some(y), Some(w), Some(h), Some(style)) = params {
let mut xy = [0.0, 0.0];
self.assign_position_from_origin(&mut xy, &x, &y);
let fw = parse_float!(&w);
let fh = parse_float!(&h);
return Rect {
x: xy[0], y: xy[1] - fh,
w: fw, h: fh,
color: Emitter::parse_color_from_style(&style).unwrap_or(Color::black())
}
}
}
panic!("Invalid rect");
}
// =========== Private helper functions ===========
fn parse_color_from_style(style: &str) -> Option<Color> {
let exprs = style.split(';');
for ref expr in exprs {
let mut keyval = expr.split(':');
let key = keyval.next().expect("Invalid style entry");
if key == "fill" {
let val = keyval.next().expect("Invalid style entry");
return Some(Color::parse(val));
}
}
None
}
fn varname(id: &str, drawnode: &str) -> String {
if drawnode.contains("{id}") {
drawnode.to_owned().replace("{id}", id)
}
else {
drawnode.to_owned()
}
}
fn assign_position_from_origin(&mut self, current_pos: &mut [f64; 2], sx: &str, sy: &str) {
let x = parse_float!(sx);
let y = -parse_float!(sy);
// Set this point to origin if we have none already,
// otherwise offset by this starting point.
match self.origin {
Some(origin) => *current_pos = [x - origin[0], y - origin[1]],
None => self.origin = Some([x, y])
}
}
}
| {
println!("// Rect for {}", id);
// arguments: origin, destination, color
println!(
"{}->drawSolidRect(Vec2({:.10}f,{:.10}f), Vec2({:.10}f,{:.10}f), {});",
Emitter::varname(id, drawnode),
self.x, self.y,
self.x+self.w, self.y+self.h,
color.unwrap_or(&self.color.emit())
);
} | identifier_body |
suicidegirls.py | import time, subprocess, os.path, re, multiprocessing, threading
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class SuicideGirls:
driver = None
dispatcher_thread = None
argument_lists = []
stop_dispatching = False
def __init__(self, exec_dir, username, password, dir, process_limit, urls, type, time_period):
SuicideGirls.dispatcher_thread = threading.Thread(target=self.__dispatch)
self.username = username
self.password = password
self.root_url = "https://www.suicidegirls.com/"
self.dir = dir
self.exec_dir = exec_dir
self.process_limit = process_limit
self.urls = []
self.__type = type
self.process_limit = process_limit
self.time_period = time_period
self.girls_completed = 0
self.sets_completed = 0
if type in ["girl", "hopeful"]:
for url in urls:
self.urls.append(self.__build_url(url))
else:
self.urls = urls
SuicideGirls.dispatcher_thread.start()
def __dispatch(self):
print("Beginning dispatcher thread...")
while not SuicideGirls.stop_dispatching or len(SuicideGirls.argument_lists) != 0:
if len(SuicideGirls.argument_lists) != 0:
print("Argument list found! Dispatching...")
argument_list = SuicideGirls.argument_lists.pop(0)
pool = multiprocessing.Pool(self.process_limit)
pool.map(self.download_image, argument_list)
# Girls: Riae (36), Fishball (28), Vandoll (7)
# Total photosets: 71
# Processes: 8
# map: 00:24:37
# map_async: 00:12:33
print("Exiting dispatcher thread...")
def startup(self):
SuicideGirls.driver = webdriver.Chrome(executable_path="dependencies/chromedriver.exe")
SuicideGirls.driver.maximize_window()
SuicideGirls.driver.implicitly_wait(5)
SuicideGirls.driver.get(self.root_url)
self.__login()
def shutdown(self):
SuicideGirls.driver.quit()
def __login(self):
login_button_xpath = "//a[@class='login button' or @class='button login']"
login_form_submit_xpath = "//button[@type='submit' and text()='Login']"
username_box_xpath = "//input[@name='username']"
password_box_xpath = "//input[@name='password']"
SuicideGirls.driver.find_element_by_xpath(login_button_xpath).click()
SuicideGirls.driver.find_element_by_xpath(username_box_xpath).send_keys(self.username)
SuicideGirls.driver.find_element_by_xpath(password_box_xpath).send_keys(self.password)
SuicideGirls.driver.find_element_by_xpath(login_form_submit_xpath).click()
time.sleep(5)
flag = False;
while True:
try:
image_select = SuicideGirls.driver.find_element_by_xpath("//iframe[@title='recaptcha challenge']")
if not flag:
print("Found a captcha!")
flag = True
except:
break;
print("No captcha found!")
def rip(self):
for url in self.urls:
SuicideGirls.driver.get(url)
if self.__type == "girl":
print("Single girl")
self.__rip_girl()
elif self.__type == "girls":
print("All Suicide Girls")
self.__rip_all_girls()
elif self.__type == "hopefuls":
print("All hopefuls")
self.__rip_all_hopefuls()
elif self.__type == "sotds":
print("All sets of the day")
self.__rip_all_sets_of_the_day()
elif self.__type == "set":
print("Single set")
self.__rip_set()
elif self.__type == "all":
print("All!")
self.__rip_all_photos()
SuicideGirls.stop_dispatching = True
SuicideGirls.dispatcher_thread.join()
print("Rip completed.")
print("Total girls/hopefuls ripped: " + str(self.girls_completed))
print("Total sets ripped: " + str(self.sets_completed))
def __rip_all_photos(self):
SuicideGirls.driver.get(self.urls[0])
self.__type = "hopefuls"
self.__rip_all_hopefuls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "girls"
self.__rip_all_girls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "sotds"
self.__rip_all_sets_of_the_day()
def __rip_all_girls(self):
suicide_girls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'SuicideGirls']"
self.__rip_all(suicide_girls_xpath)
def __rip_all_hopefuls(self):
hopefuls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Hopefuls']"
self.__rip_all(hopefuls_xpath)
def __rip_all_sets_of_the_day(self):
sotds_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Sets Of The Day']"
self.__rip_all(sotds_xpath)
def __rip_all(self, type_xpath):
time_period_xpath = "//li[@class='dropdown'][3]//ul/li/a[text() = '" + self.time_period + "']"
girl_name_xpath = "//article/header//h2/a"
load_more_xpath = "//a[@id='load-more']"
choice = SuicideGirls.driver.find_element_by_xpath(type_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
choice = SuicideGirls.driver.find_element_by_xpath(time_period_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
girls = []
iteration = 0
while True:
iteration += 1
names = SuicideGirls.driver.find_elements_by_xpath(girl_name_xpath)
for name in names:
girls.append(name.text)
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<24;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
girls = list(set(girls))
for girl in sorted(girls):
url = self.__build_url(girl)
SuicideGirls.driver.get(url)
self.__rip_girl()
def __rip_girl(self):
load_more_xpath = "//a[@id='load-more']"
photos_xpath = "//div[@id='content-container']//a[text()='Photos']"
photosets_xpath = "//div[@id='content-container']//a[text()='Photosets']"
set_title_xpath = "//article/header//h2/a"
url = SuicideGirls.driver.find_element_by_xpath(photos_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
url = SuicideGirls.driver.find_element_by_xpath(photosets_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
set_links = []
iteration = 0
while True:
iteration += 1
titles = SuicideGirls.driver.find_elements_by_xpath(set_title_xpath)
for title in titles:
set_links.append(title.get_attribute("href"))
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<9;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
set_links = list(set(set_links))
for link in set_links:
SuicideGirls.driver.get(link)
self.__rip_set()
self.girls_completed += 1
def __rip_set(self):
girl_xpath = "//h1/a"
title_xpath = "//header[@class='header']/div[@class='top-bar']/h2[@class='title']"
full_image_button_xpath = "//a[@id='button-view_full_size']"
full_image_url_xpath = "//div[@data-image_url]"
girl = SuicideGirls.driver.find_element_by_xpath(girl_xpath).text
title = SuicideGirls.driver.find_element_by_xpath(title_xpath).text
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
check = False
if os.path.exists(dir_name):
check = True
SuicideGirls.driver.find_element_by_xpath(full_image_button_xpath).click()
time.sleep(5)
images = SuicideGirls.driver.find_elements_by_xpath(full_image_url_xpath)
image_urls = []
for i in range(0, len(images)):
url = images[i].get_attribute("data-image_url")
ext = url[url.rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if not os.path.exists(os.path.join(dir_name, file_name)):
image_urls.append(url)
else:
prin |
self.__download_and_save_set(image_urls, girl, title)
self.sets_completed += 1
def __download_and_save_set(self, urls, girl, title):
aria_path = os.path.join(self.exec_dir, "dependencies", "aria2", "aria2c.exe")
error_strings = []
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
with multiprocessing.Pool(8) as pool:
args = []
for i in range (0, len(urls)):
command = [aria_path, "-d", dir_name, "-o"]
ext = urls[i][urls[i].rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if os.path.exists(dir_name + file_name):
continue
command.append(file_name)
command.append(urls[i])
args.append((error_strings, command, str(i + 1), urls[i], girl, title))
SuicideGirls.argument_lists.append(args)
if len(error_strings) > 0:
f = open(os.path.join(dir_name, "errors.txt", "w"))
f.write("\n".join(sorted(error_strings)))
f.close()
def __build_url(self, name):
if self.__type in ["girl", "girls", "sotds"]:
return "https://www.suicidegirls.com/girls/" + name
elif self.__type in ["hopeful", "hopefuls"]:
return "https://www.suicidegirls.com/members/" + name
def download_image(self, args):
process = subprocess.run(args[1])
if process.returncode != 0:
args[0].append("\tImage " + args[2] + " failed; URL: " + args[3])
print(args[4].title() + "/" + args[5].title() + " #" + args[2] + " complete")
def start_processes(async_result):
async_result.get()
def print_warning():
print("This file is meant to be imported by other Python files, not run directly. Exiting now.")
if __name__ == "__main__":
print_warning()
| t(girl.title() + "/" + title.title() + " Img" + str(i).zfill(3) + " already exists, skipping...")
| conditional_block |
suicidegirls.py | import time, subprocess, os.path, re, multiprocessing, threading
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class SuicideGirls:
driver = None
dispatcher_thread = None
argument_lists = []
stop_dispatching = False
def __init__(self, exec_dir, username, password, dir, process_limit, urls, type, time_period):
SuicideGirls.dispatcher_thread = threading.Thread(target=self.__dispatch)
self.username = username
self.password = password
self.root_url = "https://www.suicidegirls.com/"
self.dir = dir
self.exec_dir = exec_dir
self.process_limit = process_limit
self.urls = []
self.__type = type
self.process_limit = process_limit
self.time_period = time_period
self.girls_completed = 0
self.sets_completed = 0
if type in ["girl", "hopeful"]:
for url in urls:
self.urls.append(self.__build_url(url))
else:
self.urls = urls
SuicideGirls.dispatcher_thread.start()
def __dispatch(self):
print("Beginning dispatcher thread...")
while not SuicideGirls.stop_dispatching or len(SuicideGirls.argument_lists) != 0:
if len(SuicideGirls.argument_lists) != 0:
print("Argument list found! Dispatching...")
argument_list = SuicideGirls.argument_lists.pop(0)
pool = multiprocessing.Pool(self.process_limit)
pool.map(self.download_image, argument_list)
# Girls: Riae (36), Fishball (28), Vandoll (7)
# Total photosets: 71
# Processes: 8
# map: 00:24:37
# map_async: 00:12:33
print("Exiting dispatcher thread...")
def startup(self):
SuicideGirls.driver = webdriver.Chrome(executable_path="dependencies/chromedriver.exe")
SuicideGirls.driver.maximize_window()
SuicideGirls.driver.implicitly_wait(5)
SuicideGirls.driver.get(self.root_url)
self.__login()
def shutdown(self):
SuicideGirls.driver.quit()
def __login(self):
login_button_xpath = "//a[@class='login button' or @class='button login']"
login_form_submit_xpath = "//button[@type='submit' and text()='Login']"
username_box_xpath = "//input[@name='username']"
password_box_xpath = "//input[@name='password']"
SuicideGirls.driver.find_element_by_xpath(login_button_xpath).click()
SuicideGirls.driver.find_element_by_xpath(username_box_xpath).send_keys(self.username)
SuicideGirls.driver.find_element_by_xpath(password_box_xpath).send_keys(self.password)
SuicideGirls.driver.find_element_by_xpath(login_form_submit_xpath).click()
time.sleep(5)
flag = False;
while True:
try:
image_select = SuicideGirls.driver.find_element_by_xpath("//iframe[@title='recaptcha challenge']")
if not flag:
print("Found a captcha!")
flag = True
except:
break;
print("No captcha found!")
def rip(self):
for url in self.urls:
SuicideGirls.driver.get(url)
if self.__type == "girl":
print("Single girl")
self.__rip_girl()
elif self.__type == "girls":
print("All Suicide Girls")
self.__rip_all_girls()
elif self.__type == "hopefuls":
print("All hopefuls")
self.__rip_all_hopefuls()
elif self.__type == "sotds":
print("All sets of the day")
self.__rip_all_sets_of_the_day()
elif self.__type == "set":
print("Single set")
self.__rip_set()
elif self.__type == "all":
print("All!")
self.__rip_all_photos()
SuicideGirls.stop_dispatching = True
SuicideGirls.dispatcher_thread.join()
print("Rip completed.")
print("Total girls/hopefuls ripped: " + str(self.girls_completed))
print("Total sets ripped: " + str(self.sets_completed))
def __rip_all_photos(self):
SuicideGirls.driver.get(self.urls[0])
self.__type = "hopefuls"
self.__rip_all_hopefuls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "girls"
self.__rip_all_girls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "sotds"
self.__rip_all_sets_of_the_day()
def __rip_all_girls(self):
suicide_girls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'SuicideGirls']"
self.__rip_all(suicide_girls_xpath)
def __rip_all_hopefuls(self):
hopefuls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Hopefuls']"
self.__rip_all(hopefuls_xpath)
def __rip_all_sets_of_the_day(self):
sotds_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Sets Of The Day']"
self.__rip_all(sotds_xpath)
def __rip_all(self, type_xpath):
time_period_xpath = "//li[@class='dropdown'][3]//ul/li/a[text() = '" + self.time_period + "']"
girl_name_xpath = "//article/header//h2/a"
load_more_xpath = "//a[@id='load-more']"
choice = SuicideGirls.driver.find_element_by_xpath(type_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
choice = SuicideGirls.driver.find_element_by_xpath(time_period_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
girls = []
iteration = 0
while True:
iteration += 1
names = SuicideGirls.driver.find_elements_by_xpath(girl_name_xpath)
for name in names:
girls.append(name.text)
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<24;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
girls = list(set(girls))
for girl in sorted(girls):
url = self.__build_url(girl)
SuicideGirls.driver.get(url)
self.__rip_girl()
def __rip_girl(self):
load_more_xpath = "//a[@id='load-more']"
photos_xpath = "//div[@id='content-container']//a[text()='Photos']"
photosets_xpath = "//div[@id='content-container']//a[text()='Photosets']"
set_title_xpath = "//article/header//h2/a"
url = SuicideGirls.driver.find_element_by_xpath(photos_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
url = SuicideGirls.driver.find_element_by_xpath(photosets_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
set_links = []
iteration = 0
while True:
iteration += 1
titles = SuicideGirls.driver.find_elements_by_xpath(set_title_xpath)
for title in titles:
set_links.append(title.get_attribute("href"))
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<9;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
set_links = list(set(set_links))
for link in set_links:
SuicideGirls.driver.get(link)
self.__rip_set()
self.girls_completed += 1
def __rip_set(self):
girl_xpath = "//h1/a"
title_xpath = "//header[@class='header']/div[@class='top-bar']/h2[@class='title']"
full_image_button_xpath = "//a[@id='button-view_full_size']"
full_image_url_xpath = "//div[@data-image_url]"
girl = SuicideGirls.driver.find_element_by_xpath(girl_xpath).text
title = SuicideGirls.driver.find_element_by_xpath(title_xpath).text
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
check = False
if os.path.exists(dir_name):
check = True
SuicideGirls.driver.find_element_by_xpath(full_image_button_xpath).click()
time.sleep(5)
images = SuicideGirls.driver.find_elements_by_xpath(full_image_url_xpath)
image_urls = []
for i in range(0, len(images)):
url = images[i].get_attribute("data-image_url")
ext = url[url.rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if not os.path.exists(os.path.join(dir_name, file_name)):
image_urls.append(url)
else:
print(girl.title() + "/" + title.title() + " Img" + str(i).zfill(3) + " already exists, skipping...")
self.__download_and_save_set(image_urls, girl, title)
self.sets_completed += 1
def __download_and_save_set(self, urls, girl, title):
aria_path = os.path.join(self.exec_dir, "dependencies", "aria2", "aria2c.exe")
error_strings = []
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
with multiprocessing.Pool(8) as pool:
args = []
for i in range (0, len(urls)):
command = [aria_path, "-d", dir_name, "-o"]
ext = urls[i][urls[i].rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if os.path.exists(dir_name + file_name):
continue
command.append(file_name)
command.append(urls[i])
args.append((error_strings, command, str(i + 1), urls[i], girl, title))
SuicideGirls.argument_lists.append(args)
if len(error_strings) > 0:
f = open(os.path.join(dir_name, "errors.txt", "w"))
f.write("\n".join(sorted(error_strings)))
f.close()
def __build_url(self, name):
if self.__type in ["girl", "girls", "sotds"]:
return "https://www.suicidegirls.com/girls/" + name
elif self.__type in ["hopeful", "hopefuls"]:
return "https://www.suicidegirls.com/members/" + name
def download_image(self, args):
process = subprocess.run(args[1])
if process.returncode != 0:
args[0].append("\tImage " + args[2] + " failed; URL: " + args[3])
print(args[4].title() + "/" + args[5].title() + " #" + args[2] + " complete")
def start_processes(async_result):
async_result.get()
def print_warning():
print("T | ame__ == "__main__":
print_warning()
| his file is meant to be imported by other Python files, not run directly. Exiting now.")
if __n | identifier_body |
suicidegirls.py | import time, subprocess, os.path, re, multiprocessing, threading
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class SuicideGirls:
driver = None
dispatcher_thread = None
argument_lists = []
stop_dispatching = False
def __init__(self, exec_dir, username, password, dir, process_limit, urls, type, time_period):
SuicideGirls.dispatcher_thread = threading.Thread(target=self.__dispatch)
self.username = username
self.password = password
self.root_url = "https://www.suicidegirls.com/"
self.dir = dir
self.exec_dir = exec_dir
self.process_limit = process_limit
self.urls = []
self.__type = type
self.process_limit = process_limit
self.time_period = time_period
self.girls_completed = 0
self.sets_completed = 0
if type in ["girl", "hopeful"]:
for url in urls:
self.urls.append(self.__build_url(url))
else:
self.urls = urls
SuicideGirls.dispatcher_thread.start()
def __dispatch(self):
print("Beginning dispatcher thread...")
while not SuicideGirls.stop_dispatching or len(SuicideGirls.argument_lists) != 0:
if len(SuicideGirls.argument_lists) != 0:
print("Argument list found! Dispatching...")
argument_list = SuicideGirls.argument_lists.pop(0)
pool = multiprocessing.Pool(self.process_limit)
pool.map(self.download_image, argument_list)
# Girls: Riae (36), Fishball (28), Vandoll (7)
# Total photosets: 71
# Processes: 8
# map: 00:24:37
# map_async: 00:12:33
print("Exiting dispatcher thread...")
def startup(self):
SuicideGirls.driver = webdriver.Chrome(executable_path="dependencies/chromedriver.exe")
SuicideGirls.driver.maximize_window()
SuicideGirls.driver.implicitly_wait(5)
SuicideGirls.driver.get(self.root_url)
self.__login()
def shutdown(self):
SuicideGirls.driver.quit()
def __login(self):
login_button_xpath = "//a[@class='login button' or @class='button login']"
login_form_submit_xpath = "//button[@type='submit' and text()='Login']"
username_box_xpath = "//input[@name='username']"
password_box_xpath = "//input[@name='password']"
SuicideGirls.driver.find_element_by_xpath(login_button_xpath).click()
SuicideGirls.driver.find_element_by_xpath(username_box_xpath).send_keys(self.username)
SuicideGirls.driver.find_element_by_xpath(password_box_xpath).send_keys(self.password)
SuicideGirls.driver.find_element_by_xpath(login_form_submit_xpath).click()
time.sleep(5)
flag = False;
while True:
try:
image_select = SuicideGirls.driver.find_element_by_xpath("//iframe[@title='recaptcha challenge']")
if not flag:
print("Found a captcha!")
flag = True
except:
break;
print("No captcha found!")
def rip(self):
for url in self.urls:
SuicideGirls.driver.get(url)
if self.__type == "girl":
print("Single girl")
self.__rip_girl()
elif self.__type == "girls":
print("All Suicide Girls")
self.__rip_all_girls()
elif self.__type == "hopefuls":
print("All hopefuls")
self.__rip_all_hopefuls()
elif self.__type == "sotds":
print("All sets of the day")
self.__rip_all_sets_of_the_day()
elif self.__type == "set":
print("Single set")
self.__rip_set()
elif self.__type == "all":
print("All!")
self.__rip_all_photos()
SuicideGirls.stop_dispatching = True
SuicideGirls.dispatcher_thread.join()
print("Rip completed.")
print("Total girls/hopefuls ripped: " + str(self.girls_completed))
print("Total sets ripped: " + str(self.sets_completed))
def __rip_all_photos(self):
SuicideGirls.driver.get(self.urls[0])
self.__type = "hopefuls"
self.__rip_all_hopefuls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "girls"
self.__rip_all_girls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "sotds"
self.__rip_all_sets_of_the_day()
def __rip_all_girls(self):
suicide_girls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'SuicideGirls']"
self.__rip_all(suicide_girls_xpath)
def __rip_all_hopefuls(self):
hopefuls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Hopefuls']"
self.__rip_all(hopefuls_xpath)
def __rip_all_sets_of_the_day(self):
sotds_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Sets Of The Day']"
self.__rip_all(sotds_xpath)
def | (self, type_xpath):
time_period_xpath = "//li[@class='dropdown'][3]//ul/li/a[text() = '" + self.time_period + "']"
girl_name_xpath = "//article/header//h2/a"
load_more_xpath = "//a[@id='load-more']"
choice = SuicideGirls.driver.find_element_by_xpath(type_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
choice = SuicideGirls.driver.find_element_by_xpath(time_period_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
girls = []
iteration = 0
while True:
iteration += 1
names = SuicideGirls.driver.find_elements_by_xpath(girl_name_xpath)
for name in names:
girls.append(name.text)
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<24;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
girls = list(set(girls))
for girl in sorted(girls):
url = self.__build_url(girl)
SuicideGirls.driver.get(url)
self.__rip_girl()
def __rip_girl(self):
load_more_xpath = "//a[@id='load-more']"
photos_xpath = "//div[@id='content-container']//a[text()='Photos']"
photosets_xpath = "//div[@id='content-container']//a[text()='Photosets']"
set_title_xpath = "//article/header//h2/a"
url = SuicideGirls.driver.find_element_by_xpath(photos_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
url = SuicideGirls.driver.find_element_by_xpath(photosets_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
set_links = []
iteration = 0
while True:
iteration += 1
titles = SuicideGirls.driver.find_elements_by_xpath(set_title_xpath)
for title in titles:
set_links.append(title.get_attribute("href"))
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<9;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
set_links = list(set(set_links))
for link in set_links:
SuicideGirls.driver.get(link)
self.__rip_set()
self.girls_completed += 1
def __rip_set(self):
girl_xpath = "//h1/a"
title_xpath = "//header[@class='header']/div[@class='top-bar']/h2[@class='title']"
full_image_button_xpath = "//a[@id='button-view_full_size']"
full_image_url_xpath = "//div[@data-image_url]"
girl = SuicideGirls.driver.find_element_by_xpath(girl_xpath).text
title = SuicideGirls.driver.find_element_by_xpath(title_xpath).text
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
check = False
if os.path.exists(dir_name):
check = True
SuicideGirls.driver.find_element_by_xpath(full_image_button_xpath).click()
time.sleep(5)
images = SuicideGirls.driver.find_elements_by_xpath(full_image_url_xpath)
image_urls = []
for i in range(0, len(images)):
url = images[i].get_attribute("data-image_url")
ext = url[url.rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if not os.path.exists(os.path.join(dir_name, file_name)):
image_urls.append(url)
else:
print(girl.title() + "/" + title.title() + " Img" + str(i).zfill(3) + " already exists, skipping...")
self.__download_and_save_set(image_urls, girl, title)
self.sets_completed += 1
def __download_and_save_set(self, urls, girl, title):
aria_path = os.path.join(self.exec_dir, "dependencies", "aria2", "aria2c.exe")
error_strings = []
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
with multiprocessing.Pool(8) as pool:
args = []
for i in range (0, len(urls)):
command = [aria_path, "-d", dir_name, "-o"]
ext = urls[i][urls[i].rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if os.path.exists(dir_name + file_name):
continue
command.append(file_name)
command.append(urls[i])
args.append((error_strings, command, str(i + 1), urls[i], girl, title))
SuicideGirls.argument_lists.append(args)
if len(error_strings) > 0:
f = open(os.path.join(dir_name, "errors.txt", "w"))
f.write("\n".join(sorted(error_strings)))
f.close()
def __build_url(self, name):
if self.__type in ["girl", "girls", "sotds"]:
return "https://www.suicidegirls.com/girls/" + name
elif self.__type in ["hopeful", "hopefuls"]:
return "https://www.suicidegirls.com/members/" + name
def download_image(self, args):
process = subprocess.run(args[1])
if process.returncode != 0:
args[0].append("\tImage " + args[2] + " failed; URL: " + args[3])
print(args[4].title() + "/" + args[5].title() + " #" + args[2] + " complete")
def start_processes(async_result):
async_result.get()
def print_warning():
print("This file is meant to be imported by other Python files, not run directly. Exiting now.")
if __name__ == "__main__":
print_warning()
| __rip_all | identifier_name |
suicidegirls.py | import time, subprocess, os.path, re, multiprocessing, threading
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class SuicideGirls:
driver = None
dispatcher_thread = None
argument_lists = []
stop_dispatching = False
def __init__(self, exec_dir, username, password, dir, process_limit, urls, type, time_period):
SuicideGirls.dispatcher_thread = threading.Thread(target=self.__dispatch)
self.username = username
self.password = password
self.root_url = "https://www.suicidegirls.com/"
self.dir = dir
self.exec_dir = exec_dir
self.process_limit = process_limit
self.urls = []
self.__type = type
self.process_limit = process_limit
self.time_period = time_period
self.girls_completed = 0
self.sets_completed = 0
if type in ["girl", "hopeful"]:
for url in urls:
self.urls.append(self.__build_url(url))
else:
self.urls = urls
SuicideGirls.dispatcher_thread.start()
def __dispatch(self):
print("Beginning dispatcher thread...")
while not SuicideGirls.stop_dispatching or len(SuicideGirls.argument_lists) != 0:
if len(SuicideGirls.argument_lists) != 0:
print("Argument list found! Dispatching...")
argument_list = SuicideGirls.argument_lists.pop(0)
pool = multiprocessing.Pool(self.process_limit)
pool.map(self.download_image, argument_list)
# Girls: Riae (36), Fishball (28), Vandoll (7)
# Total photosets: 71
# Processes: 8
# map: 00:24:37
# map_async: 00:12:33
print("Exiting dispatcher thread...")
def startup(self):
SuicideGirls.driver = webdriver.Chrome(executable_path="dependencies/chromedriver.exe")
SuicideGirls.driver.maximize_window()
SuicideGirls.driver.implicitly_wait(5)
SuicideGirls.driver.get(self.root_url)
self.__login()
def shutdown(self):
SuicideGirls.driver.quit()
def __login(self):
login_button_xpath = "//a[@class='login button' or @class='button login']"
login_form_submit_xpath = "//button[@type='submit' and text()='Login']"
username_box_xpath = "//input[@name='username']"
password_box_xpath = "//input[@name='password']"
SuicideGirls.driver.find_element_by_xpath(login_button_xpath).click()
SuicideGirls.driver.find_element_by_xpath(username_box_xpath).send_keys(self.username)
SuicideGirls.driver.find_element_by_xpath(password_box_xpath).send_keys(self.password)
SuicideGirls.driver.find_element_by_xpath(login_form_submit_xpath).click()
time.sleep(5)
flag = False;
while True:
try:
image_select = SuicideGirls.driver.find_element_by_xpath("//iframe[@title='recaptcha challenge']")
if not flag:
print("Found a captcha!")
flag = True
except:
break;
print("No captcha found!")
def rip(self):
for url in self.urls:
SuicideGirls.driver.get(url)
if self.__type == "girl":
print("Single girl")
self.__rip_girl()
elif self.__type == "girls":
print("All Suicide Girls")
self.__rip_all_girls()
elif self.__type == "hopefuls":
print("All hopefuls")
self.__rip_all_hopefuls()
elif self.__type == "sotds":
print("All sets of the day")
self.__rip_all_sets_of_the_day()
elif self.__type == "set":
print("Single set")
self.__rip_set()
elif self.__type == "all":
print("All!")
self.__rip_all_photos()
SuicideGirls.stop_dispatching = True
SuicideGirls.dispatcher_thread.join()
print("Rip completed.")
print("Total girls/hopefuls ripped: " + str(self.girls_completed))
print("Total sets ripped: " + str(self.sets_completed))
def __rip_all_photos(self):
SuicideGirls.driver.get(self.urls[0])
self.__type = "hopefuls"
self.__rip_all_hopefuls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "girls"
self.__rip_all_girls()
SuicideGirls.driver.get(self.urls[0])
self.__type = "sotds"
self.__rip_all_sets_of_the_day()
def __rip_all_girls(self):
suicide_girls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'SuicideGirls']"
self.__rip_all(suicide_girls_xpath)
def __rip_all_hopefuls(self):
hopefuls_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Hopefuls']"
self.__rip_all(hopefuls_xpath)
def __rip_all_sets_of_the_day(self):
sotds_xpath = "//li[@class='dropdown'][1]//ul/li/a[text() = 'Sets Of The Day']"
self.__rip_all(sotds_xpath)
def __rip_all(self, type_xpath):
time_period_xpath = "//li[@class='dropdown'][3]//ul/li/a[text() = '" + self.time_period + "']"
girl_name_xpath = "//article/header//h2/a"
load_more_xpath = "//a[@id='load-more']"
choice = SuicideGirls.driver.find_element_by_xpath(type_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
choice = SuicideGirls.driver.find_element_by_xpath(time_period_xpath)
SuicideGirls.driver.get(choice.get_attribute("href"))
girls = []
iteration = 0
while True:
iteration += 1
names = SuicideGirls.driver.find_elements_by_xpath(girl_name_xpath)
for name in names:
girls.append(name.text)
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<24;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}")
time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath)
if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
girls = list(set(girls))
for girl in sorted(girls):
url = self.__build_url(girl)
SuicideGirls.driver.get(url)
self.__rip_girl()
def __rip_girl(self):
load_more_xpath = "//a[@id='load-more']"
photos_xpath = "//div[@id='content-container']//a[text()='Photos']"
photosets_xpath = "//div[@id='content-container']//a[text()='Photosets']"
set_title_xpath = "//article/header//h2/a"
url = SuicideGirls.driver.find_element_by_xpath(photos_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
url = SuicideGirls.driver.find_element_by_xpath(photosets_xpath).get_attribute("href")
SuicideGirls.driver.get(url)
set_links = []
iteration = 0
while True:
iteration += 1
titles = SuicideGirls.driver.find_elements_by_xpath(set_title_xpath)
for title in titles:
set_links.append(title.get_attribute("href"))
if iteration > 1:
SuicideGirls.driver.execute_script("for(i=0;i<9;i++) {e = document.evaluate(\"//article[1]\", document.documentElement); e = e.iterateNext(); if (e == null) {break;}e.parentNode.removeChild(e);}") | if len(lmb) > 0 and lmb[0].is_displayed():
lmb[0].click()
time.sleep(10)
else:
break
set_links = list(set(set_links))
for link in set_links:
SuicideGirls.driver.get(link)
self.__rip_set()
self.girls_completed += 1
def __rip_set(self):
girl_xpath = "//h1/a"
title_xpath = "//header[@class='header']/div[@class='top-bar']/h2[@class='title']"
full_image_button_xpath = "//a[@id='button-view_full_size']"
full_image_url_xpath = "//div[@data-image_url]"
girl = SuicideGirls.driver.find_element_by_xpath(girl_xpath).text
title = SuicideGirls.driver.find_element_by_xpath(title_xpath).text
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
check = False
if os.path.exists(dir_name):
check = True
SuicideGirls.driver.find_element_by_xpath(full_image_button_xpath).click()
time.sleep(5)
images = SuicideGirls.driver.find_elements_by_xpath(full_image_url_xpath)
image_urls = []
for i in range(0, len(images)):
url = images[i].get_attribute("data-image_url")
ext = url[url.rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if not os.path.exists(os.path.join(dir_name, file_name)):
image_urls.append(url)
else:
print(girl.title() + "/" + title.title() + " Img" + str(i).zfill(3) + " already exists, skipping...")
self.__download_and_save_set(image_urls, girl, title)
self.sets_completed += 1
def __download_and_save_set(self, urls, girl, title):
aria_path = os.path.join(self.exec_dir, "dependencies", "aria2", "aria2c.exe")
error_strings = []
dir_name = os.path.join("Suicide Girls", girl.title(), title.title())
dir_name = re.subn("[<>:\"/\|?*]", "", dir_name)[0]
dir_name = re.subn("\\.{3,}", "…", dir_name)[0]
dir_name = os.path.join(self.dir, dir_name)
with multiprocessing.Pool(8) as pool:
args = []
for i in range (0, len(urls)):
command = [aria_path, "-d", dir_name, "-o"]
ext = urls[i][urls[i].rindex("."):]
file_name = "Suicide Girls - " + girl.title() + " - " + title.title() + " - Img" + str(i + 1).zfill(3) + ext
file_name = re.subn("[<>:\"/\|?*]", "", file_name)[0]
file_name = re.subn("\\.{3,}", "…", file_name)[0]
if os.path.exists(dir_name + file_name):
continue
command.append(file_name)
command.append(urls[i])
args.append((error_strings, command, str(i + 1), urls[i], girl, title))
SuicideGirls.argument_lists.append(args)
if len(error_strings) > 0:
f = open(os.path.join(dir_name, "errors.txt", "w"))
f.write("\n".join(sorted(error_strings)))
f.close()
def __build_url(self, name):
if self.__type in ["girl", "girls", "sotds"]:
return "https://www.suicidegirls.com/girls/" + name
elif self.__type in ["hopeful", "hopefuls"]:
return "https://www.suicidegirls.com/members/" + name
def download_image(self, args):
process = subprocess.run(args[1])
if process.returncode != 0:
args[0].append("\tImage " + args[2] + " failed; URL: " + args[3])
print(args[4].title() + "/" + args[5].title() + " #" + args[2] + " complete")
def start_processes(async_result):
async_result.get()
def print_warning():
print("This file is meant to be imported by other Python files, not run directly. Exiting now.")
if __name__ == "__main__":
print_warning() | time.sleep(2)
lmb = SuicideGirls.driver.find_elements_by_xpath(load_more_xpath) | random_line_split |
app.go | package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"io/ioutil"
"github.com/go-martini/martini"
"github.com/martini-contrib/render"
redis "gopkg.in/redis.v3"
)
type Ad struct {
Slot string `json:"slot"`
Id string `json:"id"`
Title string `json:"title"`
Type string `json:"type"`
Advertiser string `json:"advertiser"`
Destination string `json:"destination"`
Impressions int `json:"impressions"`
}
type AdWithEndpoints struct {
Ad
Asset string `json:"asset"`
Redirect string `json:"redirect"`
Counter string `json:"counter"`
}
type ClickLog struct {
AdId string `json:"ad_id"`
User string `json:"user"`
Agent string `json:"agent"`
Gender string `json:"gender"`
Age int `json:"age"`
}
type Report struct {
Ad *Ad `json:"ad"`
Clicks int `json:"clicks"`
Impressions int `json:"impressions"`
Breakdown *BreakdownReport `json:"breakdown,omitempty"`
}
type BreakdownReport struct {
Gender map[string]int `json:"gender"`
Agents map[string]int `json:"agents"`
Generations map[string]int `json:"generations"`
}
var rd *redis.Client
var port = flag.Uint("port", 0, "port to listen")
var isMaster = flag.Bool("master", false, "is master?")
var globalIP []string = []string{"130.211.255.138", "104.155.201.2", "130.211.251.102"}
var internalIP []string = []string{"10.240.0.2", "10.240.0.3", "10.240.0.4"}
func init() {
rd = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
DB: 0,
})
flag.Parse()
}
func getDir(name string) string {
base_dir := "/tmp/go/"
path := base_dir + name
os.MkdirAll(path, 0755)
return path
}
func urlFor(req *http.Request, path string) string {
host := req.Host
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func urlFor2(path string, id string) string {
i, _ := strconv.Atoi(id)
host := internalIP[i%3]
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func fetch(hash map[string]string, key string, defaultValue string) string {
if hash[key] == "" {
return defaultValue
} else {
return hash[key]
}
}
func incr_map(dict *map[string]int, key string) {
_, exists := (*dict)[key]
if !exists {
(*dict)[key] = 0
}
(*dict)[key]++
}
func advertiserId(req *http.Request) string {
return req.Header.Get("X-Advertiser-Id")
}
func adKey(slot string, id string) string {
return "isu4:ad:" + slot + "-" + id
}
func assetKey(slot string, id string) string {
return "isu4:asset:" + slot + "-" + id
}
const assetBaseDir = "/var/tmp/isu4"
func initAssetBaseDir() {
err := os.RemoveAll(assetBaseDir)
if err != nil {
panic(err)
}
err = os.MkdirAll(assetBaseDir, os.ModePerm)
if err != nil {
panic(err)
}
}
func assetFile(slot string, id string) string {
return assetBaseDir + "/slots/" + slot + "/ads/" + id + "/asset"
}
func advertiserKey(id string) string {
return "isu4:advertiser:" + id
}
func slotKey(slot string) string {
return "isu4:slot:" + slot
}
func nextAdId() string {
id, _ := rd.Incr("isu4:ad-next").Result()
return strconv.FormatInt(id, 10)
}
func nextAd(req *http.Request, slot string) *AdWithEndpoints {
key := slotKey(slot)
id, _ := rd.RPopLPush(key, key).Result()
if id == "" {
return nil
}
ad := getAd(req, slot, id)
if ad != nil {
return ad
} else {
rd.LRem(key, 0, id).Result()
return nextAd(req, slot)
}
}
func getAd(req *http.Request, slot string, id string) *AdWithEndpoints {
key := adKey(slot, id)
m, _ := rd.HGetAllMap(key).Result()
if m == nil {
return nil
}
if _, exists := m["id"]; !exists {
return nil
}
imp, _ := strconv.Atoi(m["impressions"])
path_base := "/slots/" + slot + "/ads/" + id
var ad *AdWithEndpoints
ad = &AdWithEndpoints{
Ad{
m["slot"],
m["id"],
m["title"],
m["type"],
m["advertiser"],
m["destination"],
imp,
},
urlFor2(path_base+"/asset", id),
urlFor(req, path_base+"/redirect"),
urlFor(req, path_base+"/count"),
}
return ad
}
func decodeUserKey(id string) (string, int) {
if id == "" {
return "unknown", -1
}
splitted := strings.Split(id, "/")
gender := "male"
if splitted[0] == "0" {
gender = "female"
}
age, _ := strconv.Atoi(splitted[1])
return gender, age
}
func getLogPath(advrId string) string {
dir := getDir("log")
splitted := strings.Split(advrId, "/")
return dir + "/" + splitted[len(splitted)-1]
}
func getLog(id string) map[string][]ClickLog {
path := getLogPath(id)
result := map[string][]ClickLog{}
if _, err := os.Stat(path); os.IsNotExist(err) {
return result
}
f, err := os.Open(path)
if err != nil {
panic(err)
}
defer f.Close()
err = syscall.Flock(int(f.Fd()), syscall.LOCK_SH)
if err != nil {
panic(err)
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimRight(line, "\n")
sp := strings.Split(line, "\t")
ad_id := sp[0]
user := sp[1]
agent := sp[2]
if agent == "" {
agent = "unknown"
}
gender, age := decodeUserKey(sp[1])
if result[ad_id] == nil {
result[ad_id] = []ClickLog{}
}
data := ClickLog{ad_id, user, agent, gender, age}
result[ad_id] = append(result[ad_id], data)
}
return result
}
func routePostAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
advrId := advertiserId(req)
if advrId == "" {
r.Status(404)
return
}
req.ParseMultipartForm(100000)
asset := req.MultipartForm.File["asset"][0]
id := nextAdId()
key := adKey(slot, id)
content_type := ""
if len(req.Form["type"]) > 0 {
content_type = req.Form["type"][0]
}
if content_type == "" && len(asset.Header["Content-Type"]) > 0 {
content_type = asset.Header["Content-Type"][0]
}
if content_type == "" {
content_type = "video/mp4"
}
title := ""
if a := req.Form["title"]; a != nil {
title = a[0]
}
destination := ""
if a := req.Form["destination"]; a != nil {
destination = a[0]
}
rd.HMSet(key,
"slot", slot,
"id", id,
"title", title,
"type", content_type,
"advertiser", advrId,
"destination", destination,
"impressions", "0",
)
f, _ := asset.Open()
defer f.Close()
buf := bytes.NewBuffer(nil)
io.Copy(buf, f)
fname := assetFile(slot, id)
err := os.MkdirAll(filepath.Dir(fname), os.ModePerm)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(fname, buf.Bytes(), os.ModePerm)
if err != nil {
panic(err)
}
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/fs"+assetFile(slot, id), content_type, bytes.NewReader(buf.Bytes()))
if err != nil {
panic(err)
}
}
}
rd.RPush(slotKey(slot), id)
rd.SAdd(advertiserKey(advrId), key)
r.JSON(200, getAd(req, slot, id))
}
func routeGetAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
ad := nextAd(req, slot)
if ad != nil {
r.Redirect("/slots/" + slot + "/ads/" + ad.Id)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdWithId(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad != nil {
r.JSON(200, ad)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 {
r.Status(416)
return
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
res.Header().Set("Content-Range", content_range)
res.Header().Set("Content-Length", strconv.Itoa(len(range_data)))
r.Data(206, []byte(range_data))
}
func routeGetAdCount(r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
key := adKey(slot, id)
exists, _ := rd.Exists(key).Result()
if !exists {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
rd.HIncrBy(key, "impressions", 1).Result()
r.Status(204)
}
func routeGetAdRedirect(req *http.Request, r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
isuad := ""
cookie, err := req.Cookie("isuad")
if err != nil {
if err != http.ErrNoCookie {
panic(err)
}
} else {
isuad = cookie.Value
}
ua := req.Header.Get("User-Agent")
path := getLogPath(ad.Advertiser)
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
if err != nil {
panic(err)
}
fmt.Fprintf(f, "%s\t%s\t%s\n", ad.Id, isuad, ua)
f.Close()
r.Redirect(ad.Destination)
}
func routeGetReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
report := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
report[ad["id"]] = data
}
for adId, clicks := range getLog(advrId) {
if _, exists := report[adId]; !exists {
report[adId] = &Report{}
}
report[adId].Clicks = len(clicks)
}
r.JSON(200, report)
}
func routeGetFinalReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
reports := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
reports[ad["id"]] = data
}
logs := getLog(advrId)
for adId, report := range reports {
log, exists := logs[adId]
if exists {
report.Clicks = len(log)
}
breakdown := &BreakdownReport{
map[string]int{},
map[string]int{},
map[string]int{},
}
for i := range log {
click := log[i]
incr_map(&breakdown.Gender, click.Gender)
incr_map(&breakdown.Agents, click.Agent)
generation := "unknown"
if click.Age != -1 {
generation = strconv.Itoa(click.Age / 10)
}
incr_map(&breakdown.Generations, generation)
}
report.Breakdown = breakdown
reports[adId] = report
}
r.JSON(200, reports)
}
func routePostInitialize() (int, string) {
keys, _ := rd.Keys("isu4:*").Result()
for i := range keys {
key := keys[i]
rd.Del(key)
}
path := getDir("log")
os.RemoveAll(path)
initAssetBaseDir()
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/initialize_slave", "", nil)
if err != nil {
panic(err)
}
}
}
return 200, "OK"
}
func routePostInitializeSlave() (int, string) {
initAssetBaseDir()
return 200, "OK"
}
var FSPathPrefix = "/fs"
var FSRoot = "/"
var FSDirPermission os.FileMode = 0777
// curl -XPOST --data-binary "hoge" -v http://127.0.0.1:8080/fs/foo
func routePostFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
dir := filepath.Dir(path)
log.Println(dir)
err := os.MkdirAll(dir, FSDirPermission)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
file, err := os.Create(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close()
written, err := io.Copy(file, r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println(written)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeDeleteFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
info, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
http.Error(w, "", http.StatusNotFound)
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if info.IsDir() {
err = os.RemoveAll(path)
if err != nil |
return
}
err = os.Remove(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
w.WriteHeader(http.StatusOK)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeGetFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
http.ServeFile(w, r, path)
return
}
func main() {
m := martini.Classic()
m.Use(martini.Static("../public"))
m.Use(render.Renderer(render.Options{
Layout: "layout",
}))
m.Group("/slots/:slot", func(r martini.Router) {
m.Post("/ads", routePostAd)
m.Get("/ad", routeGetAd)
m.Get("/ads/:id", routeGetAdWithId)
m.Get("/ads/:id/asset", routeGetAdAsset)
m.Post("/ads/:id/count", routeGetAdCount)
m.Get("/ads/:id/redirect", routeGetAdRedirect)
})
m.Group("/me", func(r martini.Router) {
m.Get("/report", routeGetReport)
m.Get("/final_report", routeGetFinalReport)
})
m.Post("/initialize", routePostInitialize)
m.Post("/initialize_slave", routePostInitializeSlave)
m.Group(FSPathPrefix, func(r martini.Router) {
m.Post("/(?P<path>[a-zA-Z0-9._/-]+)", routePostFs)
m.Delete("/(?P<path>[a-zA-Z0-9._/-]+)", routeDeleteFs)
m.Get("/(?P<path>[a-zA-Z0-9._/-]+)", routeGetFs)
})
sigchan := make(chan os.Signal)
signal.Notify(sigchan, syscall.SIGTERM)
signal.Notify(sigchan, syscall.SIGINT)
var l net.Listener
var err error
sock := "/tmp/server.sock"
if *port == 0 {
ferr := os.Remove(sock)
if ferr != nil {
if !os.IsNotExist(ferr) {
panic(ferr.Error())
}
}
l, err = net.Listen("unix", sock)
cerr := os.Chmod(sock, 0666)
if cerr != nil {
panic(cerr.Error())
}
} else {
l, err = net.ListenTCP("tcp", &net.TCPAddr{Port: int(*port)})
}
if err != nil {
panic(err.Error())
}
go func() {
// func Serve(l net.Listener, handler Handler) error
log.Println(http.Serve(l, m))
}()
<-sigchan
}
| {
http.Error(w, err.Error(), http.StatusInternalServerError)
} | conditional_block |
app.go | package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"io/ioutil"
"github.com/go-martini/martini"
"github.com/martini-contrib/render"
redis "gopkg.in/redis.v3"
)
type Ad struct {
Slot string `json:"slot"`
Id string `json:"id"`
Title string `json:"title"`
Type string `json:"type"`
Advertiser string `json:"advertiser"`
Destination string `json:"destination"`
Impressions int `json:"impressions"`
}
type AdWithEndpoints struct {
Ad
Asset string `json:"asset"`
Redirect string `json:"redirect"`
Counter string `json:"counter"`
}
type ClickLog struct {
AdId string `json:"ad_id"`
User string `json:"user"`
Agent string `json:"agent"`
Gender string `json:"gender"`
Age int `json:"age"`
}
type Report struct {
Ad *Ad `json:"ad"`
Clicks int `json:"clicks"`
Impressions int `json:"impressions"`
Breakdown *BreakdownReport `json:"breakdown,omitempty"`
}
type BreakdownReport struct {
Gender map[string]int `json:"gender"`
Agents map[string]int `json:"agents"`
Generations map[string]int `json:"generations"`
}
var rd *redis.Client
var port = flag.Uint("port", 0, "port to listen")
var isMaster = flag.Bool("master", false, "is master?")
var globalIP []string = []string{"130.211.255.138", "104.155.201.2", "130.211.251.102"}
var internalIP []string = []string{"10.240.0.2", "10.240.0.3", "10.240.0.4"}
func init() {
rd = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
DB: 0,
})
flag.Parse()
}
func getDir(name string) string {
base_dir := "/tmp/go/"
path := base_dir + name
os.MkdirAll(path, 0755)
return path
}
func urlFor(req *http.Request, path string) string {
host := req.Host
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func urlFor2(path string, id string) string {
i, _ := strconv.Atoi(id)
host := internalIP[i%3]
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func fetch(hash map[string]string, key string, defaultValue string) string {
if hash[key] == "" {
return defaultValue
} else {
return hash[key]
}
}
func incr_map(dict *map[string]int, key string) {
_, exists := (*dict)[key]
if !exists {
(*dict)[key] = 0
}
(*dict)[key]++
}
func advertiserId(req *http.Request) string {
return req.Header.Get("X-Advertiser-Id")
}
func adKey(slot string, id string) string {
return "isu4:ad:" + slot + "-" + id
}
func assetKey(slot string, id string) string {
return "isu4:asset:" + slot + "-" + id
}
const assetBaseDir = "/var/tmp/isu4"
func initAssetBaseDir() {
err := os.RemoveAll(assetBaseDir)
if err != nil {
panic(err)
}
err = os.MkdirAll(assetBaseDir, os.ModePerm)
if err != nil {
panic(err)
}
}
func assetFile(slot string, id string) string {
return assetBaseDir + "/slots/" + slot + "/ads/" + id + "/asset"
}
func advertiserKey(id string) string {
return "isu4:advertiser:" + id
}
func slotKey(slot string) string {
return "isu4:slot:" + slot
}
func nextAdId() string {
id, _ := rd.Incr("isu4:ad-next").Result()
return strconv.FormatInt(id, 10)
}
func nextAd(req *http.Request, slot string) *AdWithEndpoints {
key := slotKey(slot)
id, _ := rd.RPopLPush(key, key).Result()
if id == "" {
return nil
}
ad := getAd(req, slot, id)
if ad != nil {
return ad
} else {
rd.LRem(key, 0, id).Result()
return nextAd(req, slot)
}
}
func getAd(req *http.Request, slot string, id string) *AdWithEndpoints {
key := adKey(slot, id)
m, _ := rd.HGetAllMap(key).Result()
if m == nil {
return nil
}
if _, exists := m["id"]; !exists {
return nil
}
imp, _ := strconv.Atoi(m["impressions"])
path_base := "/slots/" + slot + "/ads/" + id
var ad *AdWithEndpoints
ad = &AdWithEndpoints{
Ad{
m["slot"],
m["id"],
m["title"],
m["type"],
m["advertiser"],
m["destination"],
imp,
},
urlFor2(path_base+"/asset", id),
urlFor(req, path_base+"/redirect"),
urlFor(req, path_base+"/count"),
}
return ad
}
func decodeUserKey(id string) (string, int) {
if id == "" {
return "unknown", -1
}
splitted := strings.Split(id, "/")
gender := "male"
if splitted[0] == "0" {
gender = "female"
}
age, _ := strconv.Atoi(splitted[1])
return gender, age
}
func getLogPath(advrId string) string {
dir := getDir("log")
splitted := strings.Split(advrId, "/")
return dir + "/" + splitted[len(splitted)-1]
}
func getLog(id string) map[string][]ClickLog {
path := getLogPath(id)
result := map[string][]ClickLog{}
if _, err := os.Stat(path); os.IsNotExist(err) {
return result
}
f, err := os.Open(path)
if err != nil {
panic(err)
}
defer f.Close()
err = syscall.Flock(int(f.Fd()), syscall.LOCK_SH)
if err != nil {
panic(err)
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimRight(line, "\n")
sp := strings.Split(line, "\t")
ad_id := sp[0]
user := sp[1]
agent := sp[2]
if agent == "" {
agent = "unknown"
}
gender, age := decodeUserKey(sp[1])
if result[ad_id] == nil {
result[ad_id] = []ClickLog{}
}
data := ClickLog{ad_id, user, agent, gender, age}
result[ad_id] = append(result[ad_id], data)
}
return result
}
func routePostAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
advrId := advertiserId(req)
if advrId == "" {
r.Status(404)
return
}
req.ParseMultipartForm(100000)
asset := req.MultipartForm.File["asset"][0]
id := nextAdId()
key := adKey(slot, id)
content_type := ""
if len(req.Form["type"]) > 0 {
content_type = req.Form["type"][0]
}
if content_type == "" && len(asset.Header["Content-Type"]) > 0 {
content_type = asset.Header["Content-Type"][0]
}
if content_type == "" {
content_type = "video/mp4"
}
title := ""
if a := req.Form["title"]; a != nil {
title = a[0]
}
destination := ""
if a := req.Form["destination"]; a != nil {
destination = a[0]
}
rd.HMSet(key,
"slot", slot,
"id", id,
"title", title,
"type", content_type,
"advertiser", advrId,
"destination", destination,
"impressions", "0",
)
f, _ := asset.Open()
defer f.Close()
buf := bytes.NewBuffer(nil)
io.Copy(buf, f)
fname := assetFile(slot, id)
err := os.MkdirAll(filepath.Dir(fname), os.ModePerm)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(fname, buf.Bytes(), os.ModePerm)
if err != nil {
panic(err)
}
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/fs"+assetFile(slot, id), content_type, bytes.NewReader(buf.Bytes()))
if err != nil {
panic(err)
}
}
}
rd.RPush(slotKey(slot), id)
rd.SAdd(advertiserKey(advrId), key)
r.JSON(200, getAd(req, slot, id))
}
func routeGetAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
ad := nextAd(req, slot)
if ad != nil {
r.Redirect("/slots/" + slot + "/ads/" + ad.Id)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdWithId(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad != nil {
r.JSON(200, ad)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 {
r.Status(416)
return
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
res.Header().Set("Content-Range", content_range)
res.Header().Set("Content-Length", strconv.Itoa(len(range_data)))
r.Data(206, []byte(range_data))
}
func routeGetAdCount(r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
key := adKey(slot, id)
exists, _ := rd.Exists(key).Result()
if !exists {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
rd.HIncrBy(key, "impressions", 1).Result()
r.Status(204)
}
func routeGetAdRedirect(req *http.Request, r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
isuad := ""
cookie, err := req.Cookie("isuad")
if err != nil {
if err != http.ErrNoCookie {
panic(err)
}
} else {
isuad = cookie.Value
}
ua := req.Header.Get("User-Agent")
path := getLogPath(ad.Advertiser)
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
if err != nil {
panic(err)
}
fmt.Fprintf(f, "%s\t%s\t%s\n", ad.Id, isuad, ua)
f.Close()
r.Redirect(ad.Destination)
}
func routeGetReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
report := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
report[ad["id"]] = data
}
for adId, clicks := range getLog(advrId) {
if _, exists := report[adId]; !exists {
report[adId] = &Report{}
}
report[adId].Clicks = len(clicks)
}
r.JSON(200, report)
}
func routeGetFinalReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
reports := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
reports[ad["id"]] = data
}
logs := getLog(advrId)
for adId, report := range reports {
log, exists := logs[adId]
if exists {
report.Clicks = len(log)
}
breakdown := &BreakdownReport{
map[string]int{},
map[string]int{},
map[string]int{},
}
for i := range log {
click := log[i]
incr_map(&breakdown.Gender, click.Gender)
incr_map(&breakdown.Agents, click.Agent)
generation := "unknown"
if click.Age != -1 {
generation = strconv.Itoa(click.Age / 10)
}
incr_map(&breakdown.Generations, generation)
}
report.Breakdown = breakdown
reports[adId] = report
}
r.JSON(200, reports)
}
func routePostInitialize() (int, string) {
keys, _ := rd.Keys("isu4:*").Result()
for i := range keys {
key := keys[i]
rd.Del(key)
}
path := getDir("log")
os.RemoveAll(path)
initAssetBaseDir()
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/initialize_slave", "", nil)
if err != nil {
panic(err)
}
}
}
return 200, "OK"
}
func routePostInitializeSlave() (int, string) {
initAssetBaseDir()
return 200, "OK"
}
var FSPathPrefix = "/fs"
var FSRoot = "/"
var FSDirPermission os.FileMode = 0777
// curl -XPOST --data-binary "hoge" -v http://127.0.0.1:8080/fs/foo
func routePostFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
dir := filepath.Dir(path)
log.Println(dir)
err := os.MkdirAll(dir, FSDirPermission)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
file, err := os.Create(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close()
written, err := io.Copy(file, r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println(written)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeDeleteFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
info, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
http.Error(w, "", http.StatusNotFound)
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if info.IsDir() {
err = os.RemoveAll(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
return
}
err = os.Remove(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
w.WriteHeader(http.StatusOK)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeGetFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) |
func main() {
m := martini.Classic()
m.Use(martini.Static("../public"))
m.Use(render.Renderer(render.Options{
Layout: "layout",
}))
m.Group("/slots/:slot", func(r martini.Router) {
m.Post("/ads", routePostAd)
m.Get("/ad", routeGetAd)
m.Get("/ads/:id", routeGetAdWithId)
m.Get("/ads/:id/asset", routeGetAdAsset)
m.Post("/ads/:id/count", routeGetAdCount)
m.Get("/ads/:id/redirect", routeGetAdRedirect)
})
m.Group("/me", func(r martini.Router) {
m.Get("/report", routeGetReport)
m.Get("/final_report", routeGetFinalReport)
})
m.Post("/initialize", routePostInitialize)
m.Post("/initialize_slave", routePostInitializeSlave)
m.Group(FSPathPrefix, func(r martini.Router) {
m.Post("/(?P<path>[a-zA-Z0-9._/-]+)", routePostFs)
m.Delete("/(?P<path>[a-zA-Z0-9._/-]+)", routeDeleteFs)
m.Get("/(?P<path>[a-zA-Z0-9._/-]+)", routeGetFs)
})
sigchan := make(chan os.Signal)
signal.Notify(sigchan, syscall.SIGTERM)
signal.Notify(sigchan, syscall.SIGINT)
var l net.Listener
var err error
sock := "/tmp/server.sock"
if *port == 0 {
ferr := os.Remove(sock)
if ferr != nil {
if !os.IsNotExist(ferr) {
panic(ferr.Error())
}
}
l, err = net.Listen("unix", sock)
cerr := os.Chmod(sock, 0666)
if cerr != nil {
panic(cerr.Error())
}
} else {
l, err = net.ListenTCP("tcp", &net.TCPAddr{Port: int(*port)})
}
if err != nil {
panic(err.Error())
}
go func() {
// func Serve(l net.Listener, handler Handler) error
log.Println(http.Serve(l, m))
}()
<-sigchan
}
| {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
http.ServeFile(w, r, path)
return
} | identifier_body |
app.go | package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"io/ioutil"
"github.com/go-martini/martini"
"github.com/martini-contrib/render"
redis "gopkg.in/redis.v3"
)
type Ad struct {
Slot string `json:"slot"`
Id string `json:"id"`
Title string `json:"title"`
Type string `json:"type"`
Advertiser string `json:"advertiser"`
Destination string `json:"destination"`
Impressions int `json:"impressions"`
}
type AdWithEndpoints struct {
Ad
Asset string `json:"asset"`
Redirect string `json:"redirect"`
Counter string `json:"counter"`
}
type ClickLog struct {
AdId string `json:"ad_id"`
User string `json:"user"`
Agent string `json:"agent"`
Gender string `json:"gender"`
Age int `json:"age"`
}
type Report struct {
Ad *Ad `json:"ad"`
Clicks int `json:"clicks"`
Impressions int `json:"impressions"`
Breakdown *BreakdownReport `json:"breakdown,omitempty"`
}
type BreakdownReport struct {
Gender map[string]int `json:"gender"`
Agents map[string]int `json:"agents"`
Generations map[string]int `json:"generations"`
}
var rd *redis.Client
var port = flag.Uint("port", 0, "port to listen")
var isMaster = flag.Bool("master", false, "is master?")
var globalIP []string = []string{"130.211.255.138", "104.155.201.2", "130.211.251.102"}
var internalIP []string = []string{"10.240.0.2", "10.240.0.3", "10.240.0.4"}
func init() {
rd = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
DB: 0,
})
flag.Parse()
}
func getDir(name string) string {
base_dir := "/tmp/go/"
path := base_dir + name
os.MkdirAll(path, 0755)
return path
}
func urlFor(req *http.Request, path string) string {
host := req.Host
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func urlFor2(path string, id string) string {
i, _ := strconv.Atoi(id)
host := internalIP[i%3]
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func fetch(hash map[string]string, key string, defaultValue string) string {
if hash[key] == "" {
return defaultValue
} else {
return hash[key]
}
}
func incr_map(dict *map[string]int, key string) {
_, exists := (*dict)[key]
if !exists {
(*dict)[key] = 0
}
(*dict)[key]++
}
func advertiserId(req *http.Request) string {
return req.Header.Get("X-Advertiser-Id")
}
func adKey(slot string, id string) string {
return "isu4:ad:" + slot + "-" + id
}
func assetKey(slot string, id string) string {
return "isu4:asset:" + slot + "-" + id
}
const assetBaseDir = "/var/tmp/isu4"
func initAssetBaseDir() {
err := os.RemoveAll(assetBaseDir)
if err != nil {
panic(err)
}
err = os.MkdirAll(assetBaseDir, os.ModePerm)
if err != nil {
panic(err)
}
}
func assetFile(slot string, id string) string {
return assetBaseDir + "/slots/" + slot + "/ads/" + id + "/asset"
}
func advertiserKey(id string) string {
return "isu4:advertiser:" + id
}
func slotKey(slot string) string {
return "isu4:slot:" + slot
}
func nextAdId() string {
id, _ := rd.Incr("isu4:ad-next").Result()
return strconv.FormatInt(id, 10)
}
func nextAd(req *http.Request, slot string) *AdWithEndpoints {
key := slotKey(slot)
id, _ := rd.RPopLPush(key, key).Result()
if id == "" {
return nil
}
ad := getAd(req, slot, id)
if ad != nil {
return ad
} else {
rd.LRem(key, 0, id).Result()
return nextAd(req, slot)
}
}
func getAd(req *http.Request, slot string, id string) *AdWithEndpoints {
key := adKey(slot, id)
m, _ := rd.HGetAllMap(key).Result()
if m == nil {
return nil
}
if _, exists := m["id"]; !exists {
return nil
}
imp, _ := strconv.Atoi(m["impressions"])
path_base := "/slots/" + slot + "/ads/" + id
var ad *AdWithEndpoints
ad = &AdWithEndpoints{
Ad{
m["slot"],
m["id"],
m["title"],
m["type"],
m["advertiser"],
m["destination"],
imp,
},
urlFor2(path_base+"/asset", id),
urlFor(req, path_base+"/redirect"),
urlFor(req, path_base+"/count"),
}
return ad
}
func decodeUserKey(id string) (string, int) {
if id == "" {
return "unknown", -1
}
splitted := strings.Split(id, "/")
gender := "male"
if splitted[0] == "0" {
gender = "female"
}
age, _ := strconv.Atoi(splitted[1])
return gender, age
}
func getLogPath(advrId string) string {
dir := getDir("log")
splitted := strings.Split(advrId, "/")
return dir + "/" + splitted[len(splitted)-1]
}
func getLog(id string) map[string][]ClickLog {
path := getLogPath(id)
result := map[string][]ClickLog{}
if _, err := os.Stat(path); os.IsNotExist(err) {
return result
}
f, err := os.Open(path)
if err != nil {
panic(err)
}
defer f.Close()
err = syscall.Flock(int(f.Fd()), syscall.LOCK_SH)
if err != nil {
panic(err)
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimRight(line, "\n")
sp := strings.Split(line, "\t")
ad_id := sp[0]
user := sp[1]
agent := sp[2]
if agent == "" {
agent = "unknown"
}
gender, age := decodeUserKey(sp[1])
if result[ad_id] == nil {
result[ad_id] = []ClickLog{}
}
data := ClickLog{ad_id, user, agent, gender, age}
result[ad_id] = append(result[ad_id], data)
}
return result
}
func routePostAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
advrId := advertiserId(req)
if advrId == "" {
r.Status(404)
return
}
req.ParseMultipartForm(100000)
asset := req.MultipartForm.File["asset"][0]
id := nextAdId()
key := adKey(slot, id)
content_type := ""
if len(req.Form["type"]) > 0 {
content_type = req.Form["type"][0]
}
if content_type == "" && len(asset.Header["Content-Type"]) > 0 {
content_type = asset.Header["Content-Type"][0]
}
if content_type == "" {
content_type = "video/mp4"
}
title := ""
if a := req.Form["title"]; a != nil {
title = a[0]
}
destination := ""
if a := req.Form["destination"]; a != nil {
destination = a[0]
}
rd.HMSet(key,
"slot", slot,
"id", id,
"title", title,
"type", content_type,
"advertiser", advrId,
"destination", destination,
"impressions", "0",
)
f, _ := asset.Open()
defer f.Close()
buf := bytes.NewBuffer(nil)
io.Copy(buf, f)
fname := assetFile(slot, id)
err := os.MkdirAll(filepath.Dir(fname), os.ModePerm)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(fname, buf.Bytes(), os.ModePerm)
if err != nil {
panic(err)
}
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/fs"+assetFile(slot, id), content_type, bytes.NewReader(buf.Bytes()))
if err != nil {
panic(err)
}
}
}
rd.RPush(slotKey(slot), id)
rd.SAdd(advertiserKey(advrId), key)
r.JSON(200, getAd(req, slot, id))
}
func routeGetAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
ad := nextAd(req, slot)
if ad != nil {
r.Redirect("/slots/" + slot + "/ads/" + ad.Id)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdWithId(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad != nil {
r.JSON(200, ad)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 {
r.Status(416)
return
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
res.Header().Set("Content-Range", content_range)
res.Header().Set("Content-Length", strconv.Itoa(len(range_data)))
r.Data(206, []byte(range_data))
}
func routeGetAdCount(r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
key := adKey(slot, id)
exists, _ := rd.Exists(key).Result()
if !exists {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
rd.HIncrBy(key, "impressions", 1).Result()
r.Status(204)
}
func routeGetAdRedirect(req *http.Request, r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
isuad := ""
cookie, err := req.Cookie("isuad")
if err != nil {
if err != http.ErrNoCookie {
panic(err)
}
} else {
isuad = cookie.Value
}
ua := req.Header.Get("User-Agent")
path := getLogPath(ad.Advertiser)
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
if err != nil {
panic(err)
}
fmt.Fprintf(f, "%s\t%s\t%s\n", ad.Id, isuad, ua)
f.Close()
r.Redirect(ad.Destination)
}
func routeGetReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
report := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
report[ad["id"]] = data
}
for adId, clicks := range getLog(advrId) {
if _, exists := report[adId]; !exists {
report[adId] = &Report{}
}
report[adId].Clicks = len(clicks)
}
r.JSON(200, report)
}
func routeGetFinalReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
reports := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
reports[ad["id"]] = data
}
logs := getLog(advrId)
for adId, report := range reports {
log, exists := logs[adId]
if exists {
report.Clicks = len(log)
}
breakdown := &BreakdownReport{
map[string]int{},
map[string]int{},
map[string]int{},
}
for i := range log {
click := log[i]
incr_map(&breakdown.Gender, click.Gender)
incr_map(&breakdown.Agents, click.Agent)
generation := "unknown"
if click.Age != -1 {
generation = strconv.Itoa(click.Age / 10)
}
incr_map(&breakdown.Generations, generation)
}
report.Breakdown = breakdown
reports[adId] = report
}
r.JSON(200, reports)
}
func routePostInitialize() (int, string) {
keys, _ := rd.Keys("isu4:*").Result()
for i := range keys {
key := keys[i]
rd.Del(key)
}
path := getDir("log")
os.RemoveAll(path)
initAssetBaseDir()
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/initialize_slave", "", nil)
if err != nil {
panic(err)
}
}
}
return 200, "OK"
}
func routePostInitializeSlave() (int, string) {
initAssetBaseDir()
return 200, "OK"
} |
// curl -XPOST --data-binary "hoge" -v http://127.0.0.1:8080/fs/foo
func routePostFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
dir := filepath.Dir(path)
log.Println(dir)
err := os.MkdirAll(dir, FSDirPermission)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
file, err := os.Create(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close()
written, err := io.Copy(file, r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println(written)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeDeleteFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
info, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
http.Error(w, "", http.StatusNotFound)
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if info.IsDir() {
err = os.RemoveAll(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
return
}
err = os.Remove(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
w.WriteHeader(http.StatusOK)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeGetFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
http.ServeFile(w, r, path)
return
}
func main() {
m := martini.Classic()
m.Use(martini.Static("../public"))
m.Use(render.Renderer(render.Options{
Layout: "layout",
}))
m.Group("/slots/:slot", func(r martini.Router) {
m.Post("/ads", routePostAd)
m.Get("/ad", routeGetAd)
m.Get("/ads/:id", routeGetAdWithId)
m.Get("/ads/:id/asset", routeGetAdAsset)
m.Post("/ads/:id/count", routeGetAdCount)
m.Get("/ads/:id/redirect", routeGetAdRedirect)
})
m.Group("/me", func(r martini.Router) {
m.Get("/report", routeGetReport)
m.Get("/final_report", routeGetFinalReport)
})
m.Post("/initialize", routePostInitialize)
m.Post("/initialize_slave", routePostInitializeSlave)
m.Group(FSPathPrefix, func(r martini.Router) {
m.Post("/(?P<path>[a-zA-Z0-9._/-]+)", routePostFs)
m.Delete("/(?P<path>[a-zA-Z0-9._/-]+)", routeDeleteFs)
m.Get("/(?P<path>[a-zA-Z0-9._/-]+)", routeGetFs)
})
sigchan := make(chan os.Signal)
signal.Notify(sigchan, syscall.SIGTERM)
signal.Notify(sigchan, syscall.SIGINT)
var l net.Listener
var err error
sock := "/tmp/server.sock"
if *port == 0 {
ferr := os.Remove(sock)
if ferr != nil {
if !os.IsNotExist(ferr) {
panic(ferr.Error())
}
}
l, err = net.Listen("unix", sock)
cerr := os.Chmod(sock, 0666)
if cerr != nil {
panic(cerr.Error())
}
} else {
l, err = net.ListenTCP("tcp", &net.TCPAddr{Port: int(*port)})
}
if err != nil {
panic(err.Error())
}
go func() {
// func Serve(l net.Listener, handler Handler) error
log.Println(http.Serve(l, m))
}()
<-sigchan
} |
var FSPathPrefix = "/fs"
var FSRoot = "/"
var FSDirPermission os.FileMode = 0777 | random_line_split |
app.go | package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"io/ioutil"
"github.com/go-martini/martini"
"github.com/martini-contrib/render"
redis "gopkg.in/redis.v3"
)
type Ad struct {
Slot string `json:"slot"`
Id string `json:"id"`
Title string `json:"title"`
Type string `json:"type"`
Advertiser string `json:"advertiser"`
Destination string `json:"destination"`
Impressions int `json:"impressions"`
}
type AdWithEndpoints struct {
Ad
Asset string `json:"asset"`
Redirect string `json:"redirect"`
Counter string `json:"counter"`
}
type ClickLog struct {
AdId string `json:"ad_id"`
User string `json:"user"`
Agent string `json:"agent"`
Gender string `json:"gender"`
Age int `json:"age"`
}
type Report struct {
Ad *Ad `json:"ad"`
Clicks int `json:"clicks"`
Impressions int `json:"impressions"`
Breakdown *BreakdownReport `json:"breakdown,omitempty"`
}
type BreakdownReport struct {
Gender map[string]int `json:"gender"`
Agents map[string]int `json:"agents"`
Generations map[string]int `json:"generations"`
}
var rd *redis.Client
var port = flag.Uint("port", 0, "port to listen")
var isMaster = flag.Bool("master", false, "is master?")
var globalIP []string = []string{"130.211.255.138", "104.155.201.2", "130.211.251.102"}
var internalIP []string = []string{"10.240.0.2", "10.240.0.3", "10.240.0.4"}
func init() {
rd = redis.NewClient(&redis.Options{
Addr: "localhost:6379",
DB: 0,
})
flag.Parse()
}
func getDir(name string) string {
base_dir := "/tmp/go/"
path := base_dir + name
os.MkdirAll(path, 0755)
return path
}
func urlFor(req *http.Request, path string) string {
host := req.Host
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func | (path string, id string) string {
i, _ := strconv.Atoi(id)
host := internalIP[i%3]
if host != "" {
return "http://" + host + path
} else {
return path
}
}
func fetch(hash map[string]string, key string, defaultValue string) string {
if hash[key] == "" {
return defaultValue
} else {
return hash[key]
}
}
func incr_map(dict *map[string]int, key string) {
_, exists := (*dict)[key]
if !exists {
(*dict)[key] = 0
}
(*dict)[key]++
}
func advertiserId(req *http.Request) string {
return req.Header.Get("X-Advertiser-Id")
}
func adKey(slot string, id string) string {
return "isu4:ad:" + slot + "-" + id
}
func assetKey(slot string, id string) string {
return "isu4:asset:" + slot + "-" + id
}
const assetBaseDir = "/var/tmp/isu4"
func initAssetBaseDir() {
err := os.RemoveAll(assetBaseDir)
if err != nil {
panic(err)
}
err = os.MkdirAll(assetBaseDir, os.ModePerm)
if err != nil {
panic(err)
}
}
func assetFile(slot string, id string) string {
return assetBaseDir + "/slots/" + slot + "/ads/" + id + "/asset"
}
func advertiserKey(id string) string {
return "isu4:advertiser:" + id
}
func slotKey(slot string) string {
return "isu4:slot:" + slot
}
func nextAdId() string {
id, _ := rd.Incr("isu4:ad-next").Result()
return strconv.FormatInt(id, 10)
}
func nextAd(req *http.Request, slot string) *AdWithEndpoints {
key := slotKey(slot)
id, _ := rd.RPopLPush(key, key).Result()
if id == "" {
return nil
}
ad := getAd(req, slot, id)
if ad != nil {
return ad
} else {
rd.LRem(key, 0, id).Result()
return nextAd(req, slot)
}
}
func getAd(req *http.Request, slot string, id string) *AdWithEndpoints {
key := adKey(slot, id)
m, _ := rd.HGetAllMap(key).Result()
if m == nil {
return nil
}
if _, exists := m["id"]; !exists {
return nil
}
imp, _ := strconv.Atoi(m["impressions"])
path_base := "/slots/" + slot + "/ads/" + id
var ad *AdWithEndpoints
ad = &AdWithEndpoints{
Ad{
m["slot"],
m["id"],
m["title"],
m["type"],
m["advertiser"],
m["destination"],
imp,
},
urlFor2(path_base+"/asset", id),
urlFor(req, path_base+"/redirect"),
urlFor(req, path_base+"/count"),
}
return ad
}
func decodeUserKey(id string) (string, int) {
if id == "" {
return "unknown", -1
}
splitted := strings.Split(id, "/")
gender := "male"
if splitted[0] == "0" {
gender = "female"
}
age, _ := strconv.Atoi(splitted[1])
return gender, age
}
func getLogPath(advrId string) string {
dir := getDir("log")
splitted := strings.Split(advrId, "/")
return dir + "/" + splitted[len(splitted)-1]
}
func getLog(id string) map[string][]ClickLog {
path := getLogPath(id)
result := map[string][]ClickLog{}
if _, err := os.Stat(path); os.IsNotExist(err) {
return result
}
f, err := os.Open(path)
if err != nil {
panic(err)
}
defer f.Close()
err = syscall.Flock(int(f.Fd()), syscall.LOCK_SH)
if err != nil {
panic(err)
}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimRight(line, "\n")
sp := strings.Split(line, "\t")
ad_id := sp[0]
user := sp[1]
agent := sp[2]
if agent == "" {
agent = "unknown"
}
gender, age := decodeUserKey(sp[1])
if result[ad_id] == nil {
result[ad_id] = []ClickLog{}
}
data := ClickLog{ad_id, user, agent, gender, age}
result[ad_id] = append(result[ad_id], data)
}
return result
}
func routePostAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
advrId := advertiserId(req)
if advrId == "" {
r.Status(404)
return
}
req.ParseMultipartForm(100000)
asset := req.MultipartForm.File["asset"][0]
id := nextAdId()
key := adKey(slot, id)
content_type := ""
if len(req.Form["type"]) > 0 {
content_type = req.Form["type"][0]
}
if content_type == "" && len(asset.Header["Content-Type"]) > 0 {
content_type = asset.Header["Content-Type"][0]
}
if content_type == "" {
content_type = "video/mp4"
}
title := ""
if a := req.Form["title"]; a != nil {
title = a[0]
}
destination := ""
if a := req.Form["destination"]; a != nil {
destination = a[0]
}
rd.HMSet(key,
"slot", slot,
"id", id,
"title", title,
"type", content_type,
"advertiser", advrId,
"destination", destination,
"impressions", "0",
)
f, _ := asset.Open()
defer f.Close()
buf := bytes.NewBuffer(nil)
io.Copy(buf, f)
fname := assetFile(slot, id)
err := os.MkdirAll(filepath.Dir(fname), os.ModePerm)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(fname, buf.Bytes(), os.ModePerm)
if err != nil {
panic(err)
}
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/fs"+assetFile(slot, id), content_type, bytes.NewReader(buf.Bytes()))
if err != nil {
panic(err)
}
}
}
rd.RPush(slotKey(slot), id)
rd.SAdd(advertiserKey(advrId), key)
r.JSON(200, getAd(req, slot, id))
}
func routeGetAd(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
ad := nextAd(req, slot)
if ad != nil {
r.Redirect("/slots/" + slot + "/ads/" + ad.Id)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdWithId(r render.Render, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad != nil {
r.JSON(200, ad)
} else {
r.JSON(404, map[string]string{"error": "not_found"})
}
}
func routeGetAdAsset(r render.Render, res http.ResponseWriter, req *http.Request, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
content_type := "application/octet-stream"
if ad.Type != "" {
content_type = ad.Type
}
res.Header().Set("Content-Type", content_type)
data, err := ioutil.ReadFile(assetFile(slot, id))
if err != nil {
panic(err)
}
range_str := req.Header.Get("Range")
if range_str == "" {
r.Data(200, []byte(data))
return
}
re := regexp.MustCompile("^bytes=(\\d*)-(\\d*)$")
m := re.FindAllStringSubmatch(range_str, -1)
if m == nil {
r.Status(416)
return
}
head_str := m[0][1]
tail_str := m[0][2]
if head_str == "" && tail_str == "" {
r.Status(416)
return
}
head := 0
tail := 0
if head_str != "" {
head, _ = strconv.Atoi(head_str)
}
if tail_str != "" {
tail, _ = strconv.Atoi(tail_str)
} else {
tail = len(data) - 1
}
if head < 0 || head >= len(data) || tail < 0 {
r.Status(416)
return
}
range_data := data[head:(tail + 1)]
content_range := fmt.Sprintf("bytes %d-%d/%d", head, tail, len(data))
res.Header().Set("Content-Range", content_range)
res.Header().Set("Content-Length", strconv.Itoa(len(range_data)))
r.Data(206, []byte(range_data))
}
func routeGetAdCount(r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
key := adKey(slot, id)
exists, _ := rd.Exists(key).Result()
if !exists {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
rd.HIncrBy(key, "impressions", 1).Result()
r.Status(204)
}
func routeGetAdRedirect(req *http.Request, r render.Render, params martini.Params) {
slot := params["slot"]
id := params["id"]
ad := getAd(req, slot, id)
if ad == nil {
r.JSON(404, map[string]string{"error": "not_found"})
return
}
isuad := ""
cookie, err := req.Cookie("isuad")
if err != nil {
if err != http.ErrNoCookie {
panic(err)
}
} else {
isuad = cookie.Value
}
ua := req.Header.Get("User-Agent")
path := getLogPath(ad.Advertiser)
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
if err != nil {
panic(err)
}
fmt.Fprintf(f, "%s\t%s\t%s\n", ad.Id, isuad, ua)
f.Close()
r.Redirect(ad.Destination)
}
func routeGetReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
report := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
report[ad["id"]] = data
}
for adId, clicks := range getLog(advrId) {
if _, exists := report[adId]; !exists {
report[adId] = &Report{}
}
report[adId].Clicks = len(clicks)
}
r.JSON(200, report)
}
func routeGetFinalReport(req *http.Request, r render.Render) {
advrId := advertiserId(req)
if advrId == "" {
r.Status(401)
return
}
reports := map[string]*Report{}
adKeys, _ := rd.SMembers(advertiserKey(advrId)).Result()
for _, adKey := range adKeys {
ad, _ := rd.HGetAllMap(adKey).Result()
if ad == nil {
continue
}
imp, _ := strconv.Atoi(ad["impressions"])
data := &Report{
&Ad{
ad["slot"],
ad["id"],
ad["title"],
ad["type"],
ad["advertiser"],
ad["destination"],
imp,
},
0,
imp,
nil,
}
reports[ad["id"]] = data
}
logs := getLog(advrId)
for adId, report := range reports {
log, exists := logs[adId]
if exists {
report.Clicks = len(log)
}
breakdown := &BreakdownReport{
map[string]int{},
map[string]int{},
map[string]int{},
}
for i := range log {
click := log[i]
incr_map(&breakdown.Gender, click.Gender)
incr_map(&breakdown.Agents, click.Agent)
generation := "unknown"
if click.Age != -1 {
generation = strconv.Itoa(click.Age / 10)
}
incr_map(&breakdown.Generations, generation)
}
report.Breakdown = breakdown
reports[adId] = report
}
r.JSON(200, reports)
}
func routePostInitialize() (int, string) {
keys, _ := rd.Keys("isu4:*").Result()
for i := range keys {
key := keys[i]
rd.Del(key)
}
path := getDir("log")
os.RemoveAll(path)
initAssetBaseDir()
if *isMaster {
for i, ip := range internalIP {
if i == 0 {
continue
}
_, err := http.Post("http://"+ip+"/initialize_slave", "", nil)
if err != nil {
panic(err)
}
}
}
return 200, "OK"
}
func routePostInitializeSlave() (int, string) {
initAssetBaseDir()
return 200, "OK"
}
var FSPathPrefix = "/fs"
var FSRoot = "/"
var FSDirPermission os.FileMode = 0777
// curl -XPOST --data-binary "hoge" -v http://127.0.0.1:8080/fs/foo
func routePostFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
dir := filepath.Dir(path)
log.Println(dir)
err := os.MkdirAll(dir, FSDirPermission)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
file, err := os.Create(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close()
written, err := io.Copy(file, r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Println(written)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeDeleteFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
info, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
http.Error(w, "", http.StatusNotFound)
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if info.IsDir() {
err = os.RemoveAll(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
return
}
err = os.Remove(path)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
w.WriteHeader(http.StatusOK)
return
}
// curl -XDELETE -v http://127.0.0.1:8080/fs/foo
func routeGetFs(rdr render.Render, w http.ResponseWriter, r *http.Request, params martini.Params) {
path := FSRoot + strings.TrimPrefix(r.URL.Path, FSPathPrefix)
http.ServeFile(w, r, path)
return
}
func main() {
m := martini.Classic()
m.Use(martini.Static("../public"))
m.Use(render.Renderer(render.Options{
Layout: "layout",
}))
m.Group("/slots/:slot", func(r martini.Router) {
m.Post("/ads", routePostAd)
m.Get("/ad", routeGetAd)
m.Get("/ads/:id", routeGetAdWithId)
m.Get("/ads/:id/asset", routeGetAdAsset)
m.Post("/ads/:id/count", routeGetAdCount)
m.Get("/ads/:id/redirect", routeGetAdRedirect)
})
m.Group("/me", func(r martini.Router) {
m.Get("/report", routeGetReport)
m.Get("/final_report", routeGetFinalReport)
})
m.Post("/initialize", routePostInitialize)
m.Post("/initialize_slave", routePostInitializeSlave)
m.Group(FSPathPrefix, func(r martini.Router) {
m.Post("/(?P<path>[a-zA-Z0-9._/-]+)", routePostFs)
m.Delete("/(?P<path>[a-zA-Z0-9._/-]+)", routeDeleteFs)
m.Get("/(?P<path>[a-zA-Z0-9._/-]+)", routeGetFs)
})
sigchan := make(chan os.Signal)
signal.Notify(sigchan, syscall.SIGTERM)
signal.Notify(sigchan, syscall.SIGINT)
var l net.Listener
var err error
sock := "/tmp/server.sock"
if *port == 0 {
ferr := os.Remove(sock)
if ferr != nil {
if !os.IsNotExist(ferr) {
panic(ferr.Error())
}
}
l, err = net.Listen("unix", sock)
cerr := os.Chmod(sock, 0666)
if cerr != nil {
panic(cerr.Error())
}
} else {
l, err = net.ListenTCP("tcp", &net.TCPAddr{Port: int(*port)})
}
if err != nil {
panic(err.Error())
}
go func() {
// func Serve(l net.Listener, handler Handler) error
log.Println(http.Serve(l, m))
}()
<-sigchan
}
| urlFor2 | identifier_name |
tk.py | #!/usr/bin/env python3
import time
from tkinter import *
from PIL import ImageTk, Image
import csv
from subprocess import Popen
import RPi.GPIO as GPIO
from subprocess import call
from datetime import datetime
from crontab import CronTab
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(22, GPIO.OUT, initial=1) # Set pin 22 gpio to an output
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Pulldown the input resistor on pin 23
GPIO.setup(24, GPIO.OUT, initial=0) # Set gpio pin 24 to an output as to turn the pump on when needed
# Main window
root = Tk()
root_status_string = StringVar() # Live timer countdown
timer_input_value = IntVar() # Keypad textbox value 1
daily_timer_input_value = IntVar() # Keypad textbox value 2
timer_set_run_text = StringVar() # Text string showing output of timer.
timer_recurrence_string = 0 # How often the pump will run
timer_time_string = "" # What time the pump will run
timer_status = StringVar() # Timer info on set text
water_level = StringVar() # Current water level string
timer_error_string = StringVar() # Timer set error string
timer_status_1 = StringVar() # Timer data info on set text
# Convert data from input value to mins/seconds
minute, sec = divmod(int(daily_timer_input_value.get()), 60)
hour, minute = divmod(minute, 60)
# Image/CSV data
keyboard_image = "keypad.jpg"
timer_data = 'timer_data.csv'
plot_img = "temp.png"
screen_off = "perl /home/pi/wateringsys/screen-off.pl"
speed_image = "/home/pi/wateringsys/speed.png"
class NumPad:
def | (self):
# Setup number pad screen
self.number_pad = Toplevel(root)
self.keypad_entery = Entry(self.number_pad,width=5,font=("Helvetica", 55))
self.keypad_entery.grid(row=0, column=0, columnspan=3, ipady=5)
self.number_pad.attributes('-fullscreen',True)
# Variables of keys to loop though
self.keys = [
['1', '2', '3'],
['4', '5', '6'],
['7', '8', '9'],
['Clear', '0', 'Exit'], ]
# Loop threw the keys and create the button with lambda command
for self.y, self.row in enumerate(self.keys, 1):
for self.x, self.key in enumerate(self.row):
self.b = Button(self.number_pad, text=self.key, command=lambda val=self.key:__numb_enter(val))
self.b.grid(row=self.y, column=self.x, ipadx=108, ipady=30)
self.exit = Button(
self.number_pad,
text="Exit",
command=self.number_pad.destroy).grid(
row=self.y, column=self.x, ipadx=100, ipady=30)
# Set the exit button at the end of the loop
def __numb_enter(arg):
# All globals required for updating the timer daily_timer_input_value
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
global timer_error_string
self.pin = ''
# Set the pin var to empty
if arg == 'Clear':
# remove last number from `pin`
self.pin = self.pin[:-1]
self.keypad_entery.delete('0', 'end')
self.keypad_entery.insert('end', self.pin)
elif arg == 'Exit':
self.number_pad.destroy
# Exit the keypad window
else:
# add number to pin
self.pin += arg
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Set calculate the minuets and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entery box for run time
daily_timer_input_value.set("") # Set the eatery to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesnt get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# Set the timer outputs the data to CVS
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop threw the hours in the day z will provide the hour of the day to return in lambda to timer_return function
# which manipulates the string and outputs to the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rate
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop threw the recurrence options r will provide the amount
# of days between running and return in lambda to recurrence_return function
# which manipulates the string and outputs to the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both enterys back to empty
daily_timer_input_value.set("")
timer_input_value.set("")
call(["sudo", "systemctl", "restart", "pumptimer.service"])
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def timer(): # Simple timer class,
try: # If any errors usually due to no input pass
run_time = timer_input_value.get()
root_status_string.set(str("Pump Running"))
timer_input_value.set("")
if GPIO.input(23) == 1:
GPIO.output(24, 1)
for i in range(1, run_time + 1, +1):
m, s = divmod(i, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
time.sleep(1)
GPIO.output(24, 0)
root_status_string.set(str("The pump run has finished"))
except:
GPIO.output(24, 0) # Turn the pump off.
print("failed")
pass
manual_timer = 0
def man_start(force=True):
global running
global manual_timer
try:
if force:
running = True
if running:
if GPIO.input(23) == 1:
root_status_string.set(str("Pump Running"))
GPIO.output(24, 1)
manual_timer += 1
m, s = divmod(manual_timer, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
root.after(1000, man_start, False)
if GPIO.input(23) == 0:
root_status_string.set(str("The pump will not run when the water level is low."))
except:
GPIO.output(24, 0) # Stop the pump.
def man_stop():
global running
global manual_timer
GPIO.output(24, 0)
running = False
manual_timer = 0
root_status_string.set(str("The Pump has been manually stopped."))
def img_updater(): # Auto image updater for home screen.
# Open image
try:
global counter
timer_set_time, time_until_run = csv_read()
if GPIO.input(23) == 0:
water_level_label.config(fg="Red")
water_level.set(str("The water level is LOW."))
if GPIO.input(23) == 1:
water_level_label.config(fg="Green")
water_level.set(str("The water level is OK."))
# Every 10 seconds change the timer_status_1 string which is the label on the front page.
counter += 1
if counter >= 1:
timer_status_1.set(str(timer_set_time))
plant_stat_img = ImageTk.PhotoImage(Image.open(plot_img))
plant_stat_panel.config(image=plant_stat_img)
plant_stat_panel.image = plant_stat_img
if counter >= 11:
timer_status_1.set(str(time_until_run))
speed_img = ImageTk.PhotoImage(Image.open(speed_image)) # /home/pi/html/
plant_stat_panel.config(image=speed_img)
plant_stat_panel.image = speed_img
if counter >= 21:
counter = 0
# Re load page every 10 seconds
root.after(1000, img_updater)
except:
timer_status_1.set(str('Please enter a timer, there is currently no timer set.'))
root.after(1000, img_updater)
pass
def back_light():
# Start the perl script which turns off the screen back light when the screensaver is active.
# The perl script calls back light.py which turns the back light on and off.
proc = Popen(
[screen_off], shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
def csv_read():
# Consider changing the times of day to a dict to use AM PM times inline with the loop.
try:
with open(timer_data) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
days = int(row[1])
runtime = int(row[2])
time_of_day = int(row[0])
csvfile.close()
# Due to using plain numbers in the number pad loop must convert it to something people can read.
# Following is to read the set timer and make a label out of it.
if int(int(row[0])) <= 9:
run_hour = "0{}:00".format(str(int(row[0])))
if int(int(row[0])) >= 10:
run_hour = "{}:00".format(str(int(row[0])))
days = int(row[1])
m, s = divmod(int(row[2]), 60)
h, m = divmod(m, 60)
run_time = (str("{} Minutes and {} Seconds".format(m, s)))
current_runtime = "The timer is set to run for {} every {} day(s) at {}".format(run_time, days, run_hour)
# the following is to read the set timer and print out how much time is left on the timer.
now = datetime.now()
seconds_since_last_run = (now - now.replace(hour=time_of_day, minute=0, second=0, microsecond=0)).total_seconds()
if days == 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if countdown <= 1:
total_seconds = days * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if days > 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
m, s = divmod(countdown, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = (
"There is {} day(s) {} hour(s) {} minute(s) and {} seconds remaining on the timer.".format(d, h, m, s))
# Return the strings.
return current_runtime, times
except IndexError:
timer_status_1.set(str("Please set a pump timer."))
# Buttons
pump_manual_start_button = Button(
root, text="Start Pump Manual",
command=man_start,
height=3,
width=15,
bg="green").grid(row=0, sticky=E, column=1, rowspan=2)
pump_manual_stop_button = Button(
root, text="Stop Pump Manual",
command=man_stop,
height=3,
width=15,
bg="red").grid(row=0, sticky=E, column=2, rowspan=2)
keyboard_button = Button(root, command=NumPad)
keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image))
keyboard_button.config(image=keyboard_img)
keyboard_button.image = keyboard_img
keyboard_button.grid(row=2, sticky=E)
timer_button = Button(root, text="Set Timer", command=Timers, height=3, width = 15).grid(row=5, column=2)
pump_timer_start_button = Button(root, text="Start Pump Timer", command=timer).grid(row =2, sticky=W)
# Labels
hedding_label = Label(root, text="Newcastle Pad Watering System", font=("Helvetica", 16)).grid(row=0)
timer_entery_text = Label(root, text="Please enter how many seconds you wish to run the pump for.").grid(row=1)
timer_string_label = Label(root, textvariable=root_status_string).grid(row = 3,sticky=N)
plant_stat_panel = Label(root)
plant_stat_panel.grid(row=5, column=0, columnspan=2, sticky=W)
current_timer_label = Label(root, textvariable=timer_status_1).grid(row = 4,columnspan =2, sticky=S)
# If you want to dynamicaly alter the state of the lable later on you need to asign the grid seperatly
# otherwise it just returns a None type when trying to change it.
water_level_label = Label(root, textvariable=water_level, font=("Times New Roman", 16))
water_level_label.grid(row=2, column=1, columnspan=2, sticky=E)
# Enterys
timer_entery = Entry(root, textvariable=timer_input_value, width=15).grid(row = 2)
timer_input_value.set("")
counter = 0
# Main Loop
root.attributes('-fullscreen', True) # Run Fullscreen.
root.after(0, img_updater) # Run "img_updater" function on startup.
root.after(0, back_light)
root.mainloop() # Run the main page.
| __init__ | identifier_name |
tk.py | #!/usr/bin/env python3
import time
from tkinter import *
from PIL import ImageTk, Image
import csv
from subprocess import Popen
import RPi.GPIO as GPIO
from subprocess import call
from datetime import datetime
from crontab import CronTab
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(22, GPIO.OUT, initial=1) # Set pin 22 gpio to an output
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Pulldown the input resistor on pin 23
GPIO.setup(24, GPIO.OUT, initial=0) # Set gpio pin 24 to an output as to turn the pump on when needed
# Main window
root = Tk()
root_status_string = StringVar() # Live timer countdown
timer_input_value = IntVar() # Keypad textbox value 1
daily_timer_input_value = IntVar() # Keypad textbox value 2
timer_set_run_text = StringVar() # Text string showing output of timer.
timer_recurrence_string = 0 # How often the pump will run
timer_time_string = "" # What time the pump will run
timer_status = StringVar() # Timer info on set text
water_level = StringVar() # Current water level string
timer_error_string = StringVar() # Timer set error string
timer_status_1 = StringVar() # Timer data info on set text
# Convert data from input value to mins/seconds
minute, sec = divmod(int(daily_timer_input_value.get()), 60)
hour, minute = divmod(minute, 60)
# Image/CSV data
keyboard_image = "keypad.jpg"
timer_data = 'timer_data.csv'
plot_img = "temp.png"
screen_off = "perl /home/pi/wateringsys/screen-off.pl"
speed_image = "/home/pi/wateringsys/speed.png"
class NumPad:
def __init__(self):
# Setup number pad screen
self.number_pad = Toplevel(root)
self.keypad_entery = Entry(self.number_pad,width=5,font=("Helvetica", 55))
self.keypad_entery.grid(row=0, column=0, columnspan=3, ipady=5)
self.number_pad.attributes('-fullscreen',True)
# Variables of keys to loop though
self.keys = [
['1', '2', '3'],
['4', '5', '6'],
['7', '8', '9'],
['Clear', '0', 'Exit'], ]
# Loop threw the keys and create the button with lambda command
for self.y, self.row in enumerate(self.keys, 1):
for self.x, self.key in enumerate(self.row):
self.b = Button(self.number_pad, text=self.key, command=lambda val=self.key:__numb_enter(val))
self.b.grid(row=self.y, column=self.x, ipadx=108, ipady=30)
self.exit = Button(
self.number_pad,
text="Exit",
command=self.number_pad.destroy).grid(
row=self.y, column=self.x, ipadx=100, ipady=30)
# Set the exit button at the end of the loop
def __numb_enter(arg):
# All globals required for updating the timer daily_timer_input_value
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
global timer_error_string
self.pin = ''
# Set the pin var to empty
if arg == 'Clear':
# remove last number from `pin`
self.pin = self.pin[:-1]
self.keypad_entery.delete('0', 'end')
self.keypad_entery.insert('end', self.pin)
elif arg == 'Exit':
self.number_pad.destroy
# Exit the keypad window
else:
# add number to pin
self.pin += arg
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Set calculate the minuets and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entery box for run time
daily_timer_input_value.set("") # Set the eatery to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesnt get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# Set the timer outputs the data to CVS
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop threw the hours in the day z will provide the hour of the day to return in lambda to timer_return function
# which manipulates the string and outputs to the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rate
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop threw the recurrence options r will provide the amount
# of days between running and return in lambda to recurrence_return function
# which manipulates the string and outputs to the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both enterys back to empty
daily_timer_input_value.set("")
timer_input_value.set("")
call(["sudo", "systemctl", "restart", "pumptimer.service"])
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def timer(): # Simple timer class,
try: # If any errors usually due to no input pass
run_time = timer_input_value.get()
root_status_string.set(str("Pump Running"))
timer_input_value.set("")
if GPIO.input(23) == 1:
GPIO.output(24, 1)
for i in range(1, run_time + 1, +1):
m, s = divmod(i, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
time.sleep(1)
GPIO.output(24, 0)
root_status_string.set(str("The pump run has finished"))
except:
GPIO.output(24, 0) # Turn the pump off.
print("failed")
pass
manual_timer = 0
def man_start(force=True):
global running
global manual_timer
try:
if force:
running = True
if running:
if GPIO.input(23) == 1:
root_status_string.set(str("Pump Running"))
GPIO.output(24, 1)
manual_timer += 1
m, s = divmod(manual_timer, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
root.after(1000, man_start, False)
if GPIO.input(23) == 0:
root_status_string.set(str("The pump will not run when the water level is low."))
except:
GPIO.output(24, 0) # Stop the pump.
def man_stop():
global running
global manual_timer
GPIO.output(24, 0)
running = False
manual_timer = 0
root_status_string.set(str("The Pump has been manually stopped."))
def img_updater(): # Auto image updater for home screen.
# Open image
try:
global counter
timer_set_time, time_until_run = csv_read()
if GPIO.input(23) == 0:
water_level_label.config(fg="Red")
water_level.set(str("The water level is LOW."))
if GPIO.input(23) == 1:
water_level_label.config(fg="Green")
water_level.set(str("The water level is OK."))
# Every 10 seconds change the timer_status_1 string which is the label on the front page.
counter += 1
if counter >= 1:
timer_status_1.set(str(timer_set_time))
plant_stat_img = ImageTk.PhotoImage(Image.open(plot_img))
plant_stat_panel.config(image=plant_stat_img)
plant_stat_panel.image = plant_stat_img
if counter >= 11:
timer_status_1.set(str(time_until_run))
speed_img = ImageTk.PhotoImage(Image.open(speed_image)) # /home/pi/html/
plant_stat_panel.config(image=speed_img)
plant_stat_panel.image = speed_img
if counter >= 21:
counter = 0
# Re load page every 10 seconds
root.after(1000, img_updater)
except:
timer_status_1.set(str('Please enter a timer, there is currently no timer set.'))
root.after(1000, img_updater)
pass
def back_light():
# Start the perl script which turns off the screen back light when the screensaver is active.
# The perl script calls back light.py which turns the back light on and off.
proc = Popen(
[screen_off], shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
def csv_read():
# Consider changing the times of day to a dict to use AM PM times inline with the loop.
try:
with open(timer_data) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
days = int(row[1])
runtime = int(row[2])
time_of_day = int(row[0])
csvfile.close()
# Due to using plain numbers in the number pad loop must convert it to something people can read.
# Following is to read the set timer and make a label out of it.
if int(int(row[0])) <= 9:
run_hour = "0{}:00".format(str(int(row[0])))
if int(int(row[0])) >= 10:
run_hour = "{}:00".format(str(int(row[0])))
days = int(row[1])
m, s = divmod(int(row[2]), 60)
h, m = divmod(m, 60)
run_time = (str("{} Minutes and {} Seconds".format(m, s)))
current_runtime = "The timer is set to run for {} every {} day(s) at {}".format(run_time, days, run_hour)
# the following is to read the set timer and print out how much time is left on the timer.
now = datetime.now()
seconds_since_last_run = (now - now.replace(hour=time_of_day, minute=0, second=0, microsecond=0)).total_seconds()
if days == 1:
|
if days > 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
m, s = divmod(countdown, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = (
"There is {} day(s) {} hour(s) {} minute(s) and {} seconds remaining on the timer.".format(d, h, m, s))
# Return the strings.
return current_runtime, times
except IndexError:
timer_status_1.set(str("Please set a pump timer."))
# Buttons
pump_manual_start_button = Button(
root, text="Start Pump Manual",
command=man_start,
height=3,
width=15,
bg="green").grid(row=0, sticky=E, column=1, rowspan=2)
pump_manual_stop_button = Button(
root, text="Stop Pump Manual",
command=man_stop,
height=3,
width=15,
bg="red").grid(row=0, sticky=E, column=2, rowspan=2)
keyboard_button = Button(root, command=NumPad)
keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image))
keyboard_button.config(image=keyboard_img)
keyboard_button.image = keyboard_img
keyboard_button.grid(row=2, sticky=E)
timer_button = Button(root, text="Set Timer", command=Timers, height=3, width = 15).grid(row=5, column=2)
pump_timer_start_button = Button(root, text="Start Pump Timer", command=timer).grid(row =2, sticky=W)
# Labels
hedding_label = Label(root, text="Newcastle Pad Watering System", font=("Helvetica", 16)).grid(row=0)
timer_entery_text = Label(root, text="Please enter how many seconds you wish to run the pump for.").grid(row=1)
timer_string_label = Label(root, textvariable=root_status_string).grid(row = 3,sticky=N)
plant_stat_panel = Label(root)
plant_stat_panel.grid(row=5, column=0, columnspan=2, sticky=W)
current_timer_label = Label(root, textvariable=timer_status_1).grid(row = 4,columnspan =2, sticky=S)
# If you want to dynamicaly alter the state of the lable later on you need to asign the grid seperatly
# otherwise it just returns a None type when trying to change it.
water_level_label = Label(root, textvariable=water_level, font=("Times New Roman", 16))
water_level_label.grid(row=2, column=1, columnspan=2, sticky=E)
# Enterys
timer_entery = Entry(root, textvariable=timer_input_value, width=15).grid(row = 2)
timer_input_value.set("")
counter = 0
# Main Loop
root.attributes('-fullscreen', True) # Run Fullscreen.
root.after(0, img_updater) # Run "img_updater" function on startup.
root.after(0, back_light)
root.mainloop() # Run the main page.
| total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if countdown <= 1:
total_seconds = days * 86400
countdown = total_seconds - int(round(seconds_since_last_run)) | conditional_block |
tk.py | #!/usr/bin/env python3
import time
from tkinter import *
from PIL import ImageTk, Image
import csv
from subprocess import Popen
import RPi.GPIO as GPIO
from subprocess import call
from datetime import datetime
from crontab import CronTab
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(22, GPIO.OUT, initial=1) # Set pin 22 gpio to an output
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Pulldown the input resistor on pin 23
GPIO.setup(24, GPIO.OUT, initial=0) # Set gpio pin 24 to an output as to turn the pump on when needed
# Main window
root = Tk()
root_status_string = StringVar() # Live timer countdown
timer_input_value = IntVar() # Keypad textbox value 1
daily_timer_input_value = IntVar() # Keypad textbox value 2
timer_set_run_text = StringVar() # Text string showing output of timer.
timer_recurrence_string = 0 # How often the pump will run
timer_time_string = "" # What time the pump will run
timer_status = StringVar() # Timer info on set text
water_level = StringVar() # Current water level string
timer_error_string = StringVar() # Timer set error string
timer_status_1 = StringVar() # Timer data info on set text
# Convert data from input value to mins/seconds
minute, sec = divmod(int(daily_timer_input_value.get()), 60)
hour, minute = divmod(minute, 60)
# Image/CSV data
keyboard_image = "keypad.jpg"
timer_data = 'timer_data.csv'
plot_img = "temp.png"
screen_off = "perl /home/pi/wateringsys/screen-off.pl"
speed_image = "/home/pi/wateringsys/speed.png"
class NumPad:
def __init__(self):
# Setup number pad screen
self.number_pad = Toplevel(root)
self.keypad_entery = Entry(self.number_pad,width=5,font=("Helvetica", 55))
self.keypad_entery.grid(row=0, column=0, columnspan=3, ipady=5)
self.number_pad.attributes('-fullscreen',True)
# Variables of keys to loop though
self.keys = [
['1', '2', '3'],
['4', '5', '6'],
['7', '8', '9'],
['Clear', '0', 'Exit'], ]
# Loop threw the keys and create the button with lambda command
for self.y, self.row in enumerate(self.keys, 1):
for self.x, self.key in enumerate(self.row):
self.b = Button(self.number_pad, text=self.key, command=lambda val=self.key:__numb_enter(val))
self.b.grid(row=self.y, column=self.x, ipadx=108, ipady=30)
self.exit = Button(
self.number_pad,
text="Exit",
command=self.number_pad.destroy).grid(
row=self.y, column=self.x, ipadx=100, ipady=30)
# Set the exit button at the end of the loop
def __numb_enter(arg):
# All globals required for updating the timer daily_timer_input_value
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
global timer_error_string
self.pin = ''
# Set the pin var to empty
if arg == 'Clear':
# remove last number from `pin`
self.pin = self.pin[:-1]
self.keypad_entery.delete('0', 'end')
self.keypad_entery.insert('end', self.pin)
elif arg == 'Exit':
self.number_pad.destroy
# Exit the keypad window
else:
# add number to pin
self.pin += arg
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Set calculate the minuets and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entery box for run time
daily_timer_input_value.set("") # Set the eatery to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesnt get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# Set the timer outputs the data to CVS
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop threw the hours in the day z will provide the hour of the day to return in lambda to timer_return function
# which manipulates the string and outputs to the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
|
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rate
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop threw the recurrence options r will provide the amount
# of days between running and return in lambda to recurrence_return function
# which manipulates the string and outputs to the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both enterys back to empty
daily_timer_input_value.set("")
timer_input_value.set("")
call(["sudo", "systemctl", "restart", "pumptimer.service"])
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def timer(): # Simple timer class,
try: # If any errors usually due to no input pass
run_time = timer_input_value.get()
root_status_string.set(str("Pump Running"))
timer_input_value.set("")
if GPIO.input(23) == 1:
GPIO.output(24, 1)
for i in range(1, run_time + 1, +1):
m, s = divmod(i, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
time.sleep(1)
GPIO.output(24, 0)
root_status_string.set(str("The pump run has finished"))
except:
GPIO.output(24, 0) # Turn the pump off.
print("failed")
pass
manual_timer = 0
def man_start(force=True):
global running
global manual_timer
try:
if force:
running = True
if running:
if GPIO.input(23) == 1:
root_status_string.set(str("Pump Running"))
GPIO.output(24, 1)
manual_timer += 1
m, s = divmod(manual_timer, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
root.after(1000, man_start, False)
if GPIO.input(23) == 0:
root_status_string.set(str("The pump will not run when the water level is low."))
except:
GPIO.output(24, 0) # Stop the pump.
def man_stop():
global running
global manual_timer
GPIO.output(24, 0)
running = False
manual_timer = 0
root_status_string.set(str("The Pump has been manually stopped."))
def img_updater(): # Auto image updater for home screen.
# Open image
try:
global counter
timer_set_time, time_until_run = csv_read()
if GPIO.input(23) == 0:
water_level_label.config(fg="Red")
water_level.set(str("The water level is LOW."))
if GPIO.input(23) == 1:
water_level_label.config(fg="Green")
water_level.set(str("The water level is OK."))
# Every 10 seconds change the timer_status_1 string which is the label on the front page.
counter += 1
if counter >= 1:
timer_status_1.set(str(timer_set_time))
plant_stat_img = ImageTk.PhotoImage(Image.open(plot_img))
plant_stat_panel.config(image=plant_stat_img)
plant_stat_panel.image = plant_stat_img
if counter >= 11:
timer_status_1.set(str(time_until_run))
speed_img = ImageTk.PhotoImage(Image.open(speed_image)) # /home/pi/html/
plant_stat_panel.config(image=speed_img)
plant_stat_panel.image = speed_img
if counter >= 21:
counter = 0
# Re load page every 10 seconds
root.after(1000, img_updater)
except:
timer_status_1.set(str('Please enter a timer, there is currently no timer set.'))
root.after(1000, img_updater)
pass
def back_light():
# Start the perl script which turns off the screen back light when the screensaver is active.
# The perl script calls back light.py which turns the back light on and off.
proc = Popen(
[screen_off], shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
def csv_read():
# Consider changing the times of day to a dict to use AM PM times inline with the loop.
try:
with open(timer_data) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
days = int(row[1])
runtime = int(row[2])
time_of_day = int(row[0])
csvfile.close()
# Due to using plain numbers in the number pad loop must convert it to something people can read.
# Following is to read the set timer and make a label out of it.
if int(int(row[0])) <= 9:
run_hour = "0{}:00".format(str(int(row[0])))
if int(int(row[0])) >= 10:
run_hour = "{}:00".format(str(int(row[0])))
days = int(row[1])
m, s = divmod(int(row[2]), 60)
h, m = divmod(m, 60)
run_time = (str("{} Minutes and {} Seconds".format(m, s)))
current_runtime = "The timer is set to run for {} every {} day(s) at {}".format(run_time, days, run_hour)
# the following is to read the set timer and print out how much time is left on the timer.
now = datetime.now()
seconds_since_last_run = (now - now.replace(hour=time_of_day, minute=0, second=0, microsecond=0)).total_seconds()
if days == 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if countdown <= 1:
total_seconds = days * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if days > 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
m, s = divmod(countdown, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = (
"There is {} day(s) {} hour(s) {} minute(s) and {} seconds remaining on the timer.".format(d, h, m, s))
# Return the strings.
return current_runtime, times
except IndexError:
timer_status_1.set(str("Please set a pump timer."))
# Buttons
pump_manual_start_button = Button(
root, text="Start Pump Manual",
command=man_start,
height=3,
width=15,
bg="green").grid(row=0, sticky=E, column=1, rowspan=2)
pump_manual_stop_button = Button(
root, text="Stop Pump Manual",
command=man_stop,
height=3,
width=15,
bg="red").grid(row=0, sticky=E, column=2, rowspan=2)
keyboard_button = Button(root, command=NumPad)
keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image))
keyboard_button.config(image=keyboard_img)
keyboard_button.image = keyboard_img
keyboard_button.grid(row=2, sticky=E)
timer_button = Button(root, text="Set Timer", command=Timers, height=3, width = 15).grid(row=5, column=2)
pump_timer_start_button = Button(root, text="Start Pump Timer", command=timer).grid(row =2, sticky=W)
# Labels
hedding_label = Label(root, text="Newcastle Pad Watering System", font=("Helvetica", 16)).grid(row=0)
timer_entery_text = Label(root, text="Please enter how many seconds you wish to run the pump for.").grid(row=1)
timer_string_label = Label(root, textvariable=root_status_string).grid(row = 3,sticky=N)
plant_stat_panel = Label(root)
plant_stat_panel.grid(row=5, column=0, columnspan=2, sticky=W)
current_timer_label = Label(root, textvariable=timer_status_1).grid(row = 4,columnspan =2, sticky=S)
# If you want to dynamicaly alter the state of the lable later on you need to asign the grid seperatly
# otherwise it just returns a None type when trying to change it.
water_level_label = Label(root, textvariable=water_level, font=("Times New Roman", 16))
water_level_label.grid(row=2, column=1, columnspan=2, sticky=E)
# Enterys
timer_entery = Entry(root, textvariable=timer_input_value, width=15).grid(row = 2)
timer_input_value.set("")
counter = 0
# Main Loop
root.attributes('-fullscreen', True) # Run Fullscreen.
root.after(0, img_updater) # Run "img_updater" function on startup.
root.after(0, back_light)
root.mainloop() # Run the main page. | self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
| random_line_split |
tk.py | #!/usr/bin/env python3
import time
from tkinter import *
from PIL import ImageTk, Image
import csv
from subprocess import Popen
import RPi.GPIO as GPIO
from subprocess import call
from datetime import datetime
from crontab import CronTab
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(22, GPIO.OUT, initial=1) # Set pin 22 gpio to an output
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Pulldown the input resistor on pin 23
GPIO.setup(24, GPIO.OUT, initial=0) # Set gpio pin 24 to an output as to turn the pump on when needed
# Main window
root = Tk()
root_status_string = StringVar() # Live timer countdown
timer_input_value = IntVar() # Keypad textbox value 1
daily_timer_input_value = IntVar() # Keypad textbox value 2
timer_set_run_text = StringVar() # Text string showing output of timer.
timer_recurrence_string = 0 # How often the pump will run
timer_time_string = "" # What time the pump will run
timer_status = StringVar() # Timer info on set text
water_level = StringVar() # Current water level string
timer_error_string = StringVar() # Timer set error string
timer_status_1 = StringVar() # Timer data info on set text
# Convert data from input value to mins/seconds
minute, sec = divmod(int(daily_timer_input_value.get()), 60)
hour, minute = divmod(minute, 60)
# Image/CSV data
keyboard_image = "keypad.jpg"
timer_data = 'timer_data.csv'
plot_img = "temp.png"
screen_off = "perl /home/pi/wateringsys/screen-off.pl"
speed_image = "/home/pi/wateringsys/speed.png"
class NumPad:
def __init__(self):
# Setup number pad screen
self.number_pad = Toplevel(root)
self.keypad_entery = Entry(self.number_pad,width=5,font=("Helvetica", 55))
self.keypad_entery.grid(row=0, column=0, columnspan=3, ipady=5)
self.number_pad.attributes('-fullscreen',True)
# Variables of keys to loop though
self.keys = [
['1', '2', '3'],
['4', '5', '6'],
['7', '8', '9'],
['Clear', '0', 'Exit'], ]
# Loop threw the keys and create the button with lambda command
for self.y, self.row in enumerate(self.keys, 1):
for self.x, self.key in enumerate(self.row):
self.b = Button(self.number_pad, text=self.key, command=lambda val=self.key:__numb_enter(val))
self.b.grid(row=self.y, column=self.x, ipadx=108, ipady=30)
self.exit = Button(
self.number_pad,
text="Exit",
command=self.number_pad.destroy).grid(
row=self.y, column=self.x, ipadx=100, ipady=30)
# Set the exit button at the end of the loop
def __numb_enter(arg):
# All globals required for updating the timer daily_timer_input_value
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
global timer_error_string
self.pin = ''
# Set the pin var to empty
if arg == 'Clear':
# remove last number from `pin`
self.pin = self.pin[:-1]
self.keypad_entery.delete('0', 'end')
self.keypad_entery.insert('end', self.pin)
elif arg == 'Exit':
self.number_pad.destroy
# Exit the keypad window
else:
# add number to pin
self.pin += arg
# add number to `entry`
self.keypad_entery.insert('end', arg)
self.pad_val = self.keypad_entery.get()
daily_timer_input_value.set(self.pad_val)
timer_input_value.set(self.pad_val)
# Set calculate the minuets and seconds for the label
minute, sec = divmod(int(self.pad_val), 60)
hours, minute = divmod(minute, 60)
# Set the label to update the current seconds/minutes
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
class Timers(object):
|
def timer(): # Simple timer class,
try: # If any errors usually due to no input pass
run_time = timer_input_value.get()
root_status_string.set(str("Pump Running"))
timer_input_value.set("")
if GPIO.input(23) == 1:
GPIO.output(24, 1)
for i in range(1, run_time + 1, +1):
m, s = divmod(i, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
time.sleep(1)
GPIO.output(24, 0)
root_status_string.set(str("The pump run has finished"))
except:
GPIO.output(24, 0) # Turn the pump off.
print("failed")
pass
manual_timer = 0
def man_start(force=True):
global running
global manual_timer
try:
if force:
running = True
if running:
if GPIO.input(23) == 1:
root_status_string.set(str("Pump Running"))
GPIO.output(24, 1)
manual_timer += 1
m, s = divmod(manual_timer, 60)
h, m = divmod(m, 60)
root_status_string.set(str("{} Minutes {} Seconds".format(m, s)))
root.update()
root.after(1000, man_start, False)
if GPIO.input(23) == 0:
root_status_string.set(str("The pump will not run when the water level is low."))
except:
GPIO.output(24, 0) # Stop the pump.
def man_stop():
global running
global manual_timer
GPIO.output(24, 0)
running = False
manual_timer = 0
root_status_string.set(str("The Pump has been manually stopped."))
def img_updater(): # Auto image updater for home screen.
# Open image
try:
global counter
timer_set_time, time_until_run = csv_read()
if GPIO.input(23) == 0:
water_level_label.config(fg="Red")
water_level.set(str("The water level is LOW."))
if GPIO.input(23) == 1:
water_level_label.config(fg="Green")
water_level.set(str("The water level is OK."))
# Every 10 seconds change the timer_status_1 string which is the label on the front page.
counter += 1
if counter >= 1:
timer_status_1.set(str(timer_set_time))
plant_stat_img = ImageTk.PhotoImage(Image.open(plot_img))
plant_stat_panel.config(image=plant_stat_img)
plant_stat_panel.image = plant_stat_img
if counter >= 11:
timer_status_1.set(str(time_until_run))
speed_img = ImageTk.PhotoImage(Image.open(speed_image)) # /home/pi/html/
plant_stat_panel.config(image=speed_img)
plant_stat_panel.image = speed_img
if counter >= 21:
counter = 0
# Re load page every 10 seconds
root.after(1000, img_updater)
except:
timer_status_1.set(str('Please enter a timer, there is currently no timer set.'))
root.after(1000, img_updater)
pass
def back_light():
# Start the perl script which turns off the screen back light when the screensaver is active.
# The perl script calls back light.py which turns the back light on and off.
proc = Popen(
[screen_off], shell=True,
stdin=None, stdout=None, stderr=None, close_fds=True)
def csv_read():
# Consider changing the times of day to a dict to use AM PM times inline with the loop.
try:
with open(timer_data) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
for row in read_csv:
days = int(row[1])
runtime = int(row[2])
time_of_day = int(row[0])
csvfile.close()
# Due to using plain numbers in the number pad loop must convert it to something people can read.
# Following is to read the set timer and make a label out of it.
if int(int(row[0])) <= 9:
run_hour = "0{}:00".format(str(int(row[0])))
if int(int(row[0])) >= 10:
run_hour = "{}:00".format(str(int(row[0])))
days = int(row[1])
m, s = divmod(int(row[2]), 60)
h, m = divmod(m, 60)
run_time = (str("{} Minutes and {} Seconds".format(m, s)))
current_runtime = "The timer is set to run for {} every {} day(s) at {}".format(run_time, days, run_hour)
# the following is to read the set timer and print out how much time is left on the timer.
now = datetime.now()
seconds_since_last_run = (now - now.replace(hour=time_of_day, minute=0, second=0, microsecond=0)).total_seconds()
if days == 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if countdown <= 1:
total_seconds = days * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
if days > 1:
total_seconds = (days - 1) * 86400
countdown = total_seconds - int(round(seconds_since_last_run))
m, s = divmod(countdown, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
times = (
"There is {} day(s) {} hour(s) {} minute(s) and {} seconds remaining on the timer.".format(d, h, m, s))
# Return the strings.
return current_runtime, times
except IndexError:
timer_status_1.set(str("Please set a pump timer."))
# Buttons
pump_manual_start_button = Button(
root, text="Start Pump Manual",
command=man_start,
height=3,
width=15,
bg="green").grid(row=0, sticky=E, column=1, rowspan=2)
pump_manual_stop_button = Button(
root, text="Stop Pump Manual",
command=man_stop,
height=3,
width=15,
bg="red").grid(row=0, sticky=E, column=2, rowspan=2)
keyboard_button = Button(root, command=NumPad)
keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image))
keyboard_button.config(image=keyboard_img)
keyboard_button.image = keyboard_img
keyboard_button.grid(row=2, sticky=E)
timer_button = Button(root, text="Set Timer", command=Timers, height=3, width = 15).grid(row=5, column=2)
pump_timer_start_button = Button(root, text="Start Pump Timer", command=timer).grid(row =2, sticky=W)
# Labels
hedding_label = Label(root, text="Newcastle Pad Watering System", font=("Helvetica", 16)).grid(row=0)
timer_entery_text = Label(root, text="Please enter how many seconds you wish to run the pump for.").grid(row=1)
timer_string_label = Label(root, textvariable=root_status_string).grid(row = 3,sticky=N)
plant_stat_panel = Label(root)
plant_stat_panel.grid(row=5, column=0, columnspan=2, sticky=W)
current_timer_label = Label(root, textvariable=timer_status_1).grid(row = 4,columnspan =2, sticky=S)
# If you want to dynamicaly alter the state of the lable later on you need to asign the grid seperatly
# otherwise it just returns a None type when trying to change it.
water_level_label = Label(root, textvariable=water_level, font=("Times New Roman", 16))
water_level_label.grid(row=2, column=1, columnspan=2, sticky=E)
# Enterys
timer_entery = Entry(root, textvariable=timer_input_value, width=15).grid(row = 2)
timer_input_value.set("")
counter = 0
# Main Loop
root.attributes('-fullscreen', True) # Run Fullscreen.
root.after(0, img_updater) # Run "img_updater" function on startup.
root.after(0, back_light)
root.mainloop() # Run the main page.
| def __init__(self):
global timer_set_run_text
global daily_timer_input_value
global timer_status
global timer_error_string
global keyboard_img
self.timer_set_page = Toplevel(root)
# Setup the window for the timer selections
# Strings for all of the buttons
self.timer_run_text = Label(
self.timer_set_page,
text="Please choose a time of day to run the pump.",
font=('Helvetica', 20)).grid(row= 1,columnspan=8)
self.hours_in_day = [["1AM", "2AM", "3AM", "4AM", "5AM", "6AM", "7AM","8AM"],
["9AM", "10AM", "11AM", "12PM", "1PM", "2PM","3PM","4PM"],
["5PM", "6PM", "7PM", "8PM", "9PM", "10PM","11PM","12AM"]]
self.timer_entery = Entry(
self.timer_set_page,
textvariable=daily_timer_input_value,
width=23).grid(row=9, columnspan=3, column=0)
# Entery box for run time
daily_timer_input_value.set("") # Set the eatery to blank
self.keyboard_button = Button(self.timer_set_page,command=NumPad) # Button Image to open number pad
self.keyboard_img = ImageTk.PhotoImage(Image.open(keyboard_image)) #
self.keyboard_button.config(image=self.keyboard_img) #
self.keyboard_button.image = self.keyboard_img # Keep an instance of the image so
self.keyboard_button.grid(row=9, sticky=E, columnspan=2, column=1) # that it doesnt get garbage collected
self.exit = Button(
self.timer_set_page,
text="Exit",
command=self.timer_set_page.destroy).grid(row=9, columnspan=4,column=6, ipadx=50, ipady=15)
# Exit button back to main screen
self.set_timer = Button(
self.timer_set_page,
text="Set Timer",
command=self.__set_timer_cron,
bg="green").grid(row=9, columnspan=4, column=3, ipadx=50, ipady=15)
# Set the timer outputs the data to CVS
self.timer_run_text = Label(
self.timer_set_page,
textvariable=timer_set_run_text,
font=('Helvetica', 14)).grid(row=10, columnspan=8)
# Set the text variable for timer run label
Timers.timer_run_failed = Label(
self.timer_set_page,
textvariable=timer_status,
font=('Helvetica', 14), foreground='red')
Timers.timer_run_failed.grid(row=11, columnspan=8)
# Set the text variable for a failed CSV
timer_status.set("")
Timers.err_label = Label(
self.timer_set_page,
textvariable=timer_error_string,
font=('Helvetica', 14), foreground='red')
Timers.err_label.grid(row=12, columnspan=8)
# Set the text variable for a failed CSV
timer_error_string.set("")
self.timer_length_text = Label(
self.timer_set_page,
text="Please choose how long to run the timer for in seconds.",
font=('Helvetica', 20)).grid(row=7, columnspan=8)
self.z = 0
# Loop threw the hours in the day z will provide the hour of the day to return in lambda to timer_return function
# which manipulates the string and outputs to the label
for self.y, self.row in enumerate(self.hours_in_day, 1):
for self.x, self.key in enumerate(self.row):
self.z += 1
if self.z == 24:
self.z = 0
self.b = Button(self.timer_set_page, text=self.key, command=lambda val=self.z:self.__timer_return(val))
self.b.grid(row=self.y + 1, column=self.x, ipadx=20, ipady=10)
self.timer_set_page.attributes('-fullscreen', True)
# Strings for all recurrence rate
self.recurrence = ["1 Day", "2 Day", "3 Day", "4 Day", "5 Day", "6 Day","7 Day"]
self.timer_reoc_text = Label(
self.timer_set_page, text="Please choose how often you would like to run the timer.",
font=('Helvetica', 20)).grid(row=5, columnspan=8)
self.r = 0
self.col = 0
# Loop threw the recurrence options r will provide the amount
# of days between running and return in lambda to recurrence_return function
# which manipulates the string and outputs to the label
for self.d in self.recurrence:
self.r += 1
self.c = Button(self.timer_set_page, text=self.d, command=lambda val=self.r:self.__recurrence_return(val))
self.c.grid(row=6, column=self.col, ipadx=12, ipady=12)
self.col += 1
def __recurrence_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the recurrence rate, and set the new label string
timer_recurrence_string = str(arg)
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __timer_return(self, arg):
global timer_set_run_text
global timer_recurrence_string
global timer_time_string
global minute
global sec
# retrieve the time of day, and set the new label string
self.pump_run_time = str(arg)
timer_time_string = str(str(arg) + ":00")
if len(timer_time_string) <= 4:
timer_time_string = "0" + timer_time_string
timer_set_run_text.set(
"The timer will run at {} every {} day(s) for {} Minutes {} Seconds".format(
timer_time_string, timer_recurrence_string, minute, sec))
def __set_timer_cron(self):
global timer_status
global timer_status_1
# Remove all existing timer cron jobs.
try:
run_time = self.pump_run_time
repeat = int(timer_recurrence_string)
run_length = int(daily_timer_input_value.get())
cron = CronTab(user=True)
cron.remove_all(comment='water_timer')
cron.write()
# Insert new cron job timer.
cron = CronTab(user=True)
job = cron.new(
command='sudo python3 /home/pi/wateringsys/crontimer.py {}'.format(run_length),
comment='water_timer')
if repeat == 1:
job.hour.on(run_time)
job.minute.on(0)
if repeat >= 2:
job.setall(0, run_time, '*/{}'.format(repeat), None, None)
cron.write()
daily_timer_input_value.set("")
timer_input_value.set("")
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length")
def __set_timer_csv(self):
global timer_status
global timer_status_1
try:
run_time = self.pump_run_time
repeat = str(timer_recurrence_string)
run_length = str(daily_timer_input_value.get())
with open(timer_data, "w") as self.output:
self.output.truncate()
self.writer = csv.writer(self.output, lineterminator='\n')
self.writer.writerow([run_time, repeat, run_length])
self.output.close()
# Set both enterys back to empty
daily_timer_input_value.set("")
timer_input_value.set("")
call(["sudo", "systemctl", "restart", "pumptimer.service"])
Timers.timer_run_failed.config(fg="Green")
timer_status.set("The timer has been set.")
timer_status_1.set(str(timer_time_string))
except:
Timers.timer_run_failed.config(fg="Red")
Timers.err_label.config(fg="Red")
timer_status_1.set(str(timer_time_string))
timer_status.set("Please enter a time, recurrence rate and timer length") | identifier_body |
graphql-client.js | 'use strict'
import utils from '../../utils/utils'
const gql = require('graphql-tag')
var deepEqual = require('deep-equal')
const tradle = require('@tradle/engine')
const tradleUtils = tradle.utils
const { ApolloClient, createNetworkInterface } = require('apollo-client')
var constants = require('@tradle/constants');
const {
TYPE,
SIG,
ROOT_HASH,
CUR_HASH,
PREV_HASH
} = constants
const { MONEY, ENUM, ORGANIZATION, FORM, MESSAGE } = constants.TYPES
const PHOTO = 'tradle.Photo'
const COUNTRY = 'tradle.Country'
const PUB_KEY = 'tradle.PubKey'
var cursor = {}
var search = {
initClient(meDriver, url) {
// let graphqlEndpoint
// let orgId = me.organization.id
// let url = me.organization.url
// if (!url)
// url = SERVICE_PROVIDERS.filter((sp) => sp.org === orgId)[0].url
// if (url)
let graphqlEndpoint = `${url.replace(/[/]+$/, '')}/graphql`
// else
// graphqlEndpoint = `${ENV.LOCAL_TRADLE_SERVER.replace(/[/]+$/, '')}/graphql`
if (!graphqlEndpoint)
return
// graphqlEndpoint = `http://localhost:21012/graphql`
const networkInterface = createNetworkInterface({
uri: graphqlEndpoint
})
networkInterface.use([{
applyMiddleware: async (req, next) => {
const printer = require('graphql/language/printer')
const body = tradleUtils.stringify({
...req.request,
query: printer.print(req.request.query)
})
const { sig } = await meDriver.sign({
object: {
[TYPE]: 'tradle.GraphQLQuery',
body,
// time: Date.now()
}
})
if (!req.options.headers) {
req.options.headers = {}
}
req.options.headers['x-tradle-sig'] = sig
next()
}
}])
return new ApolloClient({ networkInterface })
},
async searchServer(params) {
let self = this
let {client, modelName, filterResource, sortProperty, asc, limit, direction, first, notArchive, noCursorChange} = params
if (filterResource && !Object.keys(filterResource).length)
filterResource = null
let table = `rl_${modelName.replace(/\./g, '_')}`
let query = `query {\n${table}\n`
let model = utils.getModel(modelName).value
let props = model.properties
let inClause = []
let op = {
CONTAINS: '',
EQ: '',
NEQ: '',
STARTS_WITH: '',
GT: '',
GTE: '',
LT: '',
LTE: '',
}
let exclude = [ROOT_HASH, CUR_HASH, TYPE]
if (filterResource) {
for (let p in filterResource) {
if (exclude.indexOf(p) !== -1)
continue
// if (!props[p] || p.charAt(0) === '_')
// continue
let val = filterResource[p]
// if (p === TYPE) {
// if (!Array.isArray(val))
// continue
// else {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `"${r}"`
// })
// s += ']'
// inClause.push(s)
// }
// }
// if (p.charAt(0) === '_')
// debugger
if (!props[p] && val) {
if (p.charAt(0) === '_') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
}
else
op.EQ += `\n ${p}: "${val}",`
}
continue
}
else if (props[p].type === 'string') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
continue
}
else if (!val || !val.trim().length)
continue
}
if (props[p].type === 'string') {
let len = val.length
if (val.indexOf('*') === -1)
op.EQ += `\n ${p}: "${val}",`
else if (len > 1) {
if (val.charAt(0) === '*')
op.STARTS_WITH = `\n ${p}: "${val.substring(1)}",`
else if (val.charAt(len - 1) === '*')
op.CONTAINS = `\n ${p}: "${val.substring(1, len - 1)}",`
}
}
else if (props[p].type === 'boolean') {
if (val)
op.EQ += `\n ${p}: ${val},`
else
op.NEQ += `\n ${p}: true,`
}
else if (props[p].type === 'number')
self.addEqualsOrGreaterOrLesserNumber(val, op, props[p])
else if (props[p].type === 'object') {
// if (Array.isArray(val)) {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `{id: "${utils.getId(r)}", title: "${utils.getDisplayName(r)}"}`
// })
// s += ']'
// inClause.push(s)
// }
if (Array.isArray(val)) {
if (!val.length)
continue
let s = `${p}__id: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${utils.getId(r)}"`
})
s += ']'
inClause.push(s)
}
else {
if (props[p].ref === MONEY) {
let {value, currency} = val
op.EQ += `\n ${p}__currency: "${currency}",`
if (val.value)
addEqualsOrGreaterOrLesserNumber(value, op, props[p])
}
else {
op.EQ += `\n ${p}__id: "${val.id}",`
}
}
}
}
}
op.IN = inClause ? inClause.join(',') : ''
let qq = ''
for (let o in op) {
let q = op[o]
if (q.length) {
qq +=
`\n ${o}: {
${op[o]}\n},`
}
}
query += '('
let hasFilter = qq.length
if (!noCursorChange) {
if (first || cursor.modelName !== modelName) {
cursor = {endCursor: []}
}
if (limit) {
if (cursor) {
if (cursor.filter) {
if (!filterResource || deepEqual(filterResource, cursor.filter))
cursor = {endCursor: []}
}
}
cursor.endCursor = cursor.endCursor || []
cursor.modelName = modelName
cursor.filter = filterResource || null
let endCursor
let len = cursor.endCursor.length
if (len) {
if (direction === 'down')
endCursor = cursor.endCursor[len - 1]
else {
if (len > 2) {
cursor.endCursor.splice(len - 2, 1)
cursor.endCursor.splice(len - 1, 1)
len -= 2
}
else
cursor.endCursor = []
endCursor = (len - 1) ? cursor.endCursor[len - 2] : null
}
}
else
endCursor = null
if (endCursor)
query += `after: "${endCursor}"\n`
query += `first: ${limit}\n`
}
}
if (hasFilter)
query += `filter: { ${qq} },\n`
if (sortProperty) {
let sortBy
let ref = props[sortProperty].ref
if (ref) {
if (ref === MONEY)
sortBy = sortProperty + '__value'
else
sortBy = sortProperty + '__title'
}
else
sortBy = sortProperty
query += `\norderBy: {
property: ${sortBy},
desc: ${asc ? false : true}
}`
}
else
query += `\norderBy: {
property: _time,
desc: true
}`
// if (limit)
// query += `, limit: ${limit}`
query += ')'
query += `\n{\n`
query += `pageInfo {\n endCursor\n}\n`
query += `edges {\n node {\n`
let arr = this.getAllPropertiesForServerSearch(model)
query += `${arr.join(' \n')}`
query += `\n}` // close 'node'
query += `\n}` // close 'edges'
query += `\n}` // close properties block
query += `\n}` // close query
try {
let data = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
})
let result = data.data[table]
if (!noCursorChange) {
let endCursor = result.pageInfo.endCursor
if (endCursor) {
// if (!params.direction || params.direction === 'down') {
let hasThisCursor = cursor.endCursor.some((c) => c === endCursor)
if (!hasThisCursor)
cursor.endCursor.push(endCursor)
// }
}
}
if (!result.edges.length) {
// this.trigger({action: 'list', resource: filterResource, isSearch: true, direction: direction, first: first})
return
}
// // if (result.edges.length < limit)
// // cursor.endCursor = null
// let to = this.getRepresentative(utils.getId(me.organization))
// let toId = utils.getId(to)
// let list = result.edges.map((r) => this.convertToResource(r.node))
// if (!noTrigger)
// this.trigger({action: 'list', list: list, resource: filterResource, direction: direction, first: first})
return result.edges
} catch(error) {
// debugger
console.error(error)
}
function prettify (obj) {
return JSON.stringify(obj, null, 2)
}
function addEqualsOrGreaterOrLesserNumber(val, op, prop) {
let isMoney = prop.ref === MONEY
let p = prop.name
if (isMoney)
p += '__value'
let ch = val.toString().charAt(0)
switch (ch) {
case '>':
if (val.charAt(1) === '=')
op.GTE += `\n ${p}: ${val.substring(2)},`
else
op.GT += `\n ${p}: ${val.substring(1)},`
break
case '<':
if (val.charAt(1) === '=')
op.LTE += `\n ${p}: ${val.substring(2)},`
else
op.LT += `\n ${p}: ${val.substring(1)},`
break
default:
op.EQ += `\n ${p}: ${val},`
}
}
},
// # _author: "3c67687a96fe59d8f98b1c90cc46f943b938d54cda852b12fb1d43396e28978a"
// # _inbound: false
// # _recipient: ${hash}
async getChat(params) {
let { author, recipient, client, context } = params
let table = `rl_${MESSAGE.replace(/\./g, '_')}`
let inbound = true
let query =
`query {
rl_tradle_Message(
first:20,
filter:{
EQ: {
_inbound: true
context: "${context}"
_author: "${author}"
}
},
orderBy:{
property: time
desc:true
}
) {
edges {
node {
_author
_recipient
object
}
}
}
}`
let promisses = []
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
}))
let queryOutbound = query.replace('_inbound: true', '_inbound: false').replace('_author', '_recipient')
// `query {
// rl_tradle_Message(
// first:20,
// filter:{
// EQ: {
// _inbound: false
// context: "${context}"
// _recipient: "${author}"
// }
// },
// orderBy:{
// property: time
// desc:true
// }
// ) {
// edges {
// node {
// _author
// _recipient
// object
// }
// }
// }
// }`
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${queryOutbound}`),
}))
try {
let all = await Promise.all(promisses)
let result = []
all.forEach((data) => {
let list = data.data[table]
if (list.edges && list.edges.length)
list.edges.forEach(r => result.push(r.node))
})
// result.sort((a, b) => a.time - b.time)
return result
} catch (err) {
debugger
}
},
getAllPropertiesForServerSearch(model, inlined) {
let props = model.properties
let arr
if (model.inlined)
arr = []
else {
arr = ['_permalink', '_link', '_time', '_author', '_authorTitle', '_virtual', 'time']
if (model.id !== PUB_KEY && !inlined) {
let newarr = arr.concat(TYPE, SIG)
arr = newarr
}
}
for (let p in props) {
if (p.charAt(0) === '_')
continue
if (p === 'from' || p === 'to' || p === 'time' || p.indexOf('_group') !== -1)
continue
let prop = props[p]
if (prop.displayAs)
continue
let ptype = prop.type
if (ptype === 'array') {
// HACK
if (p === 'verifications')
continue
let iref = prop.items.ref
if (iref) {
if (iref === model.id) {
arr.push(
`${p} {
id
}`
)
}
else if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(
`${p} {
id
title
}`
)
}
continue
}
if (ptype !== 'object') {
arr.push(p)
continue
}
let ref = prop.ref
if (!ref) {
if (prop.range === 'json')
arr.push(p)
continue
}
if (ref === ORGANIZATION)
continue
if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(this.addRef(prop))
}
return arr
},
addRef(prop) {
let ref = prop.type === 'array' ? prop.items.ref : prop.ref
let p = prop.name
if (ref === MONEY) {
return (
`${p} {
value
currency
}`
)
}
if (ref === COUNTRY) {// || ref === CURRENCY)
return (
`${p} { | )
}
let m = utils.getModel(ref).value
if (m.subClassOf === ENUM) {
if (m.enum)
return (
`${p} {
id
title
}`
)
else
return p
}
if (m.id === PHOTO) {
let mprops = m.properties
return (
`${p} {${this.getAllPropertiesForServerSearch(m)}}`
)
}
return (
`${p} {
id
title
}`
)
},
addInlined(prop) {
let ref = prop.type === 'array' ? prop.items.ref : prop.ref
let p = prop.name
let refM = utils.getModel(ref).value
if (ref === FORM || refM.isInterface || refM.subClassOf === ENUM) {
if (prop.range === 'json')
return p
else
return (
`${p} {
id
title
}`
)
}
else {
let allProps = this.getAllPropertiesForServerSearch(refM, true)
return (
`${p} {
${allProps.toString().replace(/,/g, '\n')}
}`
)
}
},
async _getItem(id, client) {
let parts = id.split('_')
let modelName = parts[0]
let m = utils.getModel(modelName)
if (!m)
return
m = m.value
let table = `r_${modelName.replace(/\./g, '_')}`
// let _link = parts[parts.length - 1]
let _permalink = parts[1]
let query = `query {\n${table} (_permalink: "${_permalink}")\n`
let arr = this.getAllPropertiesForServerSearch(m)
query += `\n{${arr.join(' \n')}\n}\n}`
try {
let result = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`)
})
return result.data[table]
}
catch(err) {
console.log('graphQL._getItem', err)
debugger
}
},
}
module.exports = search | id
title
}` | random_line_split |
graphql-client.js | 'use strict'
import utils from '../../utils/utils'
const gql = require('graphql-tag')
var deepEqual = require('deep-equal')
const tradle = require('@tradle/engine')
const tradleUtils = tradle.utils
const { ApolloClient, createNetworkInterface } = require('apollo-client')
var constants = require('@tradle/constants');
const {
TYPE,
SIG,
ROOT_HASH,
CUR_HASH,
PREV_HASH
} = constants
const { MONEY, ENUM, ORGANIZATION, FORM, MESSAGE } = constants.TYPES
const PHOTO = 'tradle.Photo'
const COUNTRY = 'tradle.Country'
const PUB_KEY = 'tradle.PubKey'
var cursor = {}
var search = {
initClient(meDriver, url) {
// let graphqlEndpoint
// let orgId = me.organization.id
// let url = me.organization.url
// if (!url)
// url = SERVICE_PROVIDERS.filter((sp) => sp.org === orgId)[0].url
// if (url)
let graphqlEndpoint = `${url.replace(/[/]+$/, '')}/graphql`
// else
// graphqlEndpoint = `${ENV.LOCAL_TRADLE_SERVER.replace(/[/]+$/, '')}/graphql`
if (!graphqlEndpoint)
return
// graphqlEndpoint = `http://localhost:21012/graphql`
const networkInterface = createNetworkInterface({
uri: graphqlEndpoint
})
networkInterface.use([{
applyMiddleware: async (req, next) => {
const printer = require('graphql/language/printer')
const body = tradleUtils.stringify({
...req.request,
query: printer.print(req.request.query)
})
const { sig } = await meDriver.sign({
object: {
[TYPE]: 'tradle.GraphQLQuery',
body,
// time: Date.now()
}
})
if (!req.options.headers) {
req.options.headers = {}
}
req.options.headers['x-tradle-sig'] = sig
next()
}
}])
return new ApolloClient({ networkInterface })
},
async searchServer(params) {
let self = this
let {client, modelName, filterResource, sortProperty, asc, limit, direction, first, notArchive, noCursorChange} = params
if (filterResource && !Object.keys(filterResource).length)
filterResource = null
let table = `rl_${modelName.replace(/\./g, '_')}`
let query = `query {\n${table}\n`
let model = utils.getModel(modelName).value
let props = model.properties
let inClause = []
let op = {
CONTAINS: '',
EQ: '',
NEQ: '',
STARTS_WITH: '',
GT: '',
GTE: '',
LT: '',
LTE: '',
}
let exclude = [ROOT_HASH, CUR_HASH, TYPE]
if (filterResource) {
for (let p in filterResource) {
if (exclude.indexOf(p) !== -1)
continue
// if (!props[p] || p.charAt(0) === '_')
// continue
let val = filterResource[p]
// if (p === TYPE) {
// if (!Array.isArray(val))
// continue
// else {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `"${r}"`
// })
// s += ']'
// inClause.push(s)
// }
// }
// if (p.charAt(0) === '_')
// debugger
if (!props[p] && val) {
if (p.charAt(0) === '_') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
}
else
op.EQ += `\n ${p}: "${val}",`
}
continue
}
else if (props[p].type === 'string') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
continue
}
else if (!val || !val.trim().length)
continue
}
if (props[p].type === 'string') {
let len = val.length
if (val.indexOf('*') === -1)
op.EQ += `\n ${p}: "${val}",`
else if (len > 1) {
if (val.charAt(0) === '*')
op.STARTS_WITH = `\n ${p}: "${val.substring(1)}",`
else if (val.charAt(len - 1) === '*')
op.CONTAINS = `\n ${p}: "${val.substring(1, len - 1)}",`
}
}
else if (props[p].type === 'boolean') {
if (val)
op.EQ += `\n ${p}: ${val},`
else
op.NEQ += `\n ${p}: true,`
}
else if (props[p].type === 'number')
self.addEqualsOrGreaterOrLesserNumber(val, op, props[p])
else if (props[p].type === 'object') {
// if (Array.isArray(val)) {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `{id: "${utils.getId(r)}", title: "${utils.getDisplayName(r)}"}`
// })
// s += ']'
// inClause.push(s)
// }
if (Array.isArray(val)) {
if (!val.length)
continue
let s = `${p}__id: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${utils.getId(r)}"`
})
s += ']'
inClause.push(s)
}
else {
if (props[p].ref === MONEY) {
let {value, currency} = val
op.EQ += `\n ${p}__currency: "${currency}",`
if (val.value)
addEqualsOrGreaterOrLesserNumber(value, op, props[p])
}
else {
op.EQ += `\n ${p}__id: "${val.id}",`
}
}
}
}
}
op.IN = inClause ? inClause.join(',') : ''
let qq = ''
for (let o in op) {
let q = op[o]
if (q.length) {
qq +=
`\n ${o}: {
${op[o]}\n},`
}
}
query += '('
let hasFilter = qq.length
if (!noCursorChange) {
if (first || cursor.modelName !== modelName) {
cursor = {endCursor: []}
}
if (limit) {
if (cursor) {
if (cursor.filter) {
if (!filterResource || deepEqual(filterResource, cursor.filter))
cursor = {endCursor: []}
}
}
cursor.endCursor = cursor.endCursor || []
cursor.modelName = modelName
cursor.filter = filterResource || null
let endCursor
let len = cursor.endCursor.length
if (len) {
if (direction === 'down')
endCursor = cursor.endCursor[len - 1]
else {
if (len > 2) {
cursor.endCursor.splice(len - 2, 1)
cursor.endCursor.splice(len - 1, 1)
len -= 2
}
else
cursor.endCursor = []
endCursor = (len - 1) ? cursor.endCursor[len - 2] : null
}
}
else
endCursor = null
if (endCursor)
query += `after: "${endCursor}"\n`
query += `first: ${limit}\n`
}
}
if (hasFilter)
query += `filter: { ${qq} },\n`
if (sortProperty) {
let sortBy
let ref = props[sortProperty].ref
if (ref) {
if (ref === MONEY)
sortBy = sortProperty + '__value'
else
sortBy = sortProperty + '__title'
}
else
sortBy = sortProperty
query += `\norderBy: {
property: ${sortBy},
desc: ${asc ? false : true}
}`
}
else
query += `\norderBy: {
property: _time,
desc: true
}`
// if (limit)
// query += `, limit: ${limit}`
query += ')'
query += `\n{\n`
query += `pageInfo {\n endCursor\n}\n`
query += `edges {\n node {\n`
let arr = this.getAllPropertiesForServerSearch(model)
query += `${arr.join(' \n')}`
query += `\n}` // close 'node'
query += `\n}` // close 'edges'
query += `\n}` // close properties block
query += `\n}` // close query
try {
let data = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
})
let result = data.data[table]
if (!noCursorChange) {
let endCursor = result.pageInfo.endCursor
if (endCursor) {
// if (!params.direction || params.direction === 'down') {
let hasThisCursor = cursor.endCursor.some((c) => c === endCursor)
if (!hasThisCursor)
cursor.endCursor.push(endCursor)
// }
}
}
if (!result.edges.length) {
// this.trigger({action: 'list', resource: filterResource, isSearch: true, direction: direction, first: first})
return
}
// // if (result.edges.length < limit)
// // cursor.endCursor = null
// let to = this.getRepresentative(utils.getId(me.organization))
// let toId = utils.getId(to)
// let list = result.edges.map((r) => this.convertToResource(r.node))
// if (!noTrigger)
// this.trigger({action: 'list', list: list, resource: filterResource, direction: direction, first: first})
return result.edges
} catch(error) {
// debugger
console.error(error)
}
function prettify (obj) {
return JSON.stringify(obj, null, 2)
}
function addEqualsOrGreaterOrLesserNumber(val, op, prop) {
let isMoney = prop.ref === MONEY
let p = prop.name
if (isMoney)
p += '__value'
let ch = val.toString().charAt(0)
switch (ch) {
case '>':
if (val.charAt(1) === '=')
op.GTE += `\n ${p}: ${val.substring(2)},`
else
op.GT += `\n ${p}: ${val.substring(1)},`
break
case '<':
if (val.charAt(1) === '=')
op.LTE += `\n ${p}: ${val.substring(2)},`
else
op.LT += `\n ${p}: ${val.substring(1)},`
break
default:
op.EQ += `\n ${p}: ${val},`
}
}
},
// # _author: "3c67687a96fe59d8f98b1c90cc46f943b938d54cda852b12fb1d43396e28978a"
// # _inbound: false
// # _recipient: ${hash}
async | (params) {
let { author, recipient, client, context } = params
let table = `rl_${MESSAGE.replace(/\./g, '_')}`
let inbound = true
let query =
`query {
rl_tradle_Message(
first:20,
filter:{
EQ: {
_inbound: true
context: "${context}"
_author: "${author}"
}
},
orderBy:{
property: time
desc:true
}
) {
edges {
node {
_author
_recipient
object
}
}
}
}`
let promisses = []
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
}))
let queryOutbound = query.replace('_inbound: true', '_inbound: false').replace('_author', '_recipient')
// `query {
// rl_tradle_Message(
// first:20,
// filter:{
// EQ: {
// _inbound: false
// context: "${context}"
// _recipient: "${author}"
// }
// },
// orderBy:{
// property: time
// desc:true
// }
// ) {
// edges {
// node {
// _author
// _recipient
// object
// }
// }
// }
// }`
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${queryOutbound}`),
}))
try {
let all = await Promise.all(promisses)
let result = []
all.forEach((data) => {
let list = data.data[table]
if (list.edges && list.edges.length)
list.edges.forEach(r => result.push(r.node))
})
// result.sort((a, b) => a.time - b.time)
return result
} catch (err) {
debugger
}
},
getAllPropertiesForServerSearch(model, inlined) {
let props = model.properties
let arr
if (model.inlined)
arr = []
else {
arr = ['_permalink', '_link', '_time', '_author', '_authorTitle', '_virtual', 'time']
if (model.id !== PUB_KEY && !inlined) {
let newarr = arr.concat(TYPE, SIG)
arr = newarr
}
}
for (let p in props) {
if (p.charAt(0) === '_')
continue
if (p === 'from' || p === 'to' || p === 'time' || p.indexOf('_group') !== -1)
continue
let prop = props[p]
if (prop.displayAs)
continue
let ptype = prop.type
if (ptype === 'array') {
// HACK
if (p === 'verifications')
continue
let iref = prop.items.ref
if (iref) {
if (iref === model.id) {
arr.push(
`${p} {
id
}`
)
}
else if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(
`${p} {
id
title
}`
)
}
continue
}
if (ptype !== 'object') {
arr.push(p)
continue
}
let ref = prop.ref
if (!ref) {
if (prop.range === 'json')
arr.push(p)
continue
}
if (ref === ORGANIZATION)
continue
if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(this.addRef(prop))
}
return arr
},
addRef(prop) {
let ref = prop.type === 'array' ? prop.items.ref : prop.ref
let p = prop.name
if (ref === MONEY) {
return (
`${p} {
value
currency
}`
)
}
if (ref === COUNTRY) {// || ref === CURRENCY)
return (
`${p} {
id
title
}`
)
}
let m = utils.getModel(ref).value
if (m.subClassOf === ENUM) {
if (m.enum)
return (
`${p} {
id
title
}`
)
else
return p
}
if (m.id === PHOTO) {
let mprops = m.properties
return (
`${p} {${this.getAllPropertiesForServerSearch(m)}}`
)
}
return (
`${p} {
id
title
}`
)
},
addInlined(prop) {
let ref = prop.type === 'array' ? prop.items.ref : prop.ref
let p = prop.name
let refM = utils.getModel(ref).value
if (ref === FORM || refM.isInterface || refM.subClassOf === ENUM) {
if (prop.range === 'json')
return p
else
return (
`${p} {
id
title
}`
)
}
else {
let allProps = this.getAllPropertiesForServerSearch(refM, true)
return (
`${p} {
${allProps.toString().replace(/,/g, '\n')}
}`
)
}
},
async _getItem(id, client) {
let parts = id.split('_')
let modelName = parts[0]
let m = utils.getModel(modelName)
if (!m)
return
m = m.value
let table = `r_${modelName.replace(/\./g, '_')}`
// let _link = parts[parts.length - 1]
let _permalink = parts[1]
let query = `query {\n${table} (_permalink: "${_permalink}")\n`
let arr = this.getAllPropertiesForServerSearch(m)
query += `\n{${arr.join(' \n')}\n}\n}`
try {
let result = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`)
})
return result.data[table]
}
catch(err) {
console.log('graphQL._getItem', err)
debugger
}
},
}
module.exports = search
| getChat | identifier_name |
graphql-client.js | 'use strict'
import utils from '../../utils/utils'
const gql = require('graphql-tag')
var deepEqual = require('deep-equal')
const tradle = require('@tradle/engine')
const tradleUtils = tradle.utils
const { ApolloClient, createNetworkInterface } = require('apollo-client')
var constants = require('@tradle/constants');
const {
TYPE,
SIG,
ROOT_HASH,
CUR_HASH,
PREV_HASH
} = constants
const { MONEY, ENUM, ORGANIZATION, FORM, MESSAGE } = constants.TYPES
const PHOTO = 'tradle.Photo'
const COUNTRY = 'tradle.Country'
const PUB_KEY = 'tradle.PubKey'
var cursor = {}
var search = {
initClient(meDriver, url) {
// let graphqlEndpoint
// let orgId = me.organization.id
// let url = me.organization.url
// if (!url)
// url = SERVICE_PROVIDERS.filter((sp) => sp.org === orgId)[0].url
// if (url)
let graphqlEndpoint = `${url.replace(/[/]+$/, '')}/graphql`
// else
// graphqlEndpoint = `${ENV.LOCAL_TRADLE_SERVER.replace(/[/]+$/, '')}/graphql`
if (!graphqlEndpoint)
return
// graphqlEndpoint = `http://localhost:21012/graphql`
const networkInterface = createNetworkInterface({
uri: graphqlEndpoint
})
networkInterface.use([{
applyMiddleware: async (req, next) => {
const printer = require('graphql/language/printer')
const body = tradleUtils.stringify({
...req.request,
query: printer.print(req.request.query)
})
const { sig } = await meDriver.sign({
object: {
[TYPE]: 'tradle.GraphQLQuery',
body,
// time: Date.now()
}
})
if (!req.options.headers) {
req.options.headers = {}
}
req.options.headers['x-tradle-sig'] = sig
next()
}
}])
return new ApolloClient({ networkInterface })
},
async searchServer(params) {
let self = this
let {client, modelName, filterResource, sortProperty, asc, limit, direction, first, notArchive, noCursorChange} = params
if (filterResource && !Object.keys(filterResource).length)
filterResource = null
let table = `rl_${modelName.replace(/\./g, '_')}`
let query = `query {\n${table}\n`
let model = utils.getModel(modelName).value
let props = model.properties
let inClause = []
let op = {
CONTAINS: '',
EQ: '',
NEQ: '',
STARTS_WITH: '',
GT: '',
GTE: '',
LT: '',
LTE: '',
}
let exclude = [ROOT_HASH, CUR_HASH, TYPE]
if (filterResource) {
for (let p in filterResource) {
if (exclude.indexOf(p) !== -1)
continue
// if (!props[p] || p.charAt(0) === '_')
// continue
let val = filterResource[p]
// if (p === TYPE) {
// if (!Array.isArray(val))
// continue
// else {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `"${r}"`
// })
// s += ']'
// inClause.push(s)
// }
// }
// if (p.charAt(0) === '_')
// debugger
if (!props[p] && val) {
if (p.charAt(0) === '_') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
}
else
op.EQ += `\n ${p}: "${val}",`
}
continue
}
else if (props[p].type === 'string') {
if (Array.isArray(val)) {
let s = `${p}: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${r}"`
})
s += ']'
inClause.push(s)
continue
}
else if (!val || !val.trim().length)
continue
}
if (props[p].type === 'string') {
let len = val.length
if (val.indexOf('*') === -1)
op.EQ += `\n ${p}: "${val}",`
else if (len > 1) {
if (val.charAt(0) === '*')
op.STARTS_WITH = `\n ${p}: "${val.substring(1)}",`
else if (val.charAt(len - 1) === '*')
op.CONTAINS = `\n ${p}: "${val.substring(1, len - 1)}",`
}
}
else if (props[p].type === 'boolean') {
if (val)
op.EQ += `\n ${p}: ${val},`
else
op.NEQ += `\n ${p}: true,`
}
else if (props[p].type === 'number')
self.addEqualsOrGreaterOrLesserNumber(val, op, props[p])
else if (props[p].type === 'object') {
// if (Array.isArray(val)) {
// let s = `${p}: [`
// val.forEach((r, i) => {
// if (i)
// s += ', '
// s += `{id: "${utils.getId(r)}", title: "${utils.getDisplayName(r)}"}`
// })
// s += ']'
// inClause.push(s)
// }
if (Array.isArray(val)) {
if (!val.length)
continue
let s = `${p}__id: [`
val.forEach((r, i) => {
if (i)
s += ', '
s += `"${utils.getId(r)}"`
})
s += ']'
inClause.push(s)
}
else {
if (props[p].ref === MONEY) {
let {value, currency} = val
op.EQ += `\n ${p}__currency: "${currency}",`
if (val.value)
addEqualsOrGreaterOrLesserNumber(value, op, props[p])
}
else {
op.EQ += `\n ${p}__id: "${val.id}",`
}
}
}
}
}
op.IN = inClause ? inClause.join(',') : ''
let qq = ''
for (let o in op) {
let q = op[o]
if (q.length) {
qq +=
`\n ${o}: {
${op[o]}\n},`
}
}
query += '('
let hasFilter = qq.length
if (!noCursorChange) {
if (first || cursor.modelName !== modelName) {
cursor = {endCursor: []}
}
if (limit) {
if (cursor) {
if (cursor.filter) {
if (!filterResource || deepEqual(filterResource, cursor.filter))
cursor = {endCursor: []}
}
}
cursor.endCursor = cursor.endCursor || []
cursor.modelName = modelName
cursor.filter = filterResource || null
let endCursor
let len = cursor.endCursor.length
if (len) {
if (direction === 'down')
endCursor = cursor.endCursor[len - 1]
else {
if (len > 2) {
cursor.endCursor.splice(len - 2, 1)
cursor.endCursor.splice(len - 1, 1)
len -= 2
}
else
cursor.endCursor = []
endCursor = (len - 1) ? cursor.endCursor[len - 2] : null
}
}
else
endCursor = null
if (endCursor)
query += `after: "${endCursor}"\n`
query += `first: ${limit}\n`
}
}
if (hasFilter)
query += `filter: { ${qq} },\n`
if (sortProperty) {
let sortBy
let ref = props[sortProperty].ref
if (ref) {
if (ref === MONEY)
sortBy = sortProperty + '__value'
else
sortBy = sortProperty + '__title'
}
else
sortBy = sortProperty
query += `\norderBy: {
property: ${sortBy},
desc: ${asc ? false : true}
}`
}
else
query += `\norderBy: {
property: _time,
desc: true
}`
// if (limit)
// query += `, limit: ${limit}`
query += ')'
query += `\n{\n`
query += `pageInfo {\n endCursor\n}\n`
query += `edges {\n node {\n`
let arr = this.getAllPropertiesForServerSearch(model)
query += `${arr.join(' \n')}`
query += `\n}` // close 'node'
query += `\n}` // close 'edges'
query += `\n}` // close properties block
query += `\n}` // close query
try {
let data = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
})
let result = data.data[table]
if (!noCursorChange) {
let endCursor = result.pageInfo.endCursor
if (endCursor) {
// if (!params.direction || params.direction === 'down') {
let hasThisCursor = cursor.endCursor.some((c) => c === endCursor)
if (!hasThisCursor)
cursor.endCursor.push(endCursor)
// }
}
}
if (!result.edges.length) {
// this.trigger({action: 'list', resource: filterResource, isSearch: true, direction: direction, first: first})
return
}
// // if (result.edges.length < limit)
// // cursor.endCursor = null
// let to = this.getRepresentative(utils.getId(me.organization))
// let toId = utils.getId(to)
// let list = result.edges.map((r) => this.convertToResource(r.node))
// if (!noTrigger)
// this.trigger({action: 'list', list: list, resource: filterResource, direction: direction, first: first})
return result.edges
} catch(error) {
// debugger
console.error(error)
}
function prettify (obj) {
return JSON.stringify(obj, null, 2)
}
function addEqualsOrGreaterOrLesserNumber(val, op, prop) {
let isMoney = prop.ref === MONEY
let p = prop.name
if (isMoney)
p += '__value'
let ch = val.toString().charAt(0)
switch (ch) {
case '>':
if (val.charAt(1) === '=')
op.GTE += `\n ${p}: ${val.substring(2)},`
else
op.GT += `\n ${p}: ${val.substring(1)},`
break
case '<':
if (val.charAt(1) === '=')
op.LTE += `\n ${p}: ${val.substring(2)},`
else
op.LT += `\n ${p}: ${val.substring(1)},`
break
default:
op.EQ += `\n ${p}: ${val},`
}
}
},
// # _author: "3c67687a96fe59d8f98b1c90cc46f943b938d54cda852b12fb1d43396e28978a"
// # _inbound: false
// # _recipient: ${hash}
async getChat(params) | ,
getAllPropertiesForServerSearch(model, inlined) {
let props = model.properties
let arr
if (model.inlined)
arr = []
else {
arr = ['_permalink', '_link', '_time', '_author', '_authorTitle', '_virtual', 'time']
if (model.id !== PUB_KEY && !inlined) {
let newarr = arr.concat(TYPE, SIG)
arr = newarr
}
}
for (let p in props) {
if (p.charAt(0) === '_')
continue
if (p === 'from' || p === 'to' || p === 'time' || p.indexOf('_group') !== -1)
continue
let prop = props[p]
if (prop.displayAs)
continue
let ptype = prop.type
if (ptype === 'array') {
// HACK
if (p === 'verifications')
continue
let iref = prop.items.ref
if (iref) {
if (iref === model.id) {
arr.push(
`${p} {
id
}`
)
}
else if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(
`${p} {
id
title
}`
)
}
continue
}
if (ptype !== 'object') {
arr.push(p)
continue
}
let ref = prop.ref
if (!ref) {
if (prop.range === 'json')
arr.push(p)
continue
}
if (ref === ORGANIZATION)
continue
if (prop.inlined)
arr.push(this.addInlined(prop))
else
arr.push(this.addRef(prop))
}
return arr
},
addRef(prop) {
let ref = prop.type === 'array' ? prop.items.ref : prop.ref
let p = prop.name
if (ref === MONEY) {
return (
`${p} {
value
currency
}`
)
}
if (ref === COUNTRY) {// || ref === CURRENCY)
return (
`${p} {
id
title
}`
)
}
let m = utils.getModel(ref).value
if (m.subClassOf === ENUM) {
if (m.enum)
return (
`${p} {
id
title
}`
)
else
return p
}
if (m.id === PHOTO) {
let mprops = m.properties
return (
`${p} {${this.getAllPropertiesForServerSearch(m)}}`
)
}
return (
`${p} {
id
title
}`
)
},
addInlined(prop) {
let ref = prop.type === 'array' ? prop.items.ref : prop.ref
let p = prop.name
let refM = utils.getModel(ref).value
if (ref === FORM || refM.isInterface || refM.subClassOf === ENUM) {
if (prop.range === 'json')
return p
else
return (
`${p} {
id
title
}`
)
}
else {
let allProps = this.getAllPropertiesForServerSearch(refM, true)
return (
`${p} {
${allProps.toString().replace(/,/g, '\n')}
}`
)
}
},
async _getItem(id, client) {
let parts = id.split('_')
let modelName = parts[0]
let m = utils.getModel(modelName)
if (!m)
return
m = m.value
let table = `r_${modelName.replace(/\./g, '_')}`
// let _link = parts[parts.length - 1]
let _permalink = parts[1]
let query = `query {\n${table} (_permalink: "${_permalink}")\n`
let arr = this.getAllPropertiesForServerSearch(m)
query += `\n{${arr.join(' \n')}\n}\n}`
try {
let result = await client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`)
})
return result.data[table]
}
catch(err) {
console.log('graphQL._getItem', err)
debugger
}
},
}
module.exports = search
| {
let { author, recipient, client, context } = params
let table = `rl_${MESSAGE.replace(/\./g, '_')}`
let inbound = true
let query =
`query {
rl_tradle_Message(
first:20,
filter:{
EQ: {
_inbound: true
context: "${context}"
_author: "${author}"
}
},
orderBy:{
property: time
desc:true
}
) {
edges {
node {
_author
_recipient
object
}
}
}
}`
let promisses = []
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${query}`),
}))
let queryOutbound = query.replace('_inbound: true', '_inbound: false').replace('_author', '_recipient')
// `query {
// rl_tradle_Message(
// first:20,
// filter:{
// EQ: {
// _inbound: false
// context: "${context}"
// _recipient: "${author}"
// }
// },
// orderBy:{
// property: time
// desc:true
// }
// ) {
// edges {
// node {
// _author
// _recipient
// object
// }
// }
// }
// }`
promisses.push(client.query({
fetchPolicy: 'network-only',
query: gql(`${queryOutbound}`),
}))
try {
let all = await Promise.all(promisses)
let result = []
all.forEach((data) => {
let list = data.data[table]
if (list.edges && list.edges.length)
list.edges.forEach(r => result.push(r.node))
})
// result.sort((a, b) => a.time - b.time)
return result
} catch (err) {
debugger
}
} | identifier_body |
greader.go | package api
import (
"encoding/json"
"fmt"
log "github.com/golang/glog"
"github.com/jrupac/goliath/models"
"github.com/jrupac/goliath/storage"
"github.com/jrupac/goliath/utils"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/bcrypt"
"net/http"
"net/http/httputil"
"strconv"
"strings"
"time"
)
const (
readingListStreamId string = "user/-/state/com.google/reading-list"
readStreamId string = "user/-/state/com.google/read"
unreadStreamId string = "user/-/state/com.google/kept-unread"
starredStreamId string = "user/-/state/com.google/starred"
broadcastStreamId string = "user/-/state/com.google/broadcast"
)
var (
greaderLatencyMetric = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "greader_server_latency",
Help: "Server-side latency of GReader API operations.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"method"},
)
)
func init() {
prometheus.MustRegister(greaderLatencyMetric)
}
// GReader is an implementation of the GReader API.
type GReader struct {
d *storage.Database
}
// GReaderHandler returns a new GReader handler.
func GReaderHandler(d *storage.Database) http.HandlerFunc {
return GReader{d}.Handler()
}
// Handler returns a handler function that implements the GReader API.
func (a GReader) Handler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
a.route(w, r)
}
}
func (a GReader) recordLatency(t time.Time, label string) {
utils.Elapsed(t, func(d time.Duration) {
// Record latency measurements in microseconds.
greaderLatencyMetric.WithLabelValues(label).Observe(float64(d) / float64(time.Microsecond))
})
}
func (a GReader) route(w http.ResponseWriter, r *http.Request) {
// Record the total server latency of each call.
defer a.recordLatency(time.Now(), "server")
w.Header().Set("Content-Type", "application/json")
switch r.URL.Path {
case "/greader/accounts/ClientLogin":
a.handleLogin(w, r)
case "/greader/reader/api/0/token":
a.withAuth(w, r, a.handlePostToken)
case "/greader/reader/api/0/user-info":
a.withAuth(w, r, a.handleUserInfo)
case "/greader/reader/api/0/subscription/list":
a.withAuth(w, r, a.handleSubscriptionList)
case "/greader/reader/api/0/stream/items/ids":
a.withAuth(w, r, a.handleStreamItemIds)
case "/greader/reader/api/0/stream/items/contents":
a.withAuth(w, r, a.handleStreamItemsContents)
case "/greader/reader/api/0/edit-tag":
a.withAuth(w, r, a.handleEditTag)
default:
log.Warningf("Got unexpected route: %s", r.URL.String())
dump, err := httputil.DumpRequest(r, true)
if err != nil {
log.Warningf("Failed to dump request: %s", err)
}
log.Warningf("%q", dump)
a.returnError(w, http.StatusBadRequest)
}
}
func (a GReader) handleLogin(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
formUser := r.Form.Get("Email")
formPass := r.Form.Get("Passwd")
user, err := a.d.GetUserByUsername(formUser)
if err != nil {
log.Warningf("Failed to find user: %s", formUser)
a.returnError(w, http.StatusUnauthorized)
return
}
ok := bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if ok == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders {
folderMap[folder.ID] = folder.Name
}
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so let it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.PostForm.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client, it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title, | Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
var status string
// Only support updating one tag
switch r.Form.Get("a") {
case readStreamId:
status = "read"
case unreadStreamId:
status = "unread"
case starredStreamId, broadcastStreamId:
// TODO: Support starring items
a.returnError(w, http.StatusNotImplemented)
return
}
for _, articleId := range articleIds {
err = a.d.MarkArticleForUser(user, articleId, status)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
}
_, _ = w.Write([]byte("OK"))
a.returnSuccess(w, nil)
}
func (a GReader) withAuth(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, models.User)) {
// Header should be in format:
// Authorization: GoogleLogin auth=<token>
authHeader := r.Header.Get("Authorization")
if authHeader == "" {
a.returnError(w, http.StatusUnauthorized)
return
}
authFields := strings.Fields(authHeader)
if len(authFields) != 2 || !strings.EqualFold(authFields[0], "GoogleLogin") {
a.returnError(w, http.StatusBadRequest)
return
}
authStr, tokenStr, found := strings.Cut(authFields[1], "=")
if !found {
a.returnError(w, http.StatusBadRequest)
return
}
if !strings.EqualFold(authStr, "auth") {
a.returnError(w, http.StatusBadRequest)
return
}
username, token, err := extractAuthToken(tokenStr)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
user, err := a.d.GetUserByUsername(username)
if err != nil {
a.returnError(w, http.StatusUnauthorized)
return
}
if validateAuthToken(token, username, user.HashPass) {
handler(w, r, user)
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func greaderArticleId(articleId int64) string {
return fmt.Sprintf("tag:google.com,2005:reader/item/%x", articleId)
}
func greaderFeedId(feedId int64) string {
return fmt.Sprintf("feed/%d", feedId)
}
func greaderFolderId(folderId int64) string {
return fmt.Sprintf("user/-/label/%d", folderId)
}
func (a GReader) returnError(w http.ResponseWriter, status int) {
w.WriteHeader(status)
}
func (a GReader) returnSuccess(w http.ResponseWriter, resp any) {
w.WriteHeader(http.StatusOK)
if resp != nil {
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
if err := enc.Encode(resp); err != nil {
a.returnError(w, http.StatusInternalServerError)
}
}
} | Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
}, | random_line_split |
greader.go | package api
import (
"encoding/json"
"fmt"
log "github.com/golang/glog"
"github.com/jrupac/goliath/models"
"github.com/jrupac/goliath/storage"
"github.com/jrupac/goliath/utils"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/bcrypt"
"net/http"
"net/http/httputil"
"strconv"
"strings"
"time"
)
const (
readingListStreamId string = "user/-/state/com.google/reading-list"
readStreamId string = "user/-/state/com.google/read"
unreadStreamId string = "user/-/state/com.google/kept-unread"
starredStreamId string = "user/-/state/com.google/starred"
broadcastStreamId string = "user/-/state/com.google/broadcast"
)
var (
greaderLatencyMetric = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "greader_server_latency",
Help: "Server-side latency of GReader API operations.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"method"},
)
)
func init() {
prometheus.MustRegister(greaderLatencyMetric)
}
// GReader is an implementation of the GReader API.
type GReader struct {
d *storage.Database
}
// GReaderHandler returns a new GReader handler.
func GReaderHandler(d *storage.Database) http.HandlerFunc {
return GReader{d}.Handler()
}
// Handler returns a handler function that implements the GReader API.
func (a GReader) Handler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
a.route(w, r)
}
}
func (a GReader) recordLatency(t time.Time, label string) {
utils.Elapsed(t, func(d time.Duration) {
// Record latency measurements in microseconds.
greaderLatencyMetric.WithLabelValues(label).Observe(float64(d) / float64(time.Microsecond))
})
}
func (a GReader) route(w http.ResponseWriter, r *http.Request) {
// Record the total server latency of each call.
defer a.recordLatency(time.Now(), "server")
w.Header().Set("Content-Type", "application/json")
switch r.URL.Path {
case "/greader/accounts/ClientLogin":
a.handleLogin(w, r)
case "/greader/reader/api/0/token":
a.withAuth(w, r, a.handlePostToken)
case "/greader/reader/api/0/user-info":
a.withAuth(w, r, a.handleUserInfo)
case "/greader/reader/api/0/subscription/list":
a.withAuth(w, r, a.handleSubscriptionList)
case "/greader/reader/api/0/stream/items/ids":
a.withAuth(w, r, a.handleStreamItemIds)
case "/greader/reader/api/0/stream/items/contents":
a.withAuth(w, r, a.handleStreamItemsContents)
case "/greader/reader/api/0/edit-tag":
a.withAuth(w, r, a.handleEditTag)
default:
log.Warningf("Got unexpected route: %s", r.URL.String())
dump, err := httputil.DumpRequest(r, true)
if err != nil {
log.Warningf("Failed to dump request: %s", err)
}
log.Warningf("%q", dump)
a.returnError(w, http.StatusBadRequest)
}
}
func (a GReader) handleLogin(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
formUser := r.Form.Get("Email")
formPass := r.Form.Get("Passwd")
user, err := a.d.GetUserByUsername(formUser)
if err != nil {
log.Warningf("Failed to find user: %s", formUser)
a.returnError(w, http.StatusUnauthorized)
return
}
ok := bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if ok == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders |
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so let it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.PostForm.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client, it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title,
Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
},
Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
var status string
// Only support updating one tag
switch r.Form.Get("a") {
case readStreamId:
status = "read"
case unreadStreamId:
status = "unread"
case starredStreamId, broadcastStreamId:
// TODO: Support starring items
a.returnError(w, http.StatusNotImplemented)
return
}
for _, articleId := range articleIds {
err = a.d.MarkArticleForUser(user, articleId, status)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
}
_, _ = w.Write([]byte("OK"))
a.returnSuccess(w, nil)
}
func (a GReader) withAuth(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, models.User)) {
// Header should be in format:
// Authorization: GoogleLogin auth=<token>
authHeader := r.Header.Get("Authorization")
if authHeader == "" {
a.returnError(w, http.StatusUnauthorized)
return
}
authFields := strings.Fields(authHeader)
if len(authFields) != 2 || !strings.EqualFold(authFields[0], "GoogleLogin") {
a.returnError(w, http.StatusBadRequest)
return
}
authStr, tokenStr, found := strings.Cut(authFields[1], "=")
if !found {
a.returnError(w, http.StatusBadRequest)
return
}
if !strings.EqualFold(authStr, "auth") {
a.returnError(w, http.StatusBadRequest)
return
}
username, token, err := extractAuthToken(tokenStr)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
user, err := a.d.GetUserByUsername(username)
if err != nil {
a.returnError(w, http.StatusUnauthorized)
return
}
if validateAuthToken(token, username, user.HashPass) {
handler(w, r, user)
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func greaderArticleId(articleId int64) string {
return fmt.Sprintf("tag:google.com,2005:reader/item/%x", articleId)
}
func greaderFeedId(feedId int64) string {
return fmt.Sprintf("feed/%d", feedId)
}
func greaderFolderId(folderId int64) string {
return fmt.Sprintf("user/-/label/%d", folderId)
}
func (a GReader) returnError(w http.ResponseWriter, status int) {
w.WriteHeader(status)
}
func (a GReader) returnSuccess(w http.ResponseWriter, resp any) {
w.WriteHeader(http.StatusOK)
if resp != nil {
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
if err := enc.Encode(resp); err != nil {
a.returnError(w, http.StatusInternalServerError)
}
}
}
| {
folderMap[folder.ID] = folder.Name
} | conditional_block |
greader.go | package api
import (
"encoding/json"
"fmt"
log "github.com/golang/glog"
"github.com/jrupac/goliath/models"
"github.com/jrupac/goliath/storage"
"github.com/jrupac/goliath/utils"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/bcrypt"
"net/http"
"net/http/httputil"
"strconv"
"strings"
"time"
)
const (
readingListStreamId string = "user/-/state/com.google/reading-list"
readStreamId string = "user/-/state/com.google/read"
unreadStreamId string = "user/-/state/com.google/kept-unread"
starredStreamId string = "user/-/state/com.google/starred"
broadcastStreamId string = "user/-/state/com.google/broadcast"
)
var (
greaderLatencyMetric = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "greader_server_latency",
Help: "Server-side latency of GReader API operations.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"method"},
)
)
func init() {
prometheus.MustRegister(greaderLatencyMetric)
}
// GReader is an implementation of the GReader API.
type GReader struct {
d *storage.Database
}
// GReaderHandler returns a new GReader handler.
func GReaderHandler(d *storage.Database) http.HandlerFunc {
return GReader{d}.Handler()
}
// Handler returns a handler function that implements the GReader API.
func (a GReader) Handler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
a.route(w, r)
}
}
func (a GReader) recordLatency(t time.Time, label string) |
func (a GReader) route(w http.ResponseWriter, r *http.Request) {
// Record the total server latency of each call.
defer a.recordLatency(time.Now(), "server")
w.Header().Set("Content-Type", "application/json")
switch r.URL.Path {
case "/greader/accounts/ClientLogin":
a.handleLogin(w, r)
case "/greader/reader/api/0/token":
a.withAuth(w, r, a.handlePostToken)
case "/greader/reader/api/0/user-info":
a.withAuth(w, r, a.handleUserInfo)
case "/greader/reader/api/0/subscription/list":
a.withAuth(w, r, a.handleSubscriptionList)
case "/greader/reader/api/0/stream/items/ids":
a.withAuth(w, r, a.handleStreamItemIds)
case "/greader/reader/api/0/stream/items/contents":
a.withAuth(w, r, a.handleStreamItemsContents)
case "/greader/reader/api/0/edit-tag":
a.withAuth(w, r, a.handleEditTag)
default:
log.Warningf("Got unexpected route: %s", r.URL.String())
dump, err := httputil.DumpRequest(r, true)
if err != nil {
log.Warningf("Failed to dump request: %s", err)
}
log.Warningf("%q", dump)
a.returnError(w, http.StatusBadRequest)
}
}
func (a GReader) handleLogin(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
formUser := r.Form.Get("Email")
formPass := r.Form.Get("Passwd")
user, err := a.d.GetUserByUsername(formUser)
if err != nil {
log.Warningf("Failed to find user: %s", formUser)
a.returnError(w, http.StatusUnauthorized)
return
}
ok := bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if ok == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders {
folderMap[folder.ID] = folder.Name
}
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so let it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.PostForm.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client, it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title,
Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
},
Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
var status string
// Only support updating one tag
switch r.Form.Get("a") {
case readStreamId:
status = "read"
case unreadStreamId:
status = "unread"
case starredStreamId, broadcastStreamId:
// TODO: Support starring items
a.returnError(w, http.StatusNotImplemented)
return
}
for _, articleId := range articleIds {
err = a.d.MarkArticleForUser(user, articleId, status)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
}
_, _ = w.Write([]byte("OK"))
a.returnSuccess(w, nil)
}
func (a GReader) withAuth(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, models.User)) {
// Header should be in format:
// Authorization: GoogleLogin auth=<token>
authHeader := r.Header.Get("Authorization")
if authHeader == "" {
a.returnError(w, http.StatusUnauthorized)
return
}
authFields := strings.Fields(authHeader)
if len(authFields) != 2 || !strings.EqualFold(authFields[0], "GoogleLogin") {
a.returnError(w, http.StatusBadRequest)
return
}
authStr, tokenStr, found := strings.Cut(authFields[1], "=")
if !found {
a.returnError(w, http.StatusBadRequest)
return
}
if !strings.EqualFold(authStr, "auth") {
a.returnError(w, http.StatusBadRequest)
return
}
username, token, err := extractAuthToken(tokenStr)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
user, err := a.d.GetUserByUsername(username)
if err != nil {
a.returnError(w, http.StatusUnauthorized)
return
}
if validateAuthToken(token, username, user.HashPass) {
handler(w, r, user)
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func greaderArticleId(articleId int64) string {
return fmt.Sprintf("tag:google.com,2005:reader/item/%x", articleId)
}
func greaderFeedId(feedId int64) string {
return fmt.Sprintf("feed/%d", feedId)
}
func greaderFolderId(folderId int64) string {
return fmt.Sprintf("user/-/label/%d", folderId)
}
func (a GReader) returnError(w http.ResponseWriter, status int) {
w.WriteHeader(status)
}
func (a GReader) returnSuccess(w http.ResponseWriter, resp any) {
w.WriteHeader(http.StatusOK)
if resp != nil {
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
if err := enc.Encode(resp); err != nil {
a.returnError(w, http.StatusInternalServerError)
}
}
}
| {
utils.Elapsed(t, func(d time.Duration) {
// Record latency measurements in microseconds.
greaderLatencyMetric.WithLabelValues(label).Observe(float64(d) / float64(time.Microsecond))
})
} | identifier_body |
greader.go | package api
import (
"encoding/json"
"fmt"
log "github.com/golang/glog"
"github.com/jrupac/goliath/models"
"github.com/jrupac/goliath/storage"
"github.com/jrupac/goliath/utils"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/bcrypt"
"net/http"
"net/http/httputil"
"strconv"
"strings"
"time"
)
const (
readingListStreamId string = "user/-/state/com.google/reading-list"
readStreamId string = "user/-/state/com.google/read"
unreadStreamId string = "user/-/state/com.google/kept-unread"
starredStreamId string = "user/-/state/com.google/starred"
broadcastStreamId string = "user/-/state/com.google/broadcast"
)
var (
greaderLatencyMetric = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Name: "greader_server_latency",
Help: "Server-side latency of GReader API operations.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
[]string{"method"},
)
)
func init() {
prometheus.MustRegister(greaderLatencyMetric)
}
// GReader is an implementation of the GReader API.
type GReader struct {
d *storage.Database
}
// GReaderHandler returns a new GReader handler.
func GReaderHandler(d *storage.Database) http.HandlerFunc {
return GReader{d}.Handler()
}
// Handler returns a handler function that implements the GReader API.
func (a GReader) Handler() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
a.route(w, r)
}
}
func (a GReader) recordLatency(t time.Time, label string) {
utils.Elapsed(t, func(d time.Duration) {
// Record latency measurements in microseconds.
greaderLatencyMetric.WithLabelValues(label).Observe(float64(d) / float64(time.Microsecond))
})
}
func (a GReader) route(w http.ResponseWriter, r *http.Request) {
// Record the total server latency of each call.
defer a.recordLatency(time.Now(), "server")
w.Header().Set("Content-Type", "application/json")
switch r.URL.Path {
case "/greader/accounts/ClientLogin":
a.handleLogin(w, r)
case "/greader/reader/api/0/token":
a.withAuth(w, r, a.handlePostToken)
case "/greader/reader/api/0/user-info":
a.withAuth(w, r, a.handleUserInfo)
case "/greader/reader/api/0/subscription/list":
a.withAuth(w, r, a.handleSubscriptionList)
case "/greader/reader/api/0/stream/items/ids":
a.withAuth(w, r, a.handleStreamItemIds)
case "/greader/reader/api/0/stream/items/contents":
a.withAuth(w, r, a.handleStreamItemsContents)
case "/greader/reader/api/0/edit-tag":
a.withAuth(w, r, a.handleEditTag)
default:
log.Warningf("Got unexpected route: %s", r.URL.String())
dump, err := httputil.DumpRequest(r, true)
if err != nil {
log.Warningf("Failed to dump request: %s", err)
}
log.Warningf("%q", dump)
a.returnError(w, http.StatusBadRequest)
}
}
func (a GReader) handleLogin(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
formUser := r.Form.Get("Email")
formPass := r.Form.Get("Passwd")
user, err := a.d.GetUserByUsername(formUser)
if err != nil {
log.Warningf("Failed to find user: %s", formUser)
a.returnError(w, http.StatusUnauthorized)
return
}
ok := bcrypt.CompareHashAndPassword([]byte(user.HashPass), []byte(formPass))
if ok == nil {
token, err := createAuthToken(user.HashPass, formUser)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
a.returnSuccess(w, greaderHandlelogin{Auth: token})
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func (a GReader) handleUserInfo(w http.ResponseWriter, _ *http.Request, user models.User) {
a.returnSuccess(w, greaderUserInfo{
UserId: string(user.UserId),
Username: user.Username,
})
}
func (a GReader) handleSubscriptionList(w http.ResponseWriter, _ *http.Request, user models.User) {
folders, err := a.d.GetAllFoldersForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
feeds, err := a.d.GetAllFeedsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
faviconMap, err := a.d.GetAllFaviconsForUser(user)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
folderMap := map[int64]string{}
for _, folder := range folders {
folderMap[folder.ID] = folder.Name
}
subList := greaderSubscriptionList{}
for _, feed := range feeds {
subList.Subscriptions = append(subList.Subscriptions, greaderSubscription{
Title: feed.Title,
// No client seems to use this field, so let it as zero
FirstItemMsec: "0",
HtmlUrl: feed.Link,
IconUrl: fmt.Sprintf("data:%s", faviconMap[feed.ID]),
SortId: feed.Title,
Id: greaderFeedId(feed.ID),
Categories: []greaderCategory{{
Id: greaderFolderId(feed.FolderID),
Label: folderMap[feed.FolderID],
}},
})
}
a.returnSuccess(w, subList)
}
func (a GReader) handleStreamItemIds(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
limit, err := strconv.Atoi(r.Form.Get("n"))
if err != nil {
log.Warningf(
"Saw unexpected 'n' parameter, defaulting to 10,000: %s", r.PostForm.Get("n"))
limit = 10000
}
switch s := r.Form.Get("s"); s {
case starredStreamId:
// TODO: Support starred items
a.returnSuccess(w, greaderStreamItemIds{})
return
case readStreamId:
// Never return read items to the client, it's just simpler
a.returnSuccess(w, greaderStreamItemIds{})
return
case readingListStreamId:
// Handled below
break
default:
log.Warningf("Saw unexpected 's' parameter: %s", s)
a.returnError(w, http.StatusNotImplemented)
return
}
xt := r.Form.Get("xt")
if xt != readStreamId {
// Only support excluding read items
log.Warningf("Saw unexpected 'xt' parameter: %s", xt)
a.returnError(w, http.StatusNotImplemented)
return
}
// TODO: Support continuation tokens
articles, err := a.d.GetUnreadArticleMetaForUser(user, limit, -1)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemIds := greaderStreamItemIds{}
for _, article := range articles {
streamItemIds.ItemRefs = append(streamItemIds.ItemRefs, greaderItemRef{
Id: strconv.FormatInt(article.ID, 10),
DirectStreamIds: []string{
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
})
}
a.returnSuccess(w, streamItemIds)
}
func (a GReader) handlePostToken(w http.ResponseWriter, _ *http.Request, _ models.User) {
_, _ = fmt.Fprint(w, createPostToken())
a.returnSuccess(w, nil)
}
func (a GReader) handleStreamItemsContents(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
articles, err := a.d.GetArticlesForUser(user, articleIds)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
streamItemContents := greaderStreamItemsContents{
Id: readingListStreamId,
Updated: time.Now().Unix(),
}
for _, article := range articles {
streamItemContents.Items = append(streamItemContents.Items, greaderItemContent{
CrawlTimeMsec: strconv.FormatInt(article.Date.UnixMilli(), 10),
TimestampUsec: strconv.FormatInt(article.Date.UnixMicro(), 10),
Id: greaderArticleId(article.ID),
Categories: []string{
readingListStreamId,
greaderFeedId(article.FeedID),
greaderFolderId(article.FolderID),
},
Title: article.Title,
Published: article.Date.Unix(),
Canonical: []greaderCanonical{
{Href: article.Link},
},
Alternate: []greaderCanonical{
{Href: article.Link},
},
Summary: greaderContent{
Content: article.GetContents(*serveParsedArticles),
},
Origin: greaderOrigin{
StreamId: greaderFeedId(article.FeedID),
},
})
}
a.returnSuccess(w, streamItemContents)
}
func (a GReader) handleEditTag(w http.ResponseWriter, r *http.Request, user models.User) {
err := r.ParseForm()
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
if !validatePostToken(r.Form.Get("T")) {
a.returnError(w, http.StatusUnauthorized)
return
}
articleIdsValue := r.Form["i"]
var articleIds []int64
for _, articleIdStr := range articleIdsValue {
id, err := strconv.ParseInt(articleIdStr, 16, 64)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
articleIds = append(articleIds, id)
}
var status string
// Only support updating one tag
switch r.Form.Get("a") {
case readStreamId:
status = "read"
case unreadStreamId:
status = "unread"
case starredStreamId, broadcastStreamId:
// TODO: Support starring items
a.returnError(w, http.StatusNotImplemented)
return
}
for _, articleId := range articleIds {
err = a.d.MarkArticleForUser(user, articleId, status)
if err != nil {
a.returnError(w, http.StatusInternalServerError)
return
}
}
_, _ = w.Write([]byte("OK"))
a.returnSuccess(w, nil)
}
func (a GReader) withAuth(w http.ResponseWriter, r *http.Request, handler func(http.ResponseWriter, *http.Request, models.User)) {
// Header should be in format:
// Authorization: GoogleLogin auth=<token>
authHeader := r.Header.Get("Authorization")
if authHeader == "" {
a.returnError(w, http.StatusUnauthorized)
return
}
authFields := strings.Fields(authHeader)
if len(authFields) != 2 || !strings.EqualFold(authFields[0], "GoogleLogin") {
a.returnError(w, http.StatusBadRequest)
return
}
authStr, tokenStr, found := strings.Cut(authFields[1], "=")
if !found {
a.returnError(w, http.StatusBadRequest)
return
}
if !strings.EqualFold(authStr, "auth") {
a.returnError(w, http.StatusBadRequest)
return
}
username, token, err := extractAuthToken(tokenStr)
if err != nil {
a.returnError(w, http.StatusBadRequest)
return
}
user, err := a.d.GetUserByUsername(username)
if err != nil {
a.returnError(w, http.StatusUnauthorized)
return
}
if validateAuthToken(token, username, user.HashPass) {
handler(w, r, user)
} else {
a.returnError(w, http.StatusUnauthorized)
}
}
func greaderArticleId(articleId int64) string {
return fmt.Sprintf("tag:google.com,2005:reader/item/%x", articleId)
}
func | (feedId int64) string {
return fmt.Sprintf("feed/%d", feedId)
}
func greaderFolderId(folderId int64) string {
return fmt.Sprintf("user/-/label/%d", folderId)
}
func (a GReader) returnError(w http.ResponseWriter, status int) {
w.WriteHeader(status)
}
func (a GReader) returnSuccess(w http.ResponseWriter, resp any) {
w.WriteHeader(http.StatusOK)
if resp != nil {
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
if err := enc.Encode(resp); err != nil {
a.returnError(w, http.StatusInternalServerError)
}
}
}
| greaderFeedId | identifier_name |
locale.js | var dictBG = {
"PORTFOLIO": "Портфолио",
"RANGE":"Обхват",
"AUDIENCE":"Аудитория",
"BRAND": "Бранд",
"CONTACT":"Контакт",
"LEADING MEDIA COMPANY":"Xenium е водеща медийна компания в България",
"MEDIA KIT":"МЕДИА КИТ",
"AUDIENCE REACH":"Медиен обхват на аудиторията",
"AUDIENCE PROFILE":"Профил на аудиторията",
"FEMALE":"жени",
"MALE":"мъже",
"ON A DAILY BASIS":"всекидневно използват Интернет",
"MIDDLE AND HIGH INCOME":"среден и <br />висок<br /> доход",
"MLNS":"млн",
"SECONDARY AND HIGHER EDUCATION":"средно и висше образование",
"BIG CITIES AND REGIONAL CAPITALS":"големи градове и обл. центрове",
"AGED":"възраст",
"THE MOST COMPEHENCIVE":"най-всеобхватната социална медия за новини и развлечение.",
"OVER 4000 PUBLICATIONS DAILY":"Над <strong> 4000 публикации дневно</strong> се добавят от потребителите.",
"OVER 2M PUBLICATIONS IN TOTAL":"Общо над <strong>2 млн. публикации</strong> структурирани в над <strong>250 000 теми</strong>.",
"UNIQUE":"УНИКАЛНИ",
"MONTHLY USERS":"потребители месечно:",
"IMPRESSIONS":"Импресии",
"AUDIENCE OF":"Обхват на ",
"OF REACH":"аудиторията",
"AGE":"Възраст:",
"EDUCATION":"Образование:",
"SECONDARY":"Средно:",
"COLLEGE":"Полувисше:",
"HIGHER":"Висше:",
"CITY LIFE MEDIUM":"Медия за градски живот, която има за цел да носи <strong>положителни емоции в ежедневието на хората</strong> чрез стойностното си съдържание.",
"ENTERTAINS AND INFORMS THE YOUNG MODERN PERSON":"Развлича и информира младия и модерен човек за всичко ново, което се случва в областите <strong> мода, музика, кино, популярни личности, събития, нови технологии, изкуство и пътувания</strong>.",
"KULINARIA IS MEDIA":"<strong>Kulinaria.bg</strong> е медия за вкусно и здравословно хранене.",
"PRESENTS IN AN ORIGINAL AND":"По оригинален и достъпен начин, представя <strong> най-доброто от традиционната и модерна кухня, майстор-готвачи, места с добра кухня и атмосфера</strong>.",
"TERMO IS A SITE FOR TRAVEL":"<strong>Termo.bg</strong> е сайт за пътувания и забавление за всяко време. ",
"RECOMMENDS UPCOMING EVENTS":"Препоръчва предстоящи събития, забележителности, заведения, хотели, шопинг възможности в страната и чужбина.",
"PRESENTS DETAILED 15 DAY":"Представя детайлна 15-дневна прогноза за времето в над 200 000 локации.",
"HARDWARE IS THE MOST POPULAR HARDWARE":"<strong>Hardware.bg</strong> е най-популярния сайт за хардуер, софтуер и техологии в страната.",
"EXCEPTIONALLY WELL DEVELOPED FORUM":"Разполага с изключително развит форум.",
"MOBILITY UNITES":"<strong>Mobility.bg</strong> - обединява трите най-популярни сайта за фенове на мобилни телефони: <strong>iPhone-Bulgaria.com, WinPhone.bg, AllAndroid.com</strong>.",
"MOBILITY ITS AUDIENCE":"Посещава се от аудитория, която <strong>се интересува от най-новото в света на мобилните технологии и има сериозни познания в областта</strong>.",
"IN 2013 MOBILE":"През 2013 година <strong>Xenium Media</strong> започва да предлага ексклузивно за България <strong>mobile.de</strong> и трафика на българската аудитория в него.",
"THE AUDIENCE CONSISTS":"Аудиторията е предимно <strong>млади хора с високи доходи</strong>, търсещи автомобили за покупка с цена над 5000 евро.",
"CALCULATOR FOR EVERYTHING":"<strong>Calculator.bg</strong> - за всичко и за всички.",
"FACILITATES ANY CALCULATION":"Улеснява всяко едно изчисление.",
"YOU CAN CALCULATE":"Можеш да изчислиш <strong> заплата, валута, лихви, нотариални такси, данъци и др.</strong>",
"MEGANEWS IS AN INFORMATION SITE":"<strong>Meganews.bg</strong> е информационен сайт, който показва най-горещите факти от деня такива, каквито са.",
"OUR MISSION IS TO IMPARTIALLY INFORM":"Мисията ни е да информираме безпристрастно аудиторията, давайки свобода на читателите сами да изразяват гражданската си позиция.",
"NIEJENITE IS A SITE":"<strong>Nie-jenite.bg</strong> е сайт за модерната и успяла жена, интересуваща се <strong>мода, красота, задраве, любов, дизайн и полезни съвети.</strong>",
"THE MEDIUM ALLOWS ITS USERS":"Медията позволява на потребителите да оценяват и коментират публикации, да споделят и обсъждат своите проблеми и истории.",
"MEGALIFE IS A DEVELOPED":"<strong>Megalife.bg</strong> е развита и успешна лайфстайл медия, която има за цел да информира и забавлява потребителите си с богато разнообразие от съдържателни публикации.",
"THE MEDIUMS USERS":"Потребителите на медията са главно <strong>Икономически активни, заинтересовани от лайфстайла в страната</strong>. ",
"FOODS IS A CULINARY SITE":"<strong>Foods.bg</strong> е кулинарен сайт с множество избрани рецепти, любопитни факти и полезни приложения.",
"IT PRESENTS IN THE MOST":"Представя максимално достъпно най-добрите рецепти като акцентът е върху здравословното хранене и начин на живот. Аудиорията е предимно от жени, от добрата кухня и здраве.",
"KIDAMOM IS A FUN":"<strong>Kidamom.com</strong> забавна интерактивна среда за вашето дете с много анимационни и образователни филми.",
"XENIUM MEDIA COMPANY TOGETHER WITH":"Xenium Media Company с удоволствие ви представя онлайн проекта Kidamom.com, съвместно с Кидамом ООД и Eleven. Kidamom.com предоставя интерактивна среда за деца до 12 години. В специализирания сайт подрастващите обогатяват своите уменията и знанията по забавен и достъпен начин.",
"KIDAMOM IS A PLACE":"Kidamom.com е място, в което децата се въвеждат в съвременния дигитален свят, развивайки своите личностни качества и индивидуални навици. Платформата предлага подбрано качествено съдържание и възможност за родителски контрол.",
"SHVARGALO IS AN ART COMPANY":"<strong>Shvargalo.com</strong> е арт компания, която се занимава с <strong> продуциране и произвеждане на филми, книги, спектакли, музикално-танцови и сценични произведения.</strong>",
"KAMEN DONEV AND ELENA BOZOVA":"В проекта взимат участие <strong> Камен Донев (режисьор и актьор) Елена Бозова (актьор)</strong> в компанията на именити български актьори.",
"THE AUDIENCE CONSISTS OF":"Аудиторията се състои <strong> мъже и жени на възраст между 20 и 40 години,</strong> жители на големи и средни градове, икономически активни потребители, интересуващи се от <strong> култура, изкуство и развлечения</strong>.",
"NONSTANDARD FORMATS":"НЕСТАРДАНТНИ ФОРМАТИ",
"CLIENT":"Клиент: ",
"NESCAFE: THE GAME ATTRACTED":"Играта се проведе при голям интерес и за периода на конкурса получихме над 70 авторски рецепти. Страницата на конкурса беше широко промотирана в сайтовете на Xenium Media и за периода на кампанията беше посетена от над 50 000 потребители и събра над 20 000 харесвания на рецептите във Facebook.",
"NESCAFE: THE CONTEST WEBPAGE":" ",
"TEFAL: THE GAME":"Играта се проведе при голям интерес от потребителите. Всеки един от тях представи своето любимо модно съчетание, като покани приятелите си да гласуват за неговата снимка. Играта се проведе в рамките на петнадесет дни, като беше промотирана чрез брандиране на Bulevard.bg.",
"TEFAL: THE GAME TOOK PLACE":" ",
"ESCADA: USERS CHOSE THE AROMA":"Потребителите избираха аромата, който най-много им допада и избора им се отразяваше и на стените им във Facebook.",
"ESCADA: THIS CREATED A VIRAL EFFECT":"Това създаде вирусен ефект и интерес сред приятелите им. Само за 20 дни кампанията събра близо 1000 участника и над 150 коментара. Постигнахме добра интеракция с марката и накарахме потребителите да се асоцират с различните аромати.",
"BRUNO: A GAME URGING MALE AUDIENCE":"Игра подтикваща мъжката аудитория да разглежда и избира сексапилни девойки и да пише под формата на коментар реплики за свалки.",
"BRUNO: MORE THAN 230 USERS":"За периода на играта са регистрирани над 230 потребители и над 230 постинга, които се състезават за наградата. Събрани бяха над 9000 гласа. Кампанията беше много добре отразена с PR в цялата мрежа от сайтове на Xenium Media.",
"PLESIO: THE VIDEO":"Видео клипът „От любов към технологиите“ е създаден като концепция и реализиран от екипа на Ксениум Медия. ",
"PLESIO: POPULARIZED":"Популяризирането му бе извършено чрез различните социални канали – Facebook, You Tube, Vimeo и др. и сайтовете на Ксениум Медия.",
"AMSTEL: BEER CEREMONY":"„Бирена церемония“ e нестандартен рекламен видео формат (дълъг е 02:51 минути) и носи всички характеристики на късометражен игрален филм.",
"AMSTEL: CONCEPT":"Концепцията и реализацията на видео клипа са на Ксениум Медия.",
"MAIN GRAPH":"<img class='graph' src='img/graph1.jpg' />",
"RANGE IMG":'<img src="img/range.png" height="522"/>',
"SVEJO DATA IMG":'<img src="img/media-data.png" />',
"BULEVARD DATA IMG":'<img src="img/bulevard-data.png" />',
"KULINARIA DATA IMG":'<img src="img/kulinaria-data.png" />',
"TERMO DATA IMG":'<img src="img/termo-data.png" />',
"HARDWARE DATA IMG":'<img src="img/hardware-data.png" />',
"MOBILITY DATA IMG":'<img src="img/mobility-data.png" />',
"MOBILE DATA IMG":'<img src="img/mobile-data.png" />',
"MEGANEWS DATA IMG":'<img src="img/meganews-data.png" />',
"JENITE DATA IMG":'<img src="img/jenite-data.png" />',
"MEGALIFE DATA IMG":'<img src="img/megalife-data.png" />',
"FOODS DATA IMG":'<img src="img/foods-data.png" />',
"THANK YOU":"Благодаря Ви!",
"CS: THE VIDEO":"Гурме храна, гурме вино, гурме ресторант... Многоуважаваният проф. Донев дегустира различни кулинарни специалитети, вина и съчетанията между тях, отговаряйки на въпроса 'Има ли гурме култура в България?'"
}
var dictEN = {
"PORTFOLIO": "Portfolio",
"RANGE":"Reach",
"AUDIENCE":"Audience",
"BRAND": "Brand",
"CONTACT":"Contact",
"LEADING MEDIA COMPANY":"Xenium is a leading Bulgarian media company",
"MEDIA KIT":"MEDIA KIT",
"AUDIENCE REACH":"AUDIENCE REACH",
"AUDIENCE PROFILE":"Audience Profile",
"FEMALE":"female",
"MALE":"male",
"ON A DAILY BASIS":"use internet <br />on a daily<br /> basis",
"MIDDLE AND HIGH INCOME":"middle and <br />high <br />income",
"MLNS":"M",
"SECONDARY AND HIGHER EDUCATION":"secondary and higher education", | "BIG CITIES AND REGIONAL CAPITALS":"big cities and regional capitals",
"AGED":"aged",
"THE MOST COMPEHENCIVE":"the most comprehencive social medium for news and entertainment.",
"OVER 4000 PUBLICATIONS DAILY":"Over <strong>4000 publications daily </strong>are added by the users.",
"OVER 2M PUBLICATIONS IN TOTAL":"Over <strong>2M publications in total </strong>structured in over <strong>250,000 topics</strong>.",
"UNIQUE":"UNIQUE",
"MONTHLY USERS":"monthly users:",
"IMPRESSIONS":"Impressions",
"AUDIENCE OF":"Audience",
"OF REACH":"reach",
"AGE":"Age:",
"EDUCATION":"Education:",
"SECONDARY":"Secondary: ",
"COLLEGE":"College:",
"HIGHER":"Higher:",
"CITY LIFE MEDIUM":"City life medium aimed at bringing <strong>positive emotions in people's everyday life </strong> through its valuable content.",
"ENTERTAINS AND INFORMS THE YOUNG MODERN PERSON":"Entertains and informs the young modern person of all that is new in <strong>fashion, music, cinema, celebrity world, events, new technologies, art and travel</strong>.",
"KULINARIA IS MEDIA":"<strong>Kulinaria.bg</strong> is a medium for delicious and healthy cuisine.",
"PRESENTS IN AN ORIGINAL AND":"Presents in an original and easy to understand way <strong>the best of traditional and contemporary cuisine, chefs, places with good kitchen and atmosphere</strong>.",
"TERMO IS A SITE FOR TRAVEL":"<strong>Termo.bg</strong> is a site for travel and entertainment for every weather.",
"RECOMMENDS UPCOMING EVENTS":"Recommends upcoming events, landmarks, restaurants, hotels, shopping in Bulgaria and abroad.",
"PRESENTS DETAILED 15 DAY":"Presents a detailed 15 day weather forecast for over 200,000 locations.",
"HARDWARE IS THE MOST POPULAR HARDWARE":"<strong>Hardware.bg</strong> is the most popular hardware, software and technology site in the country.",
"EXCEPTIONALLY WELL DEVELOPED FORUM":"Has an exceptionally well developed forum.",
"MOBILITY UNITES":"<strong>Mobility.bg</strong> unites the three most popular sites for mobile phone fans: <strong>iPhoneBulgaria.com, WinPhone.bg and All4android.com</strong>.",
"MOBILITY ITS AUDIENCE":"Its audience is <strong>interested in all the latest of the mobile technology world and has substantial knowlege in the field</strong>.",
"IN 2013 MOBILE":"In 2013 <strong>Xenium Media</strong> started offering exclusively for Bulgaria <strong>mobile.de</strong> and the Bulgarian audience traffic within it.",
"THE AUDIENCE CONSISTS":"The audience consists mainly of <strong>young people with high income,</strong> looking to buy cars priced over 5,000 euro.",
"CALCULATOR FOR EVERYTHING":"<strong>Calculator.bg</strong> - for everything and everyone.",
"FACILITATES ANY CALCULATION":"Facilitates any calculation.",
"YOU CAN CALCULATE":"You can calculate <strong>salary, currency, interest, notary fees, taxes, etc.</strong>",
"MEGANEWS IS AN INFORMATION SITE":"<strong>Meganews.bg</strong> is an information site showing the hottest facts of the day as they are.",
"OUR MISSION IS TO IMPARTIALLY INFORM":"Our mission is to impartially inform the audience, giving readers the freedom to express their personal standpoint.",
"NIEJENITE IS A SITE":"<strong>Nie-jenite.bg</strong> is a site for the modern and successful woman who is interested in <strong>fashion, beauty, love, health, design and useful tips.</strong>",
"THE MEDIUM ALLOWS ITS USERS":"The medium allows it's users to rate and comment publications, as well as share and discuss their own problems and stories.",
"MEGALIFE IS A DEVELOPED":"<strong>Megalife.bg</strong> is a developed and successful lifestyle medium aimed at informing and entertaining its users through rich variety of meaningful publications.",
"THE MEDIUMS USERS":"The medium's users are predominantly <strong>economically active and lifestyle oriented</strong>.",
"FOODS IS A CULINARY SITE":"<strong>Foods.bg</strong> is a culinary site with a multitude of chosen recipes, curious facts and useful applications.",
"IT PRESENTS IN THE MOST":"It presents in the most easy to understand way the best recipes, focusing on healthy nutrition and lifestyle. The audience consists chiefly of women interested in good cuisine and health.",
"KIDAMOM IS A FUN":"<strong>Kidamom.com</strong> - a fun, interactive environment for your child with lots of cartoons and educational movies.",
"XENIUM MEDIA COMPANY TOGETHER WITH":"Xenium Media Company, together with Kidamom OOD and Eleven, is pleased to present you the online project Kidamom.com. Kidamom.com offers an interactive environment for kids up to 12 years old. Children enrich their skills and knowledge in the specialized site in a fun and accessible way.",
"KIDAMOM IS A PLACE":"Kidamom.com is a place where children are introduced to the contemporary digital world while developing their personal qualities and individual habits. The platform offers selected, highquality content and the ability for parental control.",
"SHVARGALO IS AN ART COMPANY":"<strong>Shvargalo.com</strong> is an art company <strong>producing films, books, shows, music, dance and stage performances</strong>.",
"KAMEN DONEV AND ELENA BOZOVA":"<strong>Kamen Donev (Director/Actor)</strong> and <strong>Elena Bozova (Actor)</strong> take part in the project in the company of famous Bulgarian actors.",
"THE AUDIENCE CONSISTS OF":"The audience consists of <strong>men and women, aged 20 to 40</strong>, living in big and medium-sized cities, economically active users, interested in <strong>art and entertainment</strong>.",
"NONSTANDARD FORMATS":"NONSTANDARD FORMATS",
"CLIENT":"Client: ",
"NESCAFE: THE GAME ATTRACTED":"The game attracted huge interest and over 70 original recipes were received over the period of the contest. ",
"NESCAFE: THE CONTEST WEBPAGE":"The contest webpage was widely promoted on the Xenium Media websites. It was visited by more than 50,000 users and attracted over 20,000 Facebook likes of the recipes over the campaign period.",
"TEFAL: THE GAME":"The game was accompanied by big user interest. Each of the users presented their favourite fashion ensemble and invited their friends to vote for their picture. ",
"TEFAL: THE GAME TOOK PLACE":"The game took place over 15 days and was promoted through branding of Bulevard.bg.",
"ESCADA: USERS CHOSE THE AROMA":"Users chose the aroma that best suited them and their choice was posted on their Facebook walls.",
"ESCADA: THIS CREATED A VIRAL EFFECT":"This created a viral effect and interest among their friends. Within just 20 days the campaign attracted almost 1,000 participants and over 150 comments. We achieved good interaction with the brand and encouraged the users to associate themselves with the various aromas.",
"BRUNO: A GAME URGING MALE AUDIENCE":"A game urging male audience to browse and choose sexy girls and to write pickup lines in the form of comments.",
"BRUNO: MORE THAN 230 USERS":"More than 230 users and their postings, competing for the prize, were registered over the period of the game. Over 9,000 votes were cast. The campaign had a very good PR coverage in the entire Xenium Media website network.",
"PLESIO: THE VIDEO":"For The Love Of Technology video was created as a concept and shot by the Xenium Media team.",
"PLESIO: POPULARIZED":"It was popularized through various social channels - Facebook, YouTube, Vimeo, etc., as well as through Xenium Media's websites.",
"AMSTEL: BEER CEREMONY":"Beer Ceremony is a nonstandard advertising video format (2:51 minutes long) and bears all the characteristics of a short fiction film.",
"AMSTEL: CONCEPT":"Xenium Media developed the concept and shot the video.",
"MAIN GRAPH":'<img class="graph" src="img/graphEn.png" />',
"RANGE IMG":'<img src="img/rangeEn.png" height="522"/>',
"SVEJO DATA IMG":'<img src="img/svejo-dataEn.png" />',
"BULEVARD DATA IMG":'<img src="img/bulevard-dataEn.png" />',
"KULINARIA DATA IMG":'<img src="img/kulinaria-dataEn.png" />',
"TERMO DATA IMG":'<img src="img/termo-dataEn.png" />',
"HARDWARE DATA IMG":'<img src="img/hardware-dataEn.png" />',
"MOBILITY DATA IMG":'<img src="img/mobility-dataEn.png" />',
"MOBILE DATA IMG":'<img src="img/mobile-dataEn.png" />',
"MEGANEWS DATA IMG":'<img src="img/meganews-dataEn.png" />',
"JENITE DATA IMG":'<img src="img/jenite-dataEn.png" />',
"MEGALIFE DATA IMG":'<img src="img/megalife-dataEn.png" />',
"FOODS DATA IMG":'<img src="img/foods-dataEn.png" />',
"THANK YOU":"Thank You!",
"CS: THE VIDEO":""
} | random_line_split | |
__init__.py | import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait for ever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
|
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
# if all fails, we just return whatever the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter
| if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter) | conditional_block |
__init__.py | import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy' | self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait for ever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
# if all fails, we just return whatever the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter |
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo | random_line_split |
__init__.py | import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait for ever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
| """
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
# if all fails, we just return whatever the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter | identifier_body | |
__init__.py | import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait for ever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def | ():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
# if all fails, we just return whatever the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter
| basic_remote_logger | identifier_name |
user_controller.py |
from flask import render_template, flash, redirect, session, url_for, request, g, json, jsonify
from flask.ext.login import login_user, logout_user, current_user, login_required
from lumberjack import app, login_manager, db
from lumberjack.models import *
from lumberjack.models.user import User
from lumberjack.models.workout import Workout
from lumberjack.models.workout_history import WorkoutHistory
from lumberjack.forms import LoginForm, RegistrationForm
from sqlalchemy import or_
from datetime import datetime, time
import json
from config import MAX_SEARCH_RESULTS, POSTS_PER_PAGE
def request_wants_json():
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['text/html']
@app.route("/")
def home_index():
if( g.user.is_authenticated() ):
return redirect(url_for('display_spash'))
return render_template("home.html")
@app.route("/users/splash/")
@login_required
def display_spash():
return render_template("users/splash.html")
@app.route("/user/splash/contents", methods=['GET'])
def display_splash_contents():
ret = Workout.getNewest()
nWorkout = ret.to_hash()
ret = User.getNewest()
nuPic = ret.get_avatar(200)
nUser = ret.to_hash()
ret = User.find_by_id(8)
mfPic = ret.get_avatar(200)
mFollowed = ret.to_hash()
w = {"Result":"OK", "nUser":nUser, "nuPic": nuPic,"nWorkout":nWorkout, "mostFollowed":mFollowed, "mfPic":mfPic}
return jsonify(w)
@app.before_request
def before_request():
g.user = current_user
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm(csrf_enabled=False)
if g.user is not None and g.user.is_authenticated():
if(request_wants_json()):
return g.user.to_json();
else:
return redirect(url_for('home_index'))
if request.method == 'GET':
return render_template('users/login.html',
title = 'Sign In',
form = form)
elif request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("Login successful")
session['username'] = form.username.data
user = User.find_by_username(form.username.data)
if(request_wants_json()):
return user.to_json();
else:
return redirect(request.args.get("next") or url_for("home_index"))
else:
if(request_wants_json()):
return form.to_json();
else:
return render_template('users/login.html',
title = 'Sign In',
form = form)
@app.route('/logout')
def logout():
logout_user()
session.pop('username', None)
if(request_wants_json()):
return json.dumps({'logged_out': 'true'})
else:
return redirect(url_for('home_index'))
@app.route("/users/")
def users_index():
users = User.all()
output = ""
for user in users:
output += user.username + "\n"
return output
@app.route("/users/getUsers", methods=["POST"])
def get_users():
"""
"""
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
login_user(user);
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form)
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
if user == None:
return render_template("users/user.html", user = user, posts = posts) #user not found
if g.user.is_authenticated() and g.user.id == user.id:
if user.firstname == None or user.firstname == "" or user.lastname == None or user.lastname == "" or user.email == None or user.email == "":
flash("We can't display your profile until you have filled out the form")
return render_template("users/update_info.html")
posts = g.user.followed_posts().paginate(page, 10, False)
else:
if user == None:
return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
|
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST"])
def get_followers():
user = g.user
fw = user.user_is_following()
users = []
for foll in fw:
users.append(foll.to_hash())
res ={"Result":"OK", "Records": users}
return jsonify(res)
@app.route("/followers/get_following_count", methods=["POST"])
def get_following_count():
count = g.user.followed.count()
return jsonify(following = count)
@app.route("/followers/get_top_dog")
def get_top_dog():
user = g.user
output = ""
fw = user.top_user()
return jsonify(topUser = fw)
@app.route("/submit_workout_history", methods = ['POST'])
def submit_workout_history():
wName = request.form['wName']
date = request.form['date']
desc = request.form['desc']
user = User.find_by_username(request.form['user'])
workout = Workout.find_single_workout_by_name_(wName)
if(workout == None):
return jsonify(result="errorName", content=" The workout name you have entered may not exist. Please double check the spelling of the workout name. Thank you")
if(date == ""):
return jsonify(result="errorDate", content=" Please enter the date and time of the completed workout")
wh = WorkoutHistory(user.id, workout.id, datetime.strptime(date, "%m/%d/%Y %I:%M:%S %p"), desc, True)
WorkoutHistory.save_to_db(wh)
feed = "comleted "+wName+" on "+date+" - "+desc;
user.add_newsfeed(feed);
return jsonify(result="success");
@app.route("/user/<username>/workouthistory", methods=['GET'])
def display_user_workout_history (username):
user = User.find_by_username(username)
return render_template("users/workout_history.html", user=user)
############################
##WORKOUT SEARCH
############################
#workout search
@app.route('/workout_search')
def workout_search():
return render_template('workout_search.html')
@app.route("/search", methods=['GET'])
def search_for_key():
query = request.args.get('key', '')
if query.startswith('#'):
return redirect(url_for('display_user_profile', username=query[1:]))
user_search_result = (User.query.filter(or_((User.first_last_name.like("%" + query + "%")), (User.last_first_name.like("%" + query + "%"))))).all()
workout_search_result = (Workout.query.filter(Workout.name.like("%" + query + "%"))).all()
return render_template("/results.html", query = query, user_list = user_search_result, workout_list = workout_search_result)
#
#ErrorHandlers
#
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
db.session.rollback()
return render_template('500.html'), 500
| user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user) | identifier_body |
user_controller.py |
from flask import render_template, flash, redirect, session, url_for, request, g, json, jsonify
from flask.ext.login import login_user, logout_user, current_user, login_required
from lumberjack import app, login_manager, db
from lumberjack.models import *
from lumberjack.models.user import User
from lumberjack.models.workout import Workout
from lumberjack.models.workout_history import WorkoutHistory
from lumberjack.forms import LoginForm, RegistrationForm
from sqlalchemy import or_
from datetime import datetime, time
import json
from config import MAX_SEARCH_RESULTS, POSTS_PER_PAGE
def request_wants_json():
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['text/html']
@app.route("/")
def home_index():
if( g.user.is_authenticated() ):
return redirect(url_for('display_spash'))
return render_template("home.html")
@app.route("/users/splash/")
@login_required
def display_spash():
return render_template("users/splash.html")
@app.route("/user/splash/contents", methods=['GET'])
def display_splash_contents():
ret = Workout.getNewest()
nWorkout = ret.to_hash()
ret = User.getNewest()
nuPic = ret.get_avatar(200)
nUser = ret.to_hash()
ret = User.find_by_id(8)
mfPic = ret.get_avatar(200)
mFollowed = ret.to_hash()
w = {"Result":"OK", "nUser":nUser, "nuPic": nuPic,"nWorkout":nWorkout, "mostFollowed":mFollowed, "mfPic":mfPic}
return jsonify(w)
@app.before_request
def before_request():
g.user = current_user
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm(csrf_enabled=False)
if g.user is not None and g.user.is_authenticated():
if(request_wants_json()):
return g.user.to_json();
else:
return redirect(url_for('home_index'))
if request.method == 'GET':
return render_template('users/login.html',
title = 'Sign In',
form = form)
elif request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("Login successful")
session['username'] = form.username.data
user = User.find_by_username(form.username.data)
if(request_wants_json()):
return user.to_json();
else:
return redirect(request.args.get("next") or url_for("home_index"))
else:
if(request_wants_json()):
return form.to_json();
else:
return render_template('users/login.html',
title = 'Sign In',
form = form)
@app.route('/logout')
def logout():
logout_user()
session.pop('username', None)
if(request_wants_json()):
return json.dumps({'logged_out': 'true'})
else:
return redirect(url_for('home_index'))
@app.route("/users/")
def users_index():
users = User.all()
output = ""
for user in users:
output += user.username + "\n"
return output
@app.route("/users/getUsers", methods=["POST"])
def get_users():
"""
"""
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
|
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
if user == None:
return render_template("users/user.html", user = user, posts = posts) #user not found
if g.user.is_authenticated() and g.user.id == user.id:
if user.firstname == None or user.firstname == "" or user.lastname == None or user.lastname == "" or user.email == None or user.email == "":
flash("We can't display your profile until you have filled out the form")
return render_template("users/update_info.html")
posts = g.user.followed_posts().paginate(page, 10, False)
else:
if user == None:
return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user)
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST"])
def get_followers():
user = g.user
fw = user.user_is_following()
users = []
for foll in fw:
users.append(foll.to_hash())
res ={"Result":"OK", "Records": users}
return jsonify(res)
@app.route("/followers/get_following_count", methods=["POST"])
def get_following_count():
count = g.user.followed.count()
return jsonify(following = count)
@app.route("/followers/get_top_dog")
def get_top_dog():
user = g.user
output = ""
fw = user.top_user()
return jsonify(topUser = fw)
@app.route("/submit_workout_history", methods = ['POST'])
def submit_workout_history():
wName = request.form['wName']
date = request.form['date']
desc = request.form['desc']
user = User.find_by_username(request.form['user'])
workout = Workout.find_single_workout_by_name_(wName)
if(workout == None):
return jsonify(result="errorName", content=" The workout name you have entered may not exist. Please double check the spelling of the workout name. Thank you")
if(date == ""):
return jsonify(result="errorDate", content=" Please enter the date and time of the completed workout")
wh = WorkoutHistory(user.id, workout.id, datetime.strptime(date, "%m/%d/%Y %I:%M:%S %p"), desc, True)
WorkoutHistory.save_to_db(wh)
feed = "comleted "+wName+" on "+date+" - "+desc;
user.add_newsfeed(feed);
return jsonify(result="success");
@app.route("/user/<username>/workouthistory", methods=['GET'])
def display_user_workout_history (username):
user = User.find_by_username(username)
return render_template("users/workout_history.html", user=user)
############################
##WORKOUT SEARCH
############################
#workout search
@app.route('/workout_search')
def workout_search():
return render_template('workout_search.html')
@app.route("/search", methods=['GET'])
def search_for_key():
query = request.args.get('key', '')
if query.startswith('#'):
return redirect(url_for('display_user_profile', username=query[1:]))
user_search_result = (User.query.filter(or_((User.first_last_name.like("%" + query + "%")), (User.last_first_name.like("%" + query + "%"))))).all()
workout_search_result = (Workout.query.filter(Workout.name.like("%" + query + "%"))).all()
return render_template("/results.html", query = query, user_list = user_search_result, workout_list = workout_search_result)
#
#ErrorHandlers
#
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
db.session.rollback()
return render_template('500.html'), 500
| if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
login_user(user);
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form) | conditional_block |
user_controller.py |
from flask import render_template, flash, redirect, session, url_for, request, g, json, jsonify
from flask.ext.login import login_user, logout_user, current_user, login_required
from lumberjack import app, login_manager, db
from lumberjack.models import *
from lumberjack.models.user import User
from lumberjack.models.workout import Workout
from lumberjack.models.workout_history import WorkoutHistory
from lumberjack.forms import LoginForm, RegistrationForm
from sqlalchemy import or_
from datetime import datetime, time
import json
from config import MAX_SEARCH_RESULTS, POSTS_PER_PAGE
def request_wants_json():
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['text/html']
@app.route("/")
def home_index():
if( g.user.is_authenticated() ):
return redirect(url_for('display_spash'))
return render_template("home.html")
@app.route("/users/splash/")
@login_required
def display_spash():
return render_template("users/splash.html")
@app.route("/user/splash/contents", methods=['GET'])
def display_splash_contents():
ret = Workout.getNewest()
nWorkout = ret.to_hash()
ret = User.getNewest()
nuPic = ret.get_avatar(200)
nUser = ret.to_hash()
ret = User.find_by_id(8)
mfPic = ret.get_avatar(200)
mFollowed = ret.to_hash()
w = {"Result":"OK", "nUser":nUser, "nuPic": nuPic,"nWorkout":nWorkout, "mostFollowed":mFollowed, "mfPic":mfPic}
return jsonify(w)
@app.before_request
def before_request():
g.user = current_user
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm(csrf_enabled=False)
if g.user is not None and g.user.is_authenticated():
if(request_wants_json()):
return g.user.to_json();
else:
return redirect(url_for('home_index'))
if request.method == 'GET':
return render_template('users/login.html',
title = 'Sign In',
form = form)
elif request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("Login successful")
session['username'] = form.username.data
user = User.find_by_username(form.username.data)
if(request_wants_json()):
return user.to_json();
else:
return redirect(request.args.get("next") or url_for("home_index"))
else:
if(request_wants_json()):
return form.to_json();
else:
return render_template('users/login.html',
title = 'Sign In',
form = form)
@app.route('/logout')
def logout():
logout_user()
session.pop('username', None)
if(request_wants_json()):
return json.dumps({'logged_out': 'true'})
else:
return redirect(url_for('home_index'))
@app.route("/users/")
def users_index():
users = User.all()
output = ""
for user in users:
output += user.username + "\n"
return output
@app.route("/users/getUsers", methods=["POST"])
def get_users():
"""
"""
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
login_user(user);
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form)
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
if user == None:
return render_template("users/user.html", user = user, posts = posts) #user not found
if g.user.is_authenticated() and g.user.id == user.id:
if user.firstname == None or user.firstname == "" or user.lastname == None or user.lastname == "" or user.email == None or user.email == "":
flash("We can't display your profile until you have filled out the form")
return render_template("users/update_info.html")
posts = g.user.followed_posts().paginate(page, 10, False)
else:
if user == None:
return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user)
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST"])
def get_followers():
user = g.user
fw = user.user_is_following()
users = []
for foll in fw:
users.append(foll.to_hash())
res ={"Result":"OK", "Records": users}
return jsonify(res)
@app.route("/followers/get_following_count", methods=["POST"])
def get_following_count():
count = g.user.followed.count()
return jsonify(following = count)
@app.route("/followers/get_top_dog")
def get_top_dog():
user = g.user
output = ""
fw = user.top_user()
return jsonify(topUser = fw)
@app.route("/submit_workout_history", methods = ['POST'])
def submit_workout_history():
wName = request.form['wName']
date = request.form['date']
desc = request.form['desc']
user = User.find_by_username(request.form['user'])
workout = Workout.find_single_workout_by_name_(wName)
if(workout == None):
return jsonify(result="errorName", content=" The workout name you have entered may not exist. Please double check the spelling of the workout name. Thank you")
if(date == ""):
return jsonify(result="errorDate", content=" Please enter the date and time of the completed workout")
wh = WorkoutHistory(user.id, workout.id, datetime.strptime(date, "%m/%d/%Y %I:%M:%S %p"), desc, True)
WorkoutHistory.save_to_db(wh)
feed = "comleted "+wName+" on "+date+" - "+desc;
user.add_newsfeed(feed);
return jsonify(result="success");
@app.route("/user/<username>/workouthistory", methods=['GET'])
def display_user_workout_history (username):
user = User.find_by_username(username)
return render_template("users/workout_history.html", user=user)
############################
##WORKOUT SEARCH
############################
#workout search
@app.route('/workout_search')
def workout_search():
return render_template('workout_search.html')
@app.route("/search", methods=['GET'])
def | ():
query = request.args.get('key', '')
if query.startswith('#'):
return redirect(url_for('display_user_profile', username=query[1:]))
user_search_result = (User.query.filter(or_((User.first_last_name.like("%" + query + "%")), (User.last_first_name.like("%" + query + "%"))))).all()
workout_search_result = (Workout.query.filter(Workout.name.like("%" + query + "%"))).all()
return render_template("/results.html", query = query, user_list = user_search_result, workout_list = workout_search_result)
#
#ErrorHandlers
#
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
db.session.rollback()
return render_template('500.html'), 500
| search_for_key | identifier_name |
user_controller.py | from flask import render_template, flash, redirect, session, url_for, request, g, json, jsonify
from flask.ext.login import login_user, logout_user, current_user, login_required
from lumberjack import app, login_manager, db
from lumberjack.models import *
from lumberjack.models.user import User
from lumberjack.models.workout import Workout
from lumberjack.models.workout_history import WorkoutHistory
from lumberjack.forms import LoginForm, RegistrationForm
from sqlalchemy import or_
from datetime import datetime, time
import json
from config import MAX_SEARCH_RESULTS, POSTS_PER_PAGE
def request_wants_json():
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['text/html']
@app.route("/")
def home_index():
if( g.user.is_authenticated() ):
return redirect(url_for('display_spash'))
return render_template("home.html")
@app.route("/users/splash/")
@login_required
def display_spash():
return render_template("users/splash.html")
@app.route("/user/splash/contents", methods=['GET'])
def display_splash_contents():
ret = Workout.getNewest()
nWorkout = ret.to_hash()
ret = User.getNewest()
nuPic = ret.get_avatar(200)
nUser = ret.to_hash()
ret = User.find_by_id(8)
mfPic = ret.get_avatar(200)
mFollowed = ret.to_hash()
w = {"Result":"OK", "nUser":nUser, "nuPic": nuPic,"nWorkout":nWorkout, "mostFollowed":mFollowed, "mfPic":mfPic}
return jsonify(w)
@app.before_request
def before_request():
g.user = current_user
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm(csrf_enabled=False)
if g.user is not None and g.user.is_authenticated():
if(request_wants_json()):
return g.user.to_json();
else:
return redirect(url_for('home_index'))
if request.method == 'GET':
return render_template('users/login.html',
title = 'Sign In',
form = form)
elif request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("Login successful")
session['username'] = form.username.data
user = User.find_by_username(form.username.data)
if(request_wants_json()):
return user.to_json();
else:
return redirect(request.args.get("next") or url_for("home_index"))
else:
if(request_wants_json()):
return form.to_json();
else:
return render_template('users/login.html',
title = 'Sign In',
form = form)
@app.route('/logout')
def logout():
logout_user()
session.pop('username', None)
if(request_wants_json()):
return json.dumps({'logged_out': 'true'})
else:
return redirect(url_for('home_index'))
@app.route("/users/")
def users_index():
users = User.all()
output = ""
for user in users:
output += user.username + "\n"
return output
@app.route("/users/getUsers", methods=["POST"])
def get_users():
"""
| """
result = []
userids = []
usernames = []
if 'username' in request.form:
usernames = User.find_all_by_username(request.form['username'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
elif 'email' in request.form:
usernames = User.find_all_by_email(request.form['email'])
if usernames is not None:
for username in usernames:
result.append(username.to_hash())
w = {"Result":"OK", "Records": result}
return jsonify(w)
@app.route("/users/new", methods=['GET', 'POST'])
def new_user():
if request.headers['Content-Type'] == 'application/json':
form = RegistrationForm.from_json(request.json, csrf_enabled=False)
else:
form = RegistrationForm()
if request.method == 'GET':
return render_template('users/new.html', form=form)
elif request.method == 'POST':
if form.validate():
user = User(form.username.data,
form.password.data)
User.save_to_db(user)
user = user.follow(user)
User.add_newsfeed(user,"Has joined Lumberjack.")
flash("Registration Successful!")
if request.headers['Content-Type'] == 'application/json':
return user.to_json()
else:
login_user(user);
session['username'] = form.username.data
return redirect(url_for('display_user_profile',
username=user.username))
else:
if request.headers['Content-Type'] == 'application/json':
return form.to_json()
else:
return render_template('users/new.html', form=form)
@login_manager.user_loader
@app.route("/user/find/", methods=['GET'])
def load_user(id):
return User.find_by_id(int(id))
@app.route("/user/<username>")
@app.route('/user/<username>/<int:page>', methods = ['GET'])
def display_user_profile(username, page=1):
user = User.find_by_username(username)
posts = None
if not request_wants_json():
if user == None:
return render_template("users/user.html", user = user, posts = posts) #user not found
if g.user.is_authenticated() and g.user.id == user.id:
if user.firstname == None or user.firstname == "" or user.lastname == None or user.lastname == "" or user.email == None or user.email == "":
flash("We can't display your profile until you have filled out the form")
return render_template("users/update_info.html")
posts = g.user.followed_posts().paginate(page, 10, False)
else:
if user == None:
return json.dumps({"Error": "User not found."})
return user.to_json()
return render_template("users/user.html", user = user, posts = posts)
@app.route("/user/update-profile/", methods=['GET', 'POST'])
@login_required
def update_info():
if request.method == 'POST':
if not request_wants_json():
user = User.find_by_id(g.user.id)
else:
user = User.find_by_id(request.form['uid'])
user.firstname = request.form['firstname']
user.lastname = request.form['lastname']
user.email = request.form['email']
email_user = User.find_by_email(user.email)
if not request_wants_json():
if email_user != None and email_user.id != g.user.id:
flash("Our record shows that you have an account under the given email address already.")
return render_template("users/update_info.html")
else:
if email_user != None and str(email_user.id) != request.form['uid']:
ret = {"result": "Email address already exist."}
return json.dumps(ret)
if len(request.form['gender']) > 0:
user.sex = request.form['gender'][0].upper()
user.location = request.form['location']
user.date_of_birth = request.form['date_of_birth']
user.avatar = request.form['gravatar']
user.about_me = request.form['about-me']
User.save_to_db(user)
if request_wants_json():
ret = {"result": "OK"}
return json.dumps(ret)
flash('Your changes have been made!')
return render_template("users/update_info.html")
@app.route('/follow_btn')
def follow_btn():
followee = request.args.get('followee', '', type=int)
state = request.args.get('state', '', type=str)
user = User.find_by_id(followee)
if state.startswith("Follow"):
follower = g.user.follow(user)
User.save_to_db(follower)
if g.user.is_following(user):
return jsonify(result="Unfollow") #g.user successfully followed user. So, we must change the state of the button
else:
return jsonify(resul="error") #we could return 'Follow' to just keep the state. But returning 'error' will say that something went wrong. Could be a database problem.
follower = g.user.unfollow(user)
User.save_to_db(follower)
if not g.user.is_following(user):
return jsonify(result="Follow") #g.user successfully unfollowed user
else:
return jsonify(result="error")
@app.route("/<username>/followers")
@app.route('/<username>/followers/<int:page>', methods = ['GET'])
def followers(username, page=1):
user = g.user
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("users/followers.html",user = user)
@app.route("/user_feeds/")
@app.route('/user_feeds/<int:page>', methods = ['GET'])
def user_feeds(page=1):
user = g.user
if user.is_anonymous():
return jsonify(result="")
posts = g.user.followed_posts().paginate(request.args.get('page', '', type=int), 10, False)
if not posts.items:
return jsonify(result="")
feeds = "{\"feed\":["
for post in posts.items:
feeds += "{\"username\":\"" + post.userName + "\"," + "\"body\":\"" + post.body + "\"," + "\"time\":\"" + str(post.timestamp) + "\"," + "\"avatar\":\"" + post.get_feed_avatar(post.userName, 40) + "\"},"
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return jsonify(result = feeds)
@app.route("/all_user_feeds/<uid>", methods=["GET"])
def all_user_feeds(uid):
user = User.find_by_id(uid)
posts = user.followed_posts().paginate(1, 100, False)
feeds = '{"feed":['
for post in posts.items:
feeds += '{"username":"' + post.userName + '",' + '"body":"' + post.body + '",' + '"time":"' + str(post.timestamp) + '",' + '"avatar":"' + post.get_feed_avatar(post.userName, 80) + '"},'
feeds = feeds[:len(feeds)-1]
feeds += "]}"
return feeds
@app.route("/post-status/", methods = ['POST'])
def post_status():
body = request.form['body']
if 'uid' not in request.form:
g.user.add_newsfeed(body)
else:
user = User.find_by_id(request.form['uid'])
user.add_newsfeed(body)
return jsonify(result="success");
@app.route("/followers/get_followers", methods=["POST"])
def get_followers():
user = g.user
fw = user.user_is_following()
users = []
for foll in fw:
users.append(foll.to_hash())
res ={"Result":"OK", "Records": users}
return jsonify(res)
@app.route("/followers/get_following_count", methods=["POST"])
def get_following_count():
count = g.user.followed.count()
return jsonify(following = count)
@app.route("/followers/get_top_dog")
def get_top_dog():
user = g.user
output = ""
fw = user.top_user()
return jsonify(topUser = fw)
@app.route("/submit_workout_history", methods = ['POST'])
def submit_workout_history():
wName = request.form['wName']
date = request.form['date']
desc = request.form['desc']
user = User.find_by_username(request.form['user'])
workout = Workout.find_single_workout_by_name_(wName)
if(workout == None):
return jsonify(result="errorName", content=" The workout name you have entered may not exist. Please double check the spelling of the workout name. Thank you")
if(date == ""):
return jsonify(result="errorDate", content=" Please enter the date and time of the completed workout")
wh = WorkoutHistory(user.id, workout.id, datetime.strptime(date, "%m/%d/%Y %I:%M:%S %p"), desc, True)
WorkoutHistory.save_to_db(wh)
feed = "comleted "+wName+" on "+date+" - "+desc;
user.add_newsfeed(feed);
return jsonify(result="success");
@app.route("/user/<username>/workouthistory", methods=['GET'])
def display_user_workout_history (username):
user = User.find_by_username(username)
return render_template("users/workout_history.html", user=user)
############################
##WORKOUT SEARCH
############################
#workout search
@app.route('/workout_search')
def workout_search():
return render_template('workout_search.html')
@app.route("/search", methods=['GET'])
def search_for_key():
query = request.args.get('key', '')
if query.startswith('#'):
return redirect(url_for('display_user_profile', username=query[1:]))
user_search_result = (User.query.filter(or_((User.first_last_name.like("%" + query + "%")), (User.last_first_name.like("%" + query + "%"))))).all()
workout_search_result = (Workout.query.filter(Workout.name.like("%" + query + "%"))).all()
return render_template("/results.html", query = query, user_list = user_search_result, workout_list = workout_search_result)
#
#ErrorHandlers
#
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
db.session.rollback()
return render_template('500.html'), 500 | random_line_split | |
block.rs | use crate::error::{ViuError, ViuResult};
use crate::printer::Printer;
use crate::Config;
use ansi_colours::ansi256_from_rgb;
use image::{DynamicImage, GenericImageView, Rgba};
use std::io::Write;
use termcolor::{Buffer, BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
use crossterm::cursor::{MoveRight, MoveTo, MoveToPreviousLine};
use crossterm::execute;
const UPPER_HALF_BLOCK: &str = "\u{2580}";
const LOWER_HALF_BLOCK: &str = "\u{2584}";
const CHECKERBOARD_BACKGROUND_LIGHT: (u8, u8, u8) = (153, 153, 153);
const CHECKERBOARD_BACKGROUND_DARK: (u8, u8, u8) = (102, 102, 102);
pub struct BlockPrinter {}
impl Printer for BlockPrinter {
fn print(&self, img: &DynamicImage, config: &Config) -> ViuResult<(u32, u32)> {
// there are two types of buffers in this function:
// - stdout: Buffer, which is from termcolor crate. Used to buffer all writing
// required to print a single image or frame. Flushed on every line
// - row_buffer: Vec<ColorSpec>, which stores back- and foreground colors for a
// row of terminal cells. When flushed, its output goes into out_buffer.
// They are both flushed on every terminal line (i.e 2 pixel rows)
let stdout = BufferWriter::stdout(ColorChoice::Always);
let mut out_buffer = stdout.buffer();
// adjust y offset
if config.absolute_offset {
if config.y >= 0 {
// If absolute_offset, move to (0,y).
execute!(out_buffer, MoveTo(0, config.y as u16))?;
} else {
//Negative values do not make sense.
return Err(ViuError::InvalidConfiguration(
"absolute_offset is true but y offset is negative".to_owned(),
));
}
} else if config.y < 0 {
// MoveUp if negative
execute!(out_buffer, MoveToPreviousLine(-config.y as u16))?;
} else {
// Move down y lines
for _ in 0..config.y {
// writeln! is used instead of MoveDown to force scrolldown
// observed when config.y > 0 and cursor is on the last terminal line
writeln!(out_buffer)?;
}
}
// resize the image so that it fits in the constraints, if any
let resized_img;
let img = if config.resize {
resized_img = super::resize(&img, config.width, config.height);
&resized_img
} else {
img
};
let (width, _) = img.dimensions();
// TODO: position information is contained in the pixel
let mut curr_col_px = 0;
let mut curr_row_px = 0;
let mut row_buffer: Vec<ColorSpec> = Vec::with_capacity(width as usize);
// row_buffer building mode. At first the top colors are calculated and then the bottom
// Once the bottom row is ready, row_buffer is flushed
let mut mode = Mode::Top;
// iterate pixels and fill row_buffer
for pixel in img.pixels() {
// if the alpha of the pixel is 0, print a predefined pixel based on the position in order
// to mimic the checherboard background. If the transparent option was given, move right instead
let color = if is_pixel_transparent(pixel) {
if config.transparent {
None
} else {
Some(get_transparency_color(
curr_row_px,
curr_col_px,
config.truecolor,
))
}
} else {
Some(get_color_from_pixel(pixel, config.truecolor))
};
if mode == Mode::Top {
// add a new ColorSpec to row_buffer
let mut c = ColorSpec::new();
c.set_bg(color);
row_buffer.push(c);
} else {
// upgrade an already existing ColorSpec
let colorspec_to_upg = &mut row_buffer[curr_col_px as usize];
colorspec_to_upg.set_fg(color);
}
curr_col_px += 1;
// if the buffer is full start adding the second row of pixels
if row_buffer.len() == width as usize {
if mode == Mode::Top {
mode = Mode::Bottom;
curr_col_px = 0;
curr_row_px += 1;
}
// only if the second row is completed, flush the buffer and start again
else if curr_col_px == width {
curr_col_px = 0;
curr_row_px += 1;
// move right if x offset is specified
if config.x > 0 {
execute!(out_buffer, MoveRight(config.x))?;
}
// flush the row_buffer into out_buffer
fill_out_buffer(&mut row_buffer, &mut out_buffer, false)?;
// write the line to stdout
print_buffer(&stdout, &mut out_buffer)?;
mode = Mode::Top;
} else {
// in the middle of the second row, more iterations are required
}
}
}
// buffer will be flushed if the image has an odd height
if !row_buffer.is_empty() {
fill_out_buffer(&mut row_buffer, &mut out_buffer, true)?;
}
// do a final write to stdout to print last row if length is odd, and reset cursor position
print_buffer(&stdout, &mut out_buffer)?;
// TODO: might be +1/2 ?
Ok((width, curr_row_px / 2))
}
}
// Send out_buffer to stdout. Empties it when it's done
fn print_buffer(stdout: &BufferWriter, out_buffer: &mut Buffer) -> ViuResult {
match stdout.print(out_buffer) {
Ok(_) => {
out_buffer.clear();
Ok(())
}
Err(e) => match e.kind() {
// Ignore broken pipe errors. They arise when piping output to `head`, for example,
// and panic is not desired.
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(ViuError::IO(e)),
},
}
}
// Translates the row_buffer, containing colors, into the out_buffer which will be flushed to the terminal
fn fill_out_buffer(
row_buffer: &mut Vec<ColorSpec>,
out_buffer: &mut Buffer,
is_last_row: bool,
) -> ViuResult {
let mut out_color;
let mut out_char;
let mut new_color;
for c in row_buffer.iter() {
// If a flush is needed it means that only one row with UPPER_HALF_BLOCK must be printed
// because it is the last row, hence it contains only 1 pixel
if is_last_row {
new_color = ColorSpec::new();
if let Some(bg) = c.bg() {
new_color.set_fg(Some(*bg));
out_char = UPPER_HALF_BLOCK;
} else {
execute!(out_buffer, MoveRight(1))?;
continue;
}
out_color = &new_color;
} else {
match (c.fg(), c.bg()) {
(None, None) => {
// completely transparent
execute!(out_buffer, MoveRight(1))?;
continue;
}
(Some(bottom), None) => {
// only top transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*bottom));
out_color = &new_color;
out_char = LOWER_HALF_BLOCK;
}
(None, Some(top)) => {
// only bottom transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*top));
out_color = &new_color;
out_char = UPPER_HALF_BLOCK;
}
(Some(_top), Some(_bottom)) => {
// both parts have a color
out_color = c;
out_char = LOWER_HALF_BLOCK;
}
}
}
out_buffer.set_color(out_color)?;
write!(out_buffer, "{}", out_char)?;
}
out_buffer.reset()?;
writeln!(out_buffer)?;
row_buffer.clear();
Ok(())
}
fn is_pixel_transparent(pixel: (u32, u32, Rgba<u8>)) -> bool {
let (_x, _y, data) = pixel;
data[3] == 0
}
fn get_transparency_color(row: u32, col: u32, truecolor: bool) -> Color {
//imitate the transparent chess board pattern
let rgb = if row % 2 == col % 2 {
CHECKERBOARD_BACKGROUND_DARK
} else {
CHECKERBOARD_BACKGROUND_LIGHT
};
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
fn get_color_from_pixel(pixel: (u32, u32, Rgba<u8>), truecolor: bool) -> Color {
let (_x, _y, data) = pixel;
let rgb = (data[0], data[1], data[2]);
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else |
}
// enum used to keep track where the current line of pixels processed should be displayed - as
// background or foreground color
#[derive(PartialEq)]
enum Mode {
Top,
Bottom,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_block_printer_small() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(20, 6));
let config = Config {
width: Some(40),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 20);
assert_eq!(h, 3);
}
// TODO: failing on Windows. Why?
#[test]
fn test_block_printer_large() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(2000, 1000));
let config = Config {
width: Some(160),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 160);
assert_eq!(h, 40);
}
}
| {
Color::Ansi256(ansi256_from_rgb(rgb))
} | conditional_block |
block.rs | use crate::error::{ViuError, ViuResult};
use crate::printer::Printer;
use crate::Config;
use ansi_colours::ansi256_from_rgb;
use image::{DynamicImage, GenericImageView, Rgba};
use std::io::Write;
use termcolor::{Buffer, BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
use crossterm::cursor::{MoveRight, MoveTo, MoveToPreviousLine};
use crossterm::execute;
const UPPER_HALF_BLOCK: &str = "\u{2580}";
const LOWER_HALF_BLOCK: &str = "\u{2584}";
const CHECKERBOARD_BACKGROUND_LIGHT: (u8, u8, u8) = (153, 153, 153);
const CHECKERBOARD_BACKGROUND_DARK: (u8, u8, u8) = (102, 102, 102);
pub struct BlockPrinter {}
impl Printer for BlockPrinter {
fn print(&self, img: &DynamicImage, config: &Config) -> ViuResult<(u32, u32)> {
// there are two types of buffers in this function:
// - stdout: Buffer, which is from termcolor crate. Used to buffer all writing
// required to print a single image or frame. Flushed on every line
// - row_buffer: Vec<ColorSpec>, which stores back- and foreground colors for a
// row of terminal cells. When flushed, its output goes into out_buffer.
// They are both flushed on every terminal line (i.e 2 pixel rows)
let stdout = BufferWriter::stdout(ColorChoice::Always);
let mut out_buffer = stdout.buffer();
// adjust y offset
if config.absolute_offset {
if config.y >= 0 {
// If absolute_offset, move to (0,y).
execute!(out_buffer, MoveTo(0, config.y as u16))?;
} else {
//Negative values do not make sense.
return Err(ViuError::InvalidConfiguration(
"absolute_offset is true but y offset is negative".to_owned(),
));
}
} else if config.y < 0 {
// MoveUp if negative
execute!(out_buffer, MoveToPreviousLine(-config.y as u16))?;
} else {
// Move down y lines
for _ in 0..config.y {
// writeln! is used instead of MoveDown to force scrolldown
// observed when config.y > 0 and cursor is on the last terminal line
writeln!(out_buffer)?;
}
}
// resize the image so that it fits in the constraints, if any
let resized_img;
let img = if config.resize {
resized_img = super::resize(&img, config.width, config.height);
&resized_img
} else {
img
};
let (width, _) = img.dimensions();
// TODO: position information is contained in the pixel
let mut curr_col_px = 0;
let mut curr_row_px = 0;
let mut row_buffer: Vec<ColorSpec> = Vec::with_capacity(width as usize);
// row_buffer building mode. At first the top colors are calculated and then the bottom
// Once the bottom row is ready, row_buffer is flushed
let mut mode = Mode::Top;
// iterate pixels and fill row_buffer
for pixel in img.pixels() {
// if the alpha of the pixel is 0, print a predefined pixel based on the position in order
// to mimic the checherboard background. If the transparent option was given, move right instead
let color = if is_pixel_transparent(pixel) {
if config.transparent {
None
} else {
Some(get_transparency_color(
curr_row_px,
curr_col_px,
config.truecolor,
))
}
} else {
Some(get_color_from_pixel(pixel, config.truecolor))
};
if mode == Mode::Top {
// add a new ColorSpec to row_buffer
let mut c = ColorSpec::new();
c.set_bg(color);
row_buffer.push(c);
} else {
// upgrade an already existing ColorSpec
let colorspec_to_upg = &mut row_buffer[curr_col_px as usize];
colorspec_to_upg.set_fg(color);
}
curr_col_px += 1;
// if the buffer is full start adding the second row of pixels
if row_buffer.len() == width as usize {
if mode == Mode::Top {
mode = Mode::Bottom;
curr_col_px = 0;
curr_row_px += 1;
}
// only if the second row is completed, flush the buffer and start again
else if curr_col_px == width {
curr_col_px = 0;
curr_row_px += 1;
// move right if x offset is specified
if config.x > 0 {
execute!(out_buffer, MoveRight(config.x))?;
}
// flush the row_buffer into out_buffer
fill_out_buffer(&mut row_buffer, &mut out_buffer, false)?;
// write the line to stdout
print_buffer(&stdout, &mut out_buffer)?;
mode = Mode::Top;
} else {
// in the middle of the second row, more iterations are required
}
}
}
// buffer will be flushed if the image has an odd height
if !row_buffer.is_empty() {
fill_out_buffer(&mut row_buffer, &mut out_buffer, true)?;
}
// do a final write to stdout to print last row if length is odd, and reset cursor position
print_buffer(&stdout, &mut out_buffer)?;
// TODO: might be +1/2 ?
Ok((width, curr_row_px / 2))
}
}
// Send out_buffer to stdout. Empties it when it's done
fn print_buffer(stdout: &BufferWriter, out_buffer: &mut Buffer) -> ViuResult {
match stdout.print(out_buffer) {
Ok(_) => {
out_buffer.clear();
Ok(())
}
Err(e) => match e.kind() {
// Ignore broken pipe errors. They arise when piping output to `head`, for example,
// and panic is not desired.
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(ViuError::IO(e)),
},
}
}
// Translates the row_buffer, containing colors, into the out_buffer which will be flushed to the terminal
fn fill_out_buffer(
row_buffer: &mut Vec<ColorSpec>,
out_buffer: &mut Buffer,
is_last_row: bool,
) -> ViuResult {
let mut out_color;
let mut out_char;
let mut new_color;
for c in row_buffer.iter() {
// If a flush is needed it means that only one row with UPPER_HALF_BLOCK must be printed
// because it is the last row, hence it contains only 1 pixel
if is_last_row { | if let Some(bg) = c.bg() {
new_color.set_fg(Some(*bg));
out_char = UPPER_HALF_BLOCK;
} else {
execute!(out_buffer, MoveRight(1))?;
continue;
}
out_color = &new_color;
} else {
match (c.fg(), c.bg()) {
(None, None) => {
// completely transparent
execute!(out_buffer, MoveRight(1))?;
continue;
}
(Some(bottom), None) => {
// only top transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*bottom));
out_color = &new_color;
out_char = LOWER_HALF_BLOCK;
}
(None, Some(top)) => {
// only bottom transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*top));
out_color = &new_color;
out_char = UPPER_HALF_BLOCK;
}
(Some(_top), Some(_bottom)) => {
// both parts have a color
out_color = c;
out_char = LOWER_HALF_BLOCK;
}
}
}
out_buffer.set_color(out_color)?;
write!(out_buffer, "{}", out_char)?;
}
out_buffer.reset()?;
writeln!(out_buffer)?;
row_buffer.clear();
Ok(())
}
fn is_pixel_transparent(pixel: (u32, u32, Rgba<u8>)) -> bool {
let (_x, _y, data) = pixel;
data[3] == 0
}
fn get_transparency_color(row: u32, col: u32, truecolor: bool) -> Color {
//imitate the transparent chess board pattern
let rgb = if row % 2 == col % 2 {
CHECKERBOARD_BACKGROUND_DARK
} else {
CHECKERBOARD_BACKGROUND_LIGHT
};
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
fn get_color_from_pixel(pixel: (u32, u32, Rgba<u8>), truecolor: bool) -> Color {
let (_x, _y, data) = pixel;
let rgb = (data[0], data[1], data[2]);
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
// enum used to keep track where the current line of pixels processed should be displayed - as
// background or foreground color
#[derive(PartialEq)]
enum Mode {
Top,
Bottom,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_block_printer_small() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(20, 6));
let config = Config {
width: Some(40),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 20);
assert_eq!(h, 3);
}
// TODO: failing on Windows. Why?
#[test]
fn test_block_printer_large() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(2000, 1000));
let config = Config {
width: Some(160),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 160);
assert_eq!(h, 40);
}
} | new_color = ColorSpec::new(); | random_line_split |
block.rs | use crate::error::{ViuError, ViuResult};
use crate::printer::Printer;
use crate::Config;
use ansi_colours::ansi256_from_rgb;
use image::{DynamicImage, GenericImageView, Rgba};
use std::io::Write;
use termcolor::{Buffer, BufferWriter, Color, ColorChoice, ColorSpec, WriteColor};
use crossterm::cursor::{MoveRight, MoveTo, MoveToPreviousLine};
use crossterm::execute;
const UPPER_HALF_BLOCK: &str = "\u{2580}";
const LOWER_HALF_BLOCK: &str = "\u{2584}";
const CHECKERBOARD_BACKGROUND_LIGHT: (u8, u8, u8) = (153, 153, 153);
const CHECKERBOARD_BACKGROUND_DARK: (u8, u8, u8) = (102, 102, 102);
pub struct BlockPrinter {}
impl Printer for BlockPrinter {
fn print(&self, img: &DynamicImage, config: &Config) -> ViuResult<(u32, u32)> {
// there are two types of buffers in this function:
// - stdout: Buffer, which is from termcolor crate. Used to buffer all writing
// required to print a single image or frame. Flushed on every line
// - row_buffer: Vec<ColorSpec>, which stores back- and foreground colors for a
// row of terminal cells. When flushed, its output goes into out_buffer.
// They are both flushed on every terminal line (i.e 2 pixel rows)
let stdout = BufferWriter::stdout(ColorChoice::Always);
let mut out_buffer = stdout.buffer();
// adjust y offset
if config.absolute_offset {
if config.y >= 0 {
// If absolute_offset, move to (0,y).
execute!(out_buffer, MoveTo(0, config.y as u16))?;
} else {
//Negative values do not make sense.
return Err(ViuError::InvalidConfiguration(
"absolute_offset is true but y offset is negative".to_owned(),
));
}
} else if config.y < 0 {
// MoveUp if negative
execute!(out_buffer, MoveToPreviousLine(-config.y as u16))?;
} else {
// Move down y lines
for _ in 0..config.y {
// writeln! is used instead of MoveDown to force scrolldown
// observed when config.y > 0 and cursor is on the last terminal line
writeln!(out_buffer)?;
}
}
// resize the image so that it fits in the constraints, if any
let resized_img;
let img = if config.resize {
resized_img = super::resize(&img, config.width, config.height);
&resized_img
} else {
img
};
let (width, _) = img.dimensions();
// TODO: position information is contained in the pixel
let mut curr_col_px = 0;
let mut curr_row_px = 0;
let mut row_buffer: Vec<ColorSpec> = Vec::with_capacity(width as usize);
// row_buffer building mode. At first the top colors are calculated and then the bottom
// Once the bottom row is ready, row_buffer is flushed
let mut mode = Mode::Top;
// iterate pixels and fill row_buffer
for pixel in img.pixels() {
// if the alpha of the pixel is 0, print a predefined pixel based on the position in order
// to mimic the checherboard background. If the transparent option was given, move right instead
let color = if is_pixel_transparent(pixel) {
if config.transparent {
None
} else {
Some(get_transparency_color(
curr_row_px,
curr_col_px,
config.truecolor,
))
}
} else {
Some(get_color_from_pixel(pixel, config.truecolor))
};
if mode == Mode::Top {
// add a new ColorSpec to row_buffer
let mut c = ColorSpec::new();
c.set_bg(color);
row_buffer.push(c);
} else {
// upgrade an already existing ColorSpec
let colorspec_to_upg = &mut row_buffer[curr_col_px as usize];
colorspec_to_upg.set_fg(color);
}
curr_col_px += 1;
// if the buffer is full start adding the second row of pixels
if row_buffer.len() == width as usize {
if mode == Mode::Top {
mode = Mode::Bottom;
curr_col_px = 0;
curr_row_px += 1;
}
// only if the second row is completed, flush the buffer and start again
else if curr_col_px == width {
curr_col_px = 0;
curr_row_px += 1;
// move right if x offset is specified
if config.x > 0 {
execute!(out_buffer, MoveRight(config.x))?;
}
// flush the row_buffer into out_buffer
fill_out_buffer(&mut row_buffer, &mut out_buffer, false)?;
// write the line to stdout
print_buffer(&stdout, &mut out_buffer)?;
mode = Mode::Top;
} else {
// in the middle of the second row, more iterations are required
}
}
}
// buffer will be flushed if the image has an odd height
if !row_buffer.is_empty() {
fill_out_buffer(&mut row_buffer, &mut out_buffer, true)?;
}
// do a final write to stdout to print last row if length is odd, and reset cursor position
print_buffer(&stdout, &mut out_buffer)?;
// TODO: might be +1/2 ?
Ok((width, curr_row_px / 2))
}
}
// Send out_buffer to stdout. Empties it when it's done
fn print_buffer(stdout: &BufferWriter, out_buffer: &mut Buffer) -> ViuResult {
match stdout.print(out_buffer) {
Ok(_) => {
out_buffer.clear();
Ok(())
}
Err(e) => match e.kind() {
// Ignore broken pipe errors. They arise when piping output to `head`, for example,
// and panic is not desired.
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(ViuError::IO(e)),
},
}
}
// Translates the row_buffer, containing colors, into the out_buffer which will be flushed to the terminal
fn fill_out_buffer(
row_buffer: &mut Vec<ColorSpec>,
out_buffer: &mut Buffer,
is_last_row: bool,
) -> ViuResult {
let mut out_color;
let mut out_char;
let mut new_color;
for c in row_buffer.iter() {
// If a flush is needed it means that only one row with UPPER_HALF_BLOCK must be printed
// because it is the last row, hence it contains only 1 pixel
if is_last_row {
new_color = ColorSpec::new();
if let Some(bg) = c.bg() {
new_color.set_fg(Some(*bg));
out_char = UPPER_HALF_BLOCK;
} else {
execute!(out_buffer, MoveRight(1))?;
continue;
}
out_color = &new_color;
} else {
match (c.fg(), c.bg()) {
(None, None) => {
// completely transparent
execute!(out_buffer, MoveRight(1))?;
continue;
}
(Some(bottom), None) => {
// only top transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*bottom));
out_color = &new_color;
out_char = LOWER_HALF_BLOCK;
}
(None, Some(top)) => {
// only bottom transparent
new_color = ColorSpec::new();
new_color.set_fg(Some(*top));
out_color = &new_color;
out_char = UPPER_HALF_BLOCK;
}
(Some(_top), Some(_bottom)) => {
// both parts have a color
out_color = c;
out_char = LOWER_HALF_BLOCK;
}
}
}
out_buffer.set_color(out_color)?;
write!(out_buffer, "{}", out_char)?;
}
out_buffer.reset()?;
writeln!(out_buffer)?;
row_buffer.clear();
Ok(())
}
fn is_pixel_transparent(pixel: (u32, u32, Rgba<u8>)) -> bool {
let (_x, _y, data) = pixel;
data[3] == 0
}
fn get_transparency_color(row: u32, col: u32, truecolor: bool) -> Color {
//imitate the transparent chess board pattern
let rgb = if row % 2 == col % 2 {
CHECKERBOARD_BACKGROUND_DARK
} else {
CHECKERBOARD_BACKGROUND_LIGHT
};
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
fn get_color_from_pixel(pixel: (u32, u32, Rgba<u8>), truecolor: bool) -> Color {
let (_x, _y, data) = pixel;
let rgb = (data[0], data[1], data[2]);
if truecolor {
Color::Rgb(rgb.0, rgb.1, rgb.2)
} else {
Color::Ansi256(ansi256_from_rgb(rgb))
}
}
// enum used to keep track where the current line of pixels processed should be displayed - as
// background or foreground color
#[derive(PartialEq)]
enum Mode {
Top,
Bottom,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_block_printer_small() {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(20, 6));
let config = Config {
width: Some(40),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 20);
assert_eq!(h, 3);
}
// TODO: failing on Windows. Why?
#[test]
fn | () {
let img = DynamicImage::ImageRgba8(image::RgbaImage::new(2000, 1000));
let config = Config {
width: Some(160),
height: None,
absolute_offset: false,
transparent: true,
..Default::default()
};
let (w, h) = BlockPrinter {}.print(&img, &config).unwrap();
assert_eq!(w, 160);
assert_eq!(h, 40);
}
}
| test_block_printer_large | identifier_name |
immutable.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::fmt::Debug;
use std::iter::FromIterator;
use std::ptr::NonNull;
use std::sync::Arc;
use std::{convert::AsRef, usize};
use crate::util::bit_chunk_iterator::BitChunks;
use crate::{
bytes::{Bytes, Deallocation},
datatypes::ArrowNativeType,
ffi,
};
use super::ops::bitwise_unary_op_helper;
use super::MutableBuffer;
/// Buffer represents a contiguous memory region that can be shared with other buffers and across
/// thread boundaries.
#[derive(Clone, PartialEq, Debug)]
pub struct Buffer {
/// the internal byte buffer.
data: Arc<Bytes>,
/// The offset into the buffer.
offset: usize,
}
impl Buffer {
/// Auxiliary method to create a new Buffer
#[inline]
pub fn from_bytes(bytes: Bytes) -> Self {
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Initializes a [Buffer] from a slice of items.
pub fn | <U: ArrowNativeType, T: AsRef<[U]>>(items: &T) -> Self {
let slice = items.as_ref();
let capacity = slice.len() * std::mem::size_of::<U>();
let mut buffer = MutableBuffer::with_capacity(capacity);
buffer.extend_from_slice(slice);
buffer.into()
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` will free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `capacity` - Total allocated memory for the pointer `ptr`, in **bytes**
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes. If the `ptr` and `capacity` come from a `Buffer`, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, len: usize, capacity: usize) -> Self {
assert!(len <= capacity);
Buffer::build_with_arguments(ptr, len, Deallocation::Native(capacity))
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` **does not** free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `data` - An [ffi::FFI_ArrowArray] with the data
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes and that the foreign deallocator frees the region.
pub unsafe fn from_unowned(
ptr: NonNull<u8>,
len: usize,
data: Arc<ffi::FFI_ArrowArray>,
) -> Self {
Buffer::build_with_arguments(ptr, len, Deallocation::Foreign(data))
}
/// Auxiliary method to create a new Buffer
unsafe fn build_with_arguments(
ptr: NonNull<u8>,
len: usize,
deallocation: Deallocation,
) -> Self {
let bytes = Bytes::new(ptr, len, deallocation);
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Returns the number of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T] {
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
}
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
/// Returns a `BitChunks` instance which can be used to iterate over this buffers bits
/// in larger chunks and starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from a `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length or errors
/// if any of the items of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// first iteration, which will likely reserve sufficient space for the buffer.
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne!(buf1, buf2);
// unequal because of different length
let buf2 = Buffer::from(&[0, 1, 2, 3]);
assert_ne!(buf1, buf2);
}
#[test]
fn test_from_raw_parts() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_from_vec() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_copy() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = buf;
assert_eq!(5, buf2.len());
assert_eq!(64, buf2.capacity());
assert!(!buf2.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
}
#[test]
fn test_slice() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
let buf2 = buf.slice(2);
assert_eq!([6, 8, 10], buf2.as_slice());
assert_eq!(3, buf2.len());
assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());
let buf3 = buf2.slice(1);
assert_eq!([8, 10], buf3.as_slice());
assert_eq!(2, buf3.len());
assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());
let buf4 = buf.slice(5);
let empty_slice: [u8; 0] = [];
assert_eq!(empty_slice, buf4.as_slice());
assert_eq!(0, buf4.len());
assert!(buf4.is_empty());
assert_eq!(buf2.slice(2).as_slice(), &[10]);
}
#[test]
#[should_panic(
expected = "the offset of the new Buffer cannot exceed the existing length"
)]
fn test_slice_offset_out_of_bound() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
buf.slice(6);
}
#[test]
fn test_access_concurrently() {
let buffer = Buffer::from(vec![1, 2, 3, 4, 5]);
let buffer2 = buffer.clone();
assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());
let buffer_copy = thread::spawn(move || {
// access buffer in another thread.
buffer
})
.join();
assert!(buffer_copy.is_ok());
assert_eq!(buffer2, buffer_copy.ok().unwrap());
}
macro_rules! check_as_typed_data {
($input: expr, $native_t: ty) => {{
let buffer = Buffer::from_slice_ref($input);
let slice: &[$native_t] = unsafe { buffer.typed_data::<$native_t>() };
assert_eq!($input, slice);
}};
}
#[test]
#[allow(clippy::float_cmp)]
fn test_as_typed_data() {
check_as_typed_data!(&[1i8, 3i8, 6i8], i8);
check_as_typed_data!(&[1u8, 3u8, 6u8], u8);
check_as_typed_data!(&[1i16, 3i16, 6i16], i16);
check_as_typed_data!(&[1i32, 3i32, 6i32], i32);
check_as_typed_data!(&[1i64, 3i64, 6i64], i64);
check_as_typed_data!(&[1u16, 3u16, 6u16], u16);
check_as_typed_data!(&[1u32, 3u32, 6u32], u32);
check_as_typed_data!(&[1u64, 3u64, 6u64], u64);
check_as_typed_data!(&[1f32, 3f32, 6f32], f32);
check_as_typed_data!(&[1f64, 3f64, 6f64], f64);
}
#[test]
fn test_count_bits() {
assert_eq!(0, Buffer::from(&[0b00000000]).count_set_bits());
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits());
assert_eq!(3, Buffer::from(&[0b00001101]).count_set_bits());
assert_eq!(6, Buffer::from(&[0b01001001, 0b01010010]).count_set_bits());
assert_eq!(16, Buffer::from(&[0b11111111, 0b11111111]).count_set_bits());
}
#[test]
fn test_count_bits_slice() {
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b00000000])
.slice(1)
.count_set_bits()
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111])
.slice(1)
.count_set_bits()
);
assert_eq!(
3,
Buffer::from(&[0b11111111, 0b11111111, 0b00001101])
.slice(2)
.count_set_bits()
);
assert_eq!(
6,
Buffer::from(&[0b11111111, 0b01001001, 0b01010010])
.slice(1)
.count_set_bits()
);
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111, 0b11111111, 0b11111111])
.slice(2)
.count_set_bits()
);
}
#[test]
fn test_count_bits_offset_slice() {
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
assert_eq!(3, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 3));
assert_eq!(5, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 5));
assert_eq!(1, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 1));
assert_eq!(0, Buffer::from(&[0b11111111]).count_set_bits_offset(8, 0));
assert_eq!(2, Buffer::from(&[0b01010101]).count_set_bits_offset(0, 3));
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 10)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(3, 10)
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(8, 8)
);
assert_eq!(
5,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(11, 5)
);
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(16, 0)
);
assert_eq!(
2,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 5)
);
assert_eq!(
4,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 9)
);
}
}
| from_slice_ref | identifier_name |
immutable.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::fmt::Debug;
use std::iter::FromIterator;
use std::ptr::NonNull;
use std::sync::Arc;
use std::{convert::AsRef, usize};
use crate::util::bit_chunk_iterator::BitChunks;
use crate::{
bytes::{Bytes, Deallocation},
datatypes::ArrowNativeType,
ffi,
};
use super::ops::bitwise_unary_op_helper;
use super::MutableBuffer;
/// Buffer represents a contiguous memory region that can be shared with other buffers and across
/// thread boundaries.
#[derive(Clone, PartialEq, Debug)]
pub struct Buffer {
/// the internal byte buffer.
data: Arc<Bytes>,
/// The offset into the buffer.
offset: usize,
}
impl Buffer {
/// Auxiliary method to create a new Buffer
#[inline]
pub fn from_bytes(bytes: Bytes) -> Self {
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Initializes a [Buffer] from a slice of items.
pub fn from_slice_ref<U: ArrowNativeType, T: AsRef<[U]>>(items: &T) -> Self {
let slice = items.as_ref();
let capacity = slice.len() * std::mem::size_of::<U>();
let mut buffer = MutableBuffer::with_capacity(capacity);
buffer.extend_from_slice(slice);
buffer.into()
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` will free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `capacity` - Total allocated memory for the pointer `ptr`, in **bytes**
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes. If the `ptr` and `capacity` come from a `Buffer`, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, len: usize, capacity: usize) -> Self {
assert!(len <= capacity);
Buffer::build_with_arguments(ptr, len, Deallocation::Native(capacity))
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` **does not** free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `data` - An [ffi::FFI_ArrowArray] with the data
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes and that the foreign deallocator frees the region.
pub unsafe fn from_unowned(
ptr: NonNull<u8>,
len: usize,
data: Arc<ffi::FFI_ArrowArray>,
) -> Self {
Buffer::build_with_arguments(ptr, len, Deallocation::Foreign(data))
}
/// Auxiliary method to create a new Buffer
unsafe fn build_with_arguments(
ptr: NonNull<u8>,
len: usize,
deallocation: Deallocation,
) -> Self {
let bytes = Bytes::new(ptr, len, deallocation);
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Returns the number of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T] |
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
/// Returns a `BitChunks` instance which can be used to iterate over this buffers bits
/// in larger chunks and starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from a `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length or errors
/// if any of the items of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// first iteration, which will likely reserve sufficient space for the buffer.
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne!(buf1, buf2);
// unequal because of different length
let buf2 = Buffer::from(&[0, 1, 2, 3]);
assert_ne!(buf1, buf2);
}
#[test]
fn test_from_raw_parts() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_from_vec() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_copy() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = buf;
assert_eq!(5, buf2.len());
assert_eq!(64, buf2.capacity());
assert!(!buf2.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
}
#[test]
fn test_slice() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
let buf2 = buf.slice(2);
assert_eq!([6, 8, 10], buf2.as_slice());
assert_eq!(3, buf2.len());
assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());
let buf3 = buf2.slice(1);
assert_eq!([8, 10], buf3.as_slice());
assert_eq!(2, buf3.len());
assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());
let buf4 = buf.slice(5);
let empty_slice: [u8; 0] = [];
assert_eq!(empty_slice, buf4.as_slice());
assert_eq!(0, buf4.len());
assert!(buf4.is_empty());
assert_eq!(buf2.slice(2).as_slice(), &[10]);
}
#[test]
#[should_panic(
expected = "the offset of the new Buffer cannot exceed the existing length"
)]
fn test_slice_offset_out_of_bound() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
buf.slice(6);
}
#[test]
fn test_access_concurrently() {
let buffer = Buffer::from(vec![1, 2, 3, 4, 5]);
let buffer2 = buffer.clone();
assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());
let buffer_copy = thread::spawn(move || {
// access buffer in another thread.
buffer
})
.join();
assert!(buffer_copy.is_ok());
assert_eq!(buffer2, buffer_copy.ok().unwrap());
}
macro_rules! check_as_typed_data {
($input: expr, $native_t: ty) => {{
let buffer = Buffer::from_slice_ref($input);
let slice: &[$native_t] = unsafe { buffer.typed_data::<$native_t>() };
assert_eq!($input, slice);
}};
}
#[test]
#[allow(clippy::float_cmp)]
fn test_as_typed_data() {
check_as_typed_data!(&[1i8, 3i8, 6i8], i8);
check_as_typed_data!(&[1u8, 3u8, 6u8], u8);
check_as_typed_data!(&[1i16, 3i16, 6i16], i16);
check_as_typed_data!(&[1i32, 3i32, 6i32], i32);
check_as_typed_data!(&[1i64, 3i64, 6i64], i64);
check_as_typed_data!(&[1u16, 3u16, 6u16], u16);
check_as_typed_data!(&[1u32, 3u32, 6u32], u32);
check_as_typed_data!(&[1u64, 3u64, 6u64], u64);
check_as_typed_data!(&[1f32, 3f32, 6f32], f32);
check_as_typed_data!(&[1f64, 3f64, 6f64], f64);
}
#[test]
fn test_count_bits() {
assert_eq!(0, Buffer::from(&[0b00000000]).count_set_bits());
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits());
assert_eq!(3, Buffer::from(&[0b00001101]).count_set_bits());
assert_eq!(6, Buffer::from(&[0b01001001, 0b01010010]).count_set_bits());
assert_eq!(16, Buffer::from(&[0b11111111, 0b11111111]).count_set_bits());
}
#[test]
fn test_count_bits_slice() {
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b00000000])
.slice(1)
.count_set_bits()
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111])
.slice(1)
.count_set_bits()
);
assert_eq!(
3,
Buffer::from(&[0b11111111, 0b11111111, 0b00001101])
.slice(2)
.count_set_bits()
);
assert_eq!(
6,
Buffer::from(&[0b11111111, 0b01001001, 0b01010010])
.slice(1)
.count_set_bits()
);
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111, 0b11111111, 0b11111111])
.slice(2)
.count_set_bits()
);
}
#[test]
fn test_count_bits_offset_slice() {
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
assert_eq!(3, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 3));
assert_eq!(5, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 5));
assert_eq!(1, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 1));
assert_eq!(0, Buffer::from(&[0b11111111]).count_set_bits_offset(8, 0));
assert_eq!(2, Buffer::from(&[0b01010101]).count_set_bits_offset(0, 3));
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 10)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(3, 10)
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(8, 8)
);
assert_eq!(
5,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(11, 5)
);
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(16, 0)
);
assert_eq!(
2,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 5)
);
assert_eq!(
4,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 9)
);
}
}
| {
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
} | identifier_body |
immutable.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::fmt::Debug;
use std::iter::FromIterator;
use std::ptr::NonNull;
use std::sync::Arc;
use std::{convert::AsRef, usize};
use crate::util::bit_chunk_iterator::BitChunks;
use crate::{
bytes::{Bytes, Deallocation},
datatypes::ArrowNativeType,
ffi,
};
use super::ops::bitwise_unary_op_helper;
use super::MutableBuffer;
/// Buffer represents a contiguous memory region that can be shared with other buffers and across
/// thread boundaries.
#[derive(Clone, PartialEq, Debug)]
pub struct Buffer {
/// the internal byte buffer.
data: Arc<Bytes>,
/// The offset into the buffer.
offset: usize,
}
impl Buffer {
/// Auxiliary method to create a new Buffer
#[inline]
pub fn from_bytes(bytes: Bytes) -> Self {
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Initializes a [Buffer] from a slice of items.
pub fn from_slice_ref<U: ArrowNativeType, T: AsRef<[U]>>(items: &T) -> Self {
let slice = items.as_ref();
let capacity = slice.len() * std::mem::size_of::<U>();
let mut buffer = MutableBuffer::with_capacity(capacity);
buffer.extend_from_slice(slice);
buffer.into()
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` will free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `capacity` - Total allocated memory for the pointer `ptr`, in **bytes**
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes. If the `ptr` and `capacity` come from a `Buffer`, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: NonNull<u8>, len: usize, capacity: usize) -> Self {
assert!(len <= capacity);
Buffer::build_with_arguments(ptr, len, Deallocation::Native(capacity))
}
/// Creates a buffer from an existing memory region (must already be byte-aligned), this
/// `Buffer` **does not** free this piece of memory when dropped.
///
/// # Arguments
///
/// * `ptr` - Pointer to raw parts
/// * `len` - Length of raw parts in **bytes**
/// * `data` - An [ffi::FFI_ArrowArray] with the data
///
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is valid for `len`
/// bytes and that the foreign deallocator frees the region.
pub unsafe fn from_unowned(
ptr: NonNull<u8>,
len: usize,
data: Arc<ffi::FFI_ArrowArray>,
) -> Self {
Buffer::build_with_arguments(ptr, len, Deallocation::Foreign(data))
}
/// Auxiliary method to create a new Buffer
unsafe fn build_with_arguments(
ptr: NonNull<u8>,
len: usize,
deallocation: Deallocation,
) -> Self {
let bytes = Bytes::new(ptr, len, deallocation);
Buffer {
data: Arc::new(bytes),
offset: 0,
}
}
/// Returns the number of bytes in the buffer
pub fn len(&self) -> usize {
self.data.len() - self.offset
}
/// Returns the capacity of this buffer.
/// For externally owned buffers, this returns zero
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Returns whether the buffer is empty.
pub fn is_empty(&self) -> bool {
self.data.len() - self.offset == 0
}
/// Returns the byte slice stored in this buffer
pub fn as_slice(&self) -> &[u8] {
&self.data[self.offset..]
}
/// Returns a new [Buffer] that is a slice of this buffer starting at `offset`.
/// Doing so allows the same memory region to be shared between buffers.
/// # Panics
/// Panics iff `offset` is larger than `len`.
pub fn slice(&self, offset: usize) -> Self {
assert!(
offset <= self.len(),
"the offset of the new Buffer cannot exceed the existing length"
);
Self {
data: self.data.clone(),
offset: self.offset + offset,
}
}
/// Returns a pointer to the start of this buffer.
///
/// Note that this should be used cautiously, and the returned pointer should not be
/// stored anywhere, to avoid dangling pointers.
pub fn as_ptr(&self) -> *const u8 {
unsafe { self.data.ptr().as_ptr().add(self.offset) }
}
/// View buffer as typed slice.
///
/// # Safety
///
/// `ArrowNativeType` is public so that it can be used as a trait bound for other public
/// components, such as the `ToByteSlice` trait. However, this means that it can be
/// implemented by user defined types, which it is not intended for.
pub unsafe fn typed_data<T: ArrowNativeType + num::Num>(&self) -> &[T] {
// JUSTIFICATION
// Benefit
// Many of the buffers represent specific types, and consumers of `Buffer` often need to re-interpret them.
// Soundness
// * The pointer is non-null by construction
// * alignment asserted below.
let (prefix, offsets, suffix) = self.as_slice().align_to::<T>();
assert!(prefix.is_empty() && suffix.is_empty());
offsets
}
/// Returns a slice of this buffer starting at a certain bit offset.
/// If the offset is byte-aligned the returned buffer is a shallow clone,
/// otherwise a new buffer is allocated and filled with a copy of the bits in the range.
pub fn bit_slice(&self, offset: usize, len: usize) -> Self {
if offset % 8 == 0 {
return self.slice(offset / 8);
}
bitwise_unary_op_helper(self, offset, len, |a| a)
}
/// Returns a `BitChunks` instance which can be used to iterate over this buffers bits
/// in larger chunks and starting at arbitrary bit offsets.
/// Note that both `offset` and `length` are measured in bits.
pub fn bit_chunks(&self, offset: usize, len: usize) -> BitChunks {
BitChunks::new(self.as_slice(), offset, len)
}
/// Returns the number of 1-bits in this buffer.
pub fn count_set_bits(&self) -> usize {
let len_in_bits = self.len() * 8;
// self.offset is already taken into consideration by the bit_chunks implementation
self.count_set_bits_offset(0, len_in_bits)
}
/// Returns the number of 1-bits in this buffer, starting from `offset` with `length` bits
/// inspected. Note that both `offset` and `length` are measured in bits.
pub fn count_set_bits_offset(&self, offset: usize, len: usize) -> usize {
let chunks = self.bit_chunks(offset, len);
let mut count = chunks.iter().map(|c| c.count_ones() as usize).sum();
count += chunks.remainder_bits().count_ones() as usize;
count
}
}
/// Creating a `Buffer` instance by copying the memory from a `AsRef<[u8]>` into a newly
/// allocated memory region.
impl<T: AsRef<[u8]>> From<T> for Buffer {
fn from(p: T) -> Self {
// allocate aligned memory buffer
let slice = p.as_ref();
let len = slice.len();
let mut buffer = MutableBuffer::new(len);
buffer.extend_from_slice(slice);
buffer.into()
}
}
/// Creating a `Buffer` instance by storing the boolean values into the buffer
impl std::iter::FromIterator<bool> for Buffer {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = bool>,
{
MutableBuffer::from_iter(iter).into()
}
}
impl std::ops::Deref for Buffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len()) }
}
}
unsafe impl Sync for Buffer {}
unsafe impl Send for Buffer {}
impl From<MutableBuffer> for Buffer {
#[inline]
fn from(buffer: MutableBuffer) -> Self {
buffer.into_buffer()
}
}
impl Buffer {
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Example
/// ```
/// # use arrow::buffer::Buffer;
/// let v = vec![1u32];
/// let iter = v.iter().map(|x| x * 2);
/// let buffer = unsafe { Buffer::from_trusted_len_iter(iter) };
/// assert_eq!(buffer.len(), 4) // u32 has 4 bytes
/// ```
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
// This implementation is required for two reasons:
// 1. there is no trait `TrustedLen` in stable rust and therefore
// we can't specialize `extend` for `TrustedLen` like `Vec` does.
// 2. `from_trusted_len_iter` is faster.
#[inline]
pub unsafe fn from_trusted_len_iter<T: ArrowNativeType, I: Iterator<Item = T>>(
iterator: I,
) -> Self {
MutableBuffer::from_trusted_len_iter(iterator).into()
}
/// Creates a [`Buffer`] from an [`Iterator`] with a trusted (upper) length or errors
/// if any of the items of the iterator is an error.
/// Prefer this to `collect` whenever possible, as it is ~60% faster.
/// # Safety
/// This method assumes that the iterator's size is correct and is undefined behavior
/// to use it on an iterator that reports an incorrect length.
#[inline]
pub unsafe fn try_from_trusted_len_iter<
E,
T: ArrowNativeType,
I: Iterator<Item = std::result::Result<T, E>>,
>(
iterator: I,
) -> std::result::Result<Self, E> {
Ok(MutableBuffer::try_from_trusted_len_iter(iterator)?.into())
}
}
impl<T: ArrowNativeType> FromIterator<T> for Buffer {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut iterator = iter.into_iter();
let size = std::mem::size_of::<T>();
// first iteration, which will likely reserve sufficient space for the buffer.
let mut buffer = match iterator.next() {
None => MutableBuffer::new(0),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut buffer = MutableBuffer::new(lower.saturating_add(1) * size);
unsafe {
std::ptr::write(buffer.as_mut_ptr() as *mut T, element);
buffer.set_len(size);
}
buffer
}
};
buffer.extend_from_iter(iterator);
buffer.into()
}
}
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
#[test]
fn test_buffer_data_equality() {
let buf1 = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(buf1, buf2);
// slice with same offset should still preserve equality
let buf3 = buf1.slice(2);
assert_ne!(buf1, buf3);
let buf4 = buf2.slice(2);
assert_eq!(buf3, buf4);
// Different capacities should still preserve equality
let mut buf2 = MutableBuffer::new(65);
buf2.extend_from_slice(&[0u8, 1, 2, 3, 4]);
let buf2 = buf2.into();
assert_eq!(buf1, buf2);
// unequal because of different elements
let buf2 = Buffer::from(&[0, 0, 2, 3, 4]);
assert_ne!(buf1, buf2);
// unequal because of different length
let buf2 = Buffer::from(&[0, 1, 2, 3]);
assert_ne!(buf1, buf2);
}
#[test]
fn test_from_raw_parts() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_from_vec() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
assert_eq!(5, buf.len());
assert!(!buf.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf.as_slice());
}
#[test]
fn test_copy() {
let buf = Buffer::from(&[0, 1, 2, 3, 4]);
let buf2 = buf;
assert_eq!(5, buf2.len());
assert_eq!(64, buf2.capacity());
assert!(!buf2.as_ptr().is_null());
assert_eq!([0, 1, 2, 3, 4], buf2.as_slice());
}
#[test]
fn test_slice() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
let buf2 = buf.slice(2);
assert_eq!([6, 8, 10], buf2.as_slice());
assert_eq!(3, buf2.len());
assert_eq!(unsafe { buf.as_ptr().offset(2) }, buf2.as_ptr());
let buf3 = buf2.slice(1);
assert_eq!([8, 10], buf3.as_slice());
assert_eq!(2, buf3.len());
assert_eq!(unsafe { buf.as_ptr().offset(3) }, buf3.as_ptr());
let buf4 = buf.slice(5);
let empty_slice: [u8; 0] = [];
assert_eq!(empty_slice, buf4.as_slice());
assert_eq!(0, buf4.len());
assert!(buf4.is_empty());
assert_eq!(buf2.slice(2).as_slice(), &[10]);
}
#[test]
#[should_panic(
expected = "the offset of the new Buffer cannot exceed the existing length"
)]
fn test_slice_offset_out_of_bound() {
let buf = Buffer::from(&[2, 4, 6, 8, 10]);
buf.slice(6);
}
#[test]
fn test_access_concurrently() {
let buffer = Buffer::from(vec![1, 2, 3, 4, 5]);
let buffer2 = buffer.clone();
assert_eq!([1, 2, 3, 4, 5], buffer.as_slice());
let buffer_copy = thread::spawn(move || {
// access buffer in another thread.
buffer
})
.join();
assert!(buffer_copy.is_ok()); | macro_rules! check_as_typed_data {
($input: expr, $native_t: ty) => {{
let buffer = Buffer::from_slice_ref($input);
let slice: &[$native_t] = unsafe { buffer.typed_data::<$native_t>() };
assert_eq!($input, slice);
}};
}
#[test]
#[allow(clippy::float_cmp)]
fn test_as_typed_data() {
check_as_typed_data!(&[1i8, 3i8, 6i8], i8);
check_as_typed_data!(&[1u8, 3u8, 6u8], u8);
check_as_typed_data!(&[1i16, 3i16, 6i16], i16);
check_as_typed_data!(&[1i32, 3i32, 6i32], i32);
check_as_typed_data!(&[1i64, 3i64, 6i64], i64);
check_as_typed_data!(&[1u16, 3u16, 6u16], u16);
check_as_typed_data!(&[1u32, 3u32, 6u32], u32);
check_as_typed_data!(&[1u64, 3u64, 6u64], u64);
check_as_typed_data!(&[1f32, 3f32, 6f32], f32);
check_as_typed_data!(&[1f64, 3f64, 6f64], f64);
}
#[test]
fn test_count_bits() {
assert_eq!(0, Buffer::from(&[0b00000000]).count_set_bits());
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits());
assert_eq!(3, Buffer::from(&[0b00001101]).count_set_bits());
assert_eq!(6, Buffer::from(&[0b01001001, 0b01010010]).count_set_bits());
assert_eq!(16, Buffer::from(&[0b11111111, 0b11111111]).count_set_bits());
}
#[test]
fn test_count_bits_slice() {
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b00000000])
.slice(1)
.count_set_bits()
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111])
.slice(1)
.count_set_bits()
);
assert_eq!(
3,
Buffer::from(&[0b11111111, 0b11111111, 0b00001101])
.slice(2)
.count_set_bits()
);
assert_eq!(
6,
Buffer::from(&[0b11111111, 0b01001001, 0b01010010])
.slice(1)
.count_set_bits()
);
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111, 0b11111111, 0b11111111])
.slice(2)
.count_set_bits()
);
}
#[test]
fn test_count_bits_offset_slice() {
assert_eq!(8, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 8));
assert_eq!(3, Buffer::from(&[0b11111111]).count_set_bits_offset(0, 3));
assert_eq!(5, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 5));
assert_eq!(1, Buffer::from(&[0b11111111]).count_set_bits_offset(3, 1));
assert_eq!(0, Buffer::from(&[0b11111111]).count_set_bits_offset(8, 0));
assert_eq!(2, Buffer::from(&[0b01010101]).count_set_bits_offset(0, 3));
assert_eq!(
16,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 16)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(0, 10)
);
assert_eq!(
10,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(3, 10)
);
assert_eq!(
8,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(8, 8)
);
assert_eq!(
5,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(11, 5)
);
assert_eq!(
0,
Buffer::from(&[0b11111111, 0b11111111]).count_set_bits_offset(16, 0)
);
assert_eq!(
2,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 5)
);
assert_eq!(
4,
Buffer::from(&[0b01101101, 0b10101010]).count_set_bits_offset(7, 9)
);
}
} | assert_eq!(buffer2, buffer_copy.ok().unwrap());
}
| random_line_split |
ZH.js | export default {
one_way: '单程',
round_trip: '往返',
flight_search_btn: '搜索',
Economy: '经济舱',
Business: '商务舱',
PremiumEconomy: '高端经济舱',
First: '头等舱',
BusinessAndFirst: '商务/头等舱',
departure: '去程',
departure_alias: '去程',
departure_title: '去程:',
return: '返程',
return_title: '返程:',
duration: '总时长',
stops: '中转',
btn_confirm: '确认',
btn_complete: '完成',
high_on_time_rate: '高准点率',
sort_by: '排序',
detail: '详情',
direct_flights_only: '只看直飞',
filter_by: '筛选',
multiple_airline: '多航空公司',
flight_summary_title: '选择供应商',
no_search_result_tip1: '哎呀,没有找到航班结果',
no_search_result_tip2: '请尝试更换筛选项',
no_search_result_tip3: '没有找到对应的返程结果',
no_search_result_tip4: '请尝试更换出行日期',
search_other_flight: '搜索其他航班',
day: '天',
new: '新',
old: '旧',
sec: '次',
include_tax: '含税',
rt_include_tax: '往返含税',
change_airport: '换机场',
share_flight: '共享',
filter_cancel: '清除',
filter_departure_time: '起飞时间',
filter_arrive_time: '到达时间',
filter_airline: '航空公司',
filter_airport: '起降机场',
filter_no_limit: '不限',
filter_dt1: '00:00-06:00',
filter_dt2: '06:00-12:00',
filter_dt3: '12:00-18:00',
filter_dt4: '18:00-24:00',
filter_at1: '00:00-06:00',
filter_at2: '06:00-12:00',
filter_at3: '12:00-18:00',
filter_at4: '18:00-24:00',
filter_departure_airport: '起飞机场',
filter_arrive_airport: '降落机场',
filter_transfer_airport: '中转机场',
filter_no_result: '没有合适的航班在此筛选范围内',
filter_clear: '清空筛选',
arrive_bottom_tips: '已展示所有航班',
timeline_show_all: '已显示全部',
timeline_lowest_price_ota: '最低价来自',
timeline_timeout_tips1: '航班价格可能发生变化,',
timeline_timeout_tips2: '将为你刷新以获取最新价格。',
timeline_change_date_limit: '目前仅支持前后三天日期切换哦,更多日期请返回首页选择。',
depdate_later_returndate: '去程日期不能晚于返程日期哦',
returndate_earer_depdate: '返程日期不能早于去程日期哦',
summary_detail_btn: '详情',
summary_hide_detail_btn: '收起详情',
summary_booking: '预订',
summary_rult_tip: '该行李/退改签规则需进入预订页查看',
summary_igola_booking: '辅助订购 ',
summary_ota_booking: '跳转官网订购 ', | summary_footer_tip: '飞呀辅助订购为你提供快捷预订通道,无需跳转到第三方平台。但出票和退改签服务仍由第三方机票服务平台提供。',
summary_no_result: '暂无平台报价',
summary_research: '重新搜索',
summary_timeout_tips1: '航班价格可能发生变化,将为你刷新',
summary_timeout_tips2: '以获取最新价格',
summary_quick_login: '动态码登录',
summary_quick_login_tip: '下单前先花10秒登录账号哦',
summary_other_login: '其他登录方式',
summary_setting_password: '设置密码',
summary_setting_password_tip: '首次登录设置个密码吧',
summary_register_tip: '点击完成表示阅读并同意',
summary_third_booking: '跳转第三方订购',
rule_tip_one: '行李/ 退改签规则>',
rule_tip_two: '航司直营 公正退改',
ota_redirect_tip: '以下供应商需跳转至官网预订',
member_quick_login: '验证码登录',
member_login: '密码登录',
phone: '手机号码',
phone_email: '手机号码/电子邮箱',
password: '密码',
forget_password: '忘记密码',
validation_code: '验证码',
validation_code_phone: '短信验证码',
get_validation_code: '获取验证码',
resend_validation_code: '重新发送',
other_ways: '第三方登录',
login: '登录',
go_login: '去登录',
register: '注册',
sign_up_tips: '没有账号?注册一个',
sign_in_tips: '已有账号?直接登录',
sign_up_have_account: '该账号已注册,去登录 >',
sign_in_account_lock: '帐号异常,请联系客服',
sign_in_account_wrong: '该帐号无效,请重新输入',
sign_in_not_phone: '该号码无效,请重新输入',
invalid_phone_err: '该号码已被绑定',
sign_in_not_account: '该账号还未注册,去注册 >',
sign_in_not_password: '您还未设置密码',
sign_in_code_wrong: '验证码错误',
sign_in_password_wrong: '密码错误',
account_manage_auth_reach_limit: '当天发送次数已达上限,请明天再试',
member_timeout: '请求超时,请稍候再试!',
server_error: '服务错误',
repeat_error: '请求过于频繁',
set_pwd_placeholder: '请设置8-16位密码',
pwd_warning: '密码为8-16个字符(不包含空格)',
pwd_same_warning: '新密码不能与旧密码相同哦!',
pwd_error: '密码不正确',
set_new_pwd_error: '您已设置密码',
no_pwd_tip: '该账号还没设置密码',
pwd_invalid_warning: '密码不符合规范',
right_pre_tips: '点击注册即同意',
right_flya: '《注册协议》',
term_service: '服务条款',
reset_pwd_not_account: '账号错误或未注册',
reset_pwd_success: '新密码设置成功',
step_next: '下一步',
set_new_pwd: '设置新密码',
set_new_pwd_placeholder: '请设置8-16位新密码',
old_pwd_check: '密码验证',
old_pwd_placeholder: '请输入旧密码',
submit: '提交',
member_relogin_tip: '您的账号信息已过期,请重新登录',
member_signin_tip: '登录后可查看订单和优惠券信息',
more_title: '更多',
version: '当前版本',
about_igola: '关于我们',
feedback: '意见反馈',
contact_customer_service: '联系客服',
customer_service_choose_tip: '请选择客服联系方式',
edit_nickName: '编辑昵称',
nickName_err: '长度为1-16个字符,且不支持特殊符号或空格!',
coupons: '优惠劵',
flya_zh: '飞呀',
flya_en: 'Flya',
flya_design: 'Designed by iGola',
copyright: 'Copyright © 2014-2017',
igola_company_name: '广州市骑鹅游信息技术咨询服务有限公司',
tab_all_order: '全部订单',
tab_pay_order: '待支付',
tab_unused_order: '待出行',
tab_refund_order: '退改签',
order_not_paid: '待支付',
order_paid: '支付成功',
order_expired: '订单过期',
order_booking: '出票中',
order_pending: '出票中',
order_success: '出票成功',
order_failed: '出票失败',
order_cancelled: '已取消',
order_refunding: '退款中',
order_refunded: '已退款',
btn_search_flights: '搜索机票',
btn_before_order: '一年前订单',
order_details_title: '订单详情',
order_details_to_pay1: '订单已生成,请在',
order_details_to_pay2: '内支付',
order_details_not_paid: '未支付',
order_details_booking: '支付成功,等待出票',
order_details_success: '出票成功',
order_details_failed: '出票失败',
order_details_expired: '订单过期',
order_details_pay_desc1: '由供应商/航司直接出票;',
order_details_pay_desc2: '支付后最快2小时内出票,最终预订成功需以出票成功为准;',
order_details_pay_desc3: '若没成功出票,预付款项将全额退还。',
order_details_booking_desc1: '您的订单最快在2小时内出票,出票成功后,您将会收到短信和邮件通知,请留意。',
order_details_booking_desc2: '若您预订的供应商出票失败,igola会自动匹配其他优质供应商为您出票,可能存在一定差价。',
order_details_booking_desc3: '若仍无法出票成功,客服人员会联系您,及时处理退款事宜。',
order_details_failed_desc: '客服将在24小时内与您联系并处理退款,如没有收到客服电话,请主动联系客服。',
order_details_orderId: '订单号:',
order_details_igola_Id: 'iGola订单号:',
order_details_orderTime: '下单时间:',
order_details_seat: '订座记录:',
order_details_price: '票价',
order_details_tax: '税费',
order_details_baggage: '行李费',
order_details_insurances: '保险费',
order_details_discount: '优惠',
order_details_total_price: '总价',
order_details_total: '总额:',
order_deatils_pay_btn: '去支付',
order_deatils_again_btn: '再订一张',
order_deatils_change_btn: '辅助改签',
order_deatils_refund_btn: '辅助退票',
order_detail_change_ticket: '请联系客服辅助改签',
order_detail_refund_ticket: '请联系客服辅助退票',
order_detail_online_service: '在线客服',
order_detail_phone_service: '热线电话',
order_detail_change_ticket_record: '改签记录',
order_passengers_title: '乘机人',
order_passengerInfo_title: '乘机人信息',
order_passengers_ticketNo: '票号:',
order_contact_title: '联系人',
order_suppliers_title: '供应商',
passengerDetails_cardType: '证件类型',
passengerDetails_cardNum: '证件号码',
passengerDetails_cardExpired: '证件有效期',
passengerDetails_issueAt: '签发地',
passenger_lastName: '姓',
passenger_firstName: '名',
passenger_birthday: '出生日期',
passenger_nationality: '国籍',
passenger_gender: '性别',
passenger_firstName_placeholder_ZH: '请填写中文名字',
passenger_lastName_placeholder_ZH: '请填写中文姓氏',
passenger_firstName_placeholder_EN: '如张岩应填 YAN',
passenger_lastName_placeholder_EN: '如张岩应填 ZHANG',
flights_detail_tip: '*航班起降均为当地时间',
profile_title: '个人资料',
avatar_title: '头像',
nickname_title: '昵称',
nickname_tip: '起个名字吧',
phone_title: '手机号码',
phone_bind: '绑定手机',
phone_change: '更换手机',
email_title: '邮箱',
pwd_title: '密码',
pwd_change: '修改密码',
pwd_change_success: '密码修改成功',
pwd_set: '设置密码',
pwd_set_success: '密码设置成功',
other_account_title: '第三方账号',
other_account_tip: '绑定后,下次可用第三方账号快速登录',
wechat: '微信',
qq: 'QQ',
bind_tip: '去绑定',
logout_title: '退出登录',
logout_confirm: '确定退出登录?',
take_picture: '照相',
pick_picture: '从相册选取',
login_success: '登录成功',
login_cancel: '登录已取消',
login_failed: '登录失败',
not_available_wechat: '无法正常使用微信(微信未安装)',
not_available_qq: '无法正常使用QQ(QQ未安装)',
update_success: '修改成功',
update_failed: '修改失败',
colon: ':',
copy: '复制',
copied: '已复制',
change_ticket: '改签记录',
change_ticket_status: '目标改签航班',
change_ticket_passenger: '乘机人',
change_ticket_process_tips: '已提交至供应商审核, 改签结果以短信确认为准。',
change_ticket_fee: '改签费',
change_ticket_upgrade_fee: '升舱费',
change_ticket_total_fee: '总额',
change_ticket_fee_is: '改签费为',
change_ticket_cancel: '改签失败',
change_ticket_refunding: '改签失败',
change_ticket_changing: '待审核',
change_ticket_changed: '改签完成',
change_ticket_has_cancel: '已取消',
change_ticket_has_pay: '已支付',
change_ticket_has_complete: '已完成',
change_ticket_change_support_ticket: '已支付',
change_ticket_difference: '已补差价',
change_ticket_counter_tip1: '请在',
change_ticket_counter_tip2: '内完成支付',
change_ticket_counter_tip3: '否则将自动取消改签申请',
change_ticket_pay_total_fee: '应付总额',
change_ticket_pay_btn: '去支付',
change_ticket_cancel_btn: '取消',
change_ticket_pay_history: '支付历史',
change_ticket_is_cancel: '是否确定取消本次改签?',
change_ticket_is_think_btn: '再考虑下',
change_ticket_is_cancel_btn: '确定取消',
change_ticket_check_reason: '查看原因',
change_ticket_ok: '我知道了',
depart_abbr: '去',
return_abbr: '返',
list_refresh_pull: '下拉可以刷新',
list_refresh_release: '释放立即更新',
list_refresh_loding: '努力加载中...',
list_refresh_done: '刷新成功',
list_loading: '正在加载...',
list_loading_no_more: '没有更多了...',
list_no_order: '当前没有订单',
list_no_order_tip: '快来开启新的旅程!',
clear: '清除',
confirm: '确定',
MON: '一',
TUE: '二',
WED: '三',
THU: '四',
FRI: '五',
SAT: '六',
SUN: '日',
departure_time: '出发时间',
where_to_go: '想去哪里?',
where_from: '从哪儿出发?',
please_enter_city: '请输入城市/机场名或三字码',
mainland: '国内',
international: '国际·港澳台',
history_search: '附近/最近搜索',
hotCity: '热门城市',
city: '城市',
search_more_city_tips: '更多城市请搜索查询',
// booking
generateFormErr: '您搜索的航班结果已过期,部分航班价格可能发生改变,请点击刷新获取最新的结果。',
detail_and_ticket_policy: '行李/退改签规则',
buy_limit: '购买限制',
change_conditions: '改签条件',
refund_conditions: '退票条件',
baggage_restrictions: '行李限制',
other_conditions: '其他说明',
ticket_policy: '退改签规则',
add_passengers: '添加乘机人',
totalOf: '余票{num}张',
contacts: '联系人',
edit: '编辑',
compatible_coupons: '可用优惠',
i_readed: '我已阅读并同意',
igola_information: '预订须知',
fares: '票价',
taxes: '税费',
adults: '成人',
children: '儿童',
infants: '婴儿',
total_price: '总价',
yuan: '{num}元',
details: '明细',
book: '预订',
price_updating: '正在获取最新价格',
unavailable: '无可用',
booking_igola_booking: '辅助\n订购',
booking_ota_booking: '跳转\n订购',
booking_ticket_limit_title: '注意:请确认购买资格,否则无法出票',
booking_confirm_buy: '确认购买',
booking_giveup_buy: '放弃购买',
booking_no_supplier: '抱歉,该航班余票不足,\n请重新搜索预订。',
booking_totalBaggageFee: '行李费',
booking_loading: '正在提交订单...',
booking_price_float: '机票价格发生了变动,请返回重新确认。',
booking_no_seat: '你来晚了,该舱位的机票已售完,请重新选择',
booking_timeout: '连接超时,请重试。',
booking_other_tip: '机票价格已发生变动,请重新选择。',
booking_session_expired: '价格已过期',
ticket_rule_null: '按航司客规为准。',
// 联系人
choose_Contacts: '选择联系人',
add_Contacts: '新增联系人',
edit_Contacts: '编辑联系人',
save: '保存',
chinese_or_english: '中文或英文',
lastname: '姓',
firstname: '名',
email: '邮箱',
get_msg: '用于接收行程单',
input_cellphone: '输入手机号码',
i_wrong: '我点错了',
sure_delete: '确定删除',
delete_contacts_text: '确定删除该联系人?',
set_default_contact_tips: '设置默认联系人可在预订时自动添加该联系人',
default_contact: '默认联系人',
chinese_mainland: '中国大陆',
primary: '默认',
please_input: '请填写',
please_select: '请选择',
lastname_err: '姓氏不支持数字、空格和特殊符号',
firstname_err: '名字不支持数字、空格和特殊符号',
mobile_err: '请输入11位有效号码',
email_err: '请输入正确的邮箱地址',
mobile: '手机',
sure_back_txt: '确定不保存退出吗?',
sure_back: '确定退出',
timeline_timeout: '您搜索的航班结果已过期,价格可能发生变化,请刷新以获取最新结果',
i_got_it: '我知道了',
booking_bottom_tips: '飞呀辅助订购为你提供快捷预订通道,无需跳转到第三方平台。但出票和退改签服务仍由第三方机票服务平台提供。',
verify_timeout_tips: '验价超时,请重试',
program_error_tips: '该航班价格已经售完,\n请尝试现在其他平台',
contact_exist_error_tips: '该联系人已存在',
try_again: '重试',
return_back: '返回',
ticket_sellout: '哎哟,票已卖光,\n看看其他供应商吧!',
// 优惠券
coupon_coupon: '优惠券',
coupon_discard: '不使用优惠券',
coupon_use_now: '立即使用 >',
coupon_effect_date: '有效期',
coupon_rules: '使用规则',
coupon_null: '当前没有优惠券',
coupon_tips_title: 'Tips',
coupon_tips: '听说“iGola骑鹅旅行”微信公众号经常发放优惠券哦!',
coupon_ready: '可使用',
coupon_locked: '已锁定',
coupon_used: '已使用',
coupon_expired: '已过期',
// 乘机人
passenger_add_passenger: '添加乘机人',
passenger_edit_passenger: '编辑乘机人',
passenger_choose_passenger: '选择乘机人',
passenger_credentials_type: '证件类型',
passenger_credentials_num: '证件号码',
passenger_credentials_num_placeholder: '输入护照号码',
passenger_credentials_expiredAt: '证件有效期',
passenger_credentials_issueAt: '签发地',
passport: '护照',
ID_card: '身份证',
HK_Macau_Entry_Exit_Permit: '港澳通行证',
Exit_Entry_Permit_Republic_of_China: '台湾通行证',
Mainland_travel_permit_for_Taiwan_residents: '台胞证',
reentry_permit: '回乡证',
PP: '护照',
ID: '身份证',
GA: '港澳通行证',
TW: '台湾通行证',
TB: '台胞证',
HX: '回乡证',
passenger_notify: '*为了您能顺利出行,请确保出行日期至少比证件有效期早6个月',
passenger_base_info: '乘机人信息',
passenger_add_credentials: '添加证件',
passenger_lastname_en: '英文姓',
passenger_firstname_en: '英文名',
passenger_lastname_zh: '中文姓',
passenger_firstname_zh: '中文名',
passenger_lastname_placeholder: '如:张岩应填ZHANG',
passenger_firstname_placeholder: '如:张岩应填YAN',
passenger_birthday: '出生日期',
passenger_nationality: '国籍',
passenger_gender: '性别',
passenger_in_person: '这是我本人',
add_passengers: '新增乘机人',
passenger_leftticket: '余票{num}张,最多选{num}人',
please_enter_num: '请输入{type}号码',
male: '男',
female: '女',
passenger_no_credential_err: '请至少添加1个证件',
ID_card_err: '请输入18位身份证号码',
cardNum_err: '请输入5-15位{type}号码',
name_err: '{type}不能含有数字或特殊字符',
please_input_cardNum: '请输入证件号码',
please_select_expiredAt: '请选择证件有效期',
please_select_issueAt: '请选择签发地',
please_input_lastName_EN: '请输入英文姓',
please_input_firstName_EN: '请输入英文名',
please_input_lastName_ZH: '请输入中文姓',
please_input_firstName_ZH: '请输入中文名',
please_select_birthday: '请选择出生日期',
please_select_nationality: '请选择国籍',
please_select_gender: '请选择性别',
passenger_primary: '本人',
delete_passengers_text: '确定删除该乘机人?',
passengers_set_primary_text: '将乘机人“{name}”设置为本人?',
btn_cancel: '取消',
passengers_I: '我',
passengers_I_msg: '关联我的信息,订票更方便',
passengers_btn_link: '关联',
passengers_btn_link_tips: '请关联或手动添加1位乘机人,设置为我的信息。',
passengers_onlyChild_tips: '暂不支持儿童单独购票,请至少添加一个成人。',
passengers_tooManyChild_tips: '一个成人最多携带两个儿童购票,请确认。',
passengers_sameCard_tips: '不能提交相同的乘机人,请确认。',
passengers_choose_zero_passenger_err: '请至少添加1个乘机人',
} | summary_price: '各大平台实时价格', | random_line_split |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
// XVisual, not capable of representing an alpha-channel in Pixmap form,
// so we look for the configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
/// This may only be called on the compositor side.
pub fn | (&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
assert!(self.pixmap != 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self) {
self.will_leak = true;
}
pub fn mark_wont_leak(&mut self) {
self.will_leak = false;
}
}
| bind_to_texture | identifier_name |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
// XVisual, not capable of representing an alpha-channel in Pixmap form,
// so we look for the configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
} | /// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
assert!(self.pixmap != 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self) {
self.will_leak = true;
}
pub fn mark_wont_leak(&mut self) {
self.will_leak = false;
}
} | unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata { | random_line_split |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
/// Chooses the compositor visual info using the same algorithm that the compositor uses.
///
/// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
// If display is null, we'll assume we are going to be rendering
// in headless mode without X running.
if display == ptr::null_mut() {
return (ptr::null_mut(), None);
}
unsafe {
let fbconfig_attributes = [
glx::DOUBLEBUFFER as i32, 0,
glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
glx::ALPHA_SIZE as i32, 8,
0
];
let screen = XDefaultScreen(display);
let mut number_of_configs = 0;
let configs = glx::ChooseFBConfig(mem::transmute(display),
screen,
fbconfig_attributes.as_ptr(),
&mut number_of_configs);
NativeCompositingGraphicsContext::get_compatible_configuration(display,
configs,
number_of_configs)
}
}
fn get_compatible_configuration(display: *mut Display,
configs: *mut glx::types::GLXFBConfig,
number_of_configs: i32)
-> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
unsafe {
if number_of_configs == 0 {
panic!("glx::ChooseFBConfig returned no configurations.");
}
if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
let config = *configs.offset(0);
let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);
return (mem::transmute(visual), Some(config));
}
// NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
// XVisual, not capable of representing an alpha-channel in Pixmap form,
// so we look for the configuration with a full set of 32 bits.
for i in 0..number_of_configs as isize {
let config = *configs.offset(i);
let visual: *mut XVisualInfo =
mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
if (*visual).depth == 32 {
return (mem::transmute(visual), Some(config));
}
XFree(mem::transmute(visual));
}
panic!("Could not find 32-bit visual.");
}
}
fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
unsafe {
let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
assert!(glXGetClientString as *mut c_void != ptr::null_mut());
let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
if glx_vendor == ptr::null() {
panic!("Could not determine GLX vendor.");
}
let glx_vendor =
str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
.ok()
.expect("GLX client vendor string not in UTF-8 format.");
let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
}
}
/// Creates a native graphics context from the given X display connection. This uses GLX. Only
/// the compositor is allowed to call this.
pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
NativeCompositingGraphicsContext {
display: display,
framebuffer_configuration: fbconfig,
}
}
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
/// Creates graphics metadata from a metadata descriptor.
pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
-> NativeGraphicsMetadata {
// WARNING: We currently rely on the X display connection being the
// same in both the Painting and Compositing contexts, as otherwise
// the X Pixmap will not be sharable across them. Using this
// method breaks that assumption.
unsafe {
let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
let display = XOpenDisplay(c_str.as_ptr() as *mut _);
if display.is_null() {
panic!("XOpenDisplay() failed!");
}
NativeGraphicsMetadata {
display: display,
}
}
}
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
/// Creates a metadata descriptor from metadata.
pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
unsafe {
let c_str = XDisplayString(metadata.display) as *const _;
let bytes = CStr::from_ptr(c_str).to_bytes();
NativeGraphicsMetadataDescriptor {
display: str::from_utf8(bytes).unwrap().to_string(),
}
}
}
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
}
}
}
impl PixmapNativeSurface {
fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
PixmapNativeSurface {
pixmap: pixmap,
will_leak: true,
}
}
pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
-> PixmapNativeSurface {
unsafe {
let surface = SkiaSkNativeSharedGLContextStealSurface(context);
PixmapNativeSurface::from_pixmap(mem::transmute(surface))
}
}
pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
-> PixmapNativeSurface {
unsafe {
// Create the pixmap.
let screen = XDefaultScreen(native_context.display);
let window = XRootWindow(native_context.display, screen);
// The X server we use for testing on build machines always returns
// visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
// hard code the depth here.
let pixmap = XCreatePixmap(native_context.display,
window,
size.width as c_uint,
size.height as c_uint,
32);
PixmapNativeSurface::from_pixmap(pixmap)
}
}
/// This may only be called on the compositor side.
pub fn bind_to_texture(&self,
native_context: &NativeCompositingGraphicsContext,
texture: &Texture,
size: Size2D<isize>) {
// Create the GLX pixmap.
//
// FIXME(pcwalton): RAII for exception safety?
unsafe {
let pixmap_attributes = [
glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
0
];
let glx_display = mem::transmute(native_context.display);
let glx_pixmap = glx::CreatePixmap(glx_display,
native_context.framebuffer_configuration.expect(
"GLX 1.3 should have a framebuffer_configuration"),
self.pixmap,
pixmap_attributes.as_ptr());
let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
let _bound = texture.bind();
glXBindTexImageEXT(native_context.display,
mem::transmute(glx_pixmap),
glx::FRONT_EXT as i32,
ptr::null_mut());
assert_eq!(gl::GetError(), gl::NO_ERROR);
// FIXME(pcwalton): Recycle these for speed?
glx::DestroyPixmap(glx_display, glx_pixmap);
}
}
/// This may only be called on the painting side.
pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
unsafe {
// Ensure that we're running on the render task. Take the display.
let pixmap = self.pixmap;
// Figure out the width, height, and depth of the pixmap.
let mut root_window = 0;
let mut x = 0;
let mut y = 0;
let mut width = 0;
let mut height = 0;
let mut border_width = 0;
let mut depth = 0;
let _ = XGetGeometry(graphics_context.display,
mem::transmute(pixmap),
&mut root_window,
&mut x,
&mut y,
&mut width,
&mut height,
&mut border_width,
&mut depth);
// Create the image.
let image = XCreateImage(graphics_context.display,
(*graphics_context.visual_info).visual,
depth,
ZPixmap,
0,
mem::transmute(&data[0]),
width as c_uint,
height as c_uint,
32,
0);
// Create the X graphics context.
let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());
// Draw the image.
let _ = XPutImage(graphics_context.display,
pixmap,
gc,
image,
0,
0,
0,
0,
width,
height);
}
}
pub fn get_id(&self) -> isize {
self.pixmap as isize
}
pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
unsafe {
assert!(self.pixmap != 0);
XFreePixmap(graphics_context.display, self.pixmap);
self.mark_wont_leak()
}
}
pub fn mark_will_leak(&mut self) |
pub fn mark_wont_leak(&mut self) {
self.will_leak = false;
}
}
| {
self.will_leak = true;
} | identifier_body |
surface.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Implementation of cross-process surfaces for Linux. This uses X pixmaps.
#![allow(non_snake_case)]
use texturegl::Texture;
use geom::size::Size2D;
use libc::{c_char, c_int, c_uint, c_void};
use glx;
use gleam::gl;
use skia::{SkiaSkNativeSharedGLContextRef, SkiaSkNativeSharedGLContextStealSurface};
use std::ascii::OwnedAsciiExt;
use std::ffi::{CString, CStr};
use std::mem;
use std::ptr;
use std::str;
use xlib::{Display, Pixmap, XCreateGC, XCreateImage, XCreatePixmap, XDefaultScreen};
use xlib::{XDisplayString, XFree, XFreePixmap, XGetGeometry, XOpenDisplay, XPutImage, XRootWindow};
use xlib::{XVisualInfo, ZPixmap};
/// The display and visual info. This is needed in order to upload on the painting side. This
/// holds a weak reference to the display and will not close it when done.
///
/// FIXME(pcwalton): Mark nonsendable.
#[allow(missing_copy_implementations)]
pub struct NativePaintingGraphicsContext {
pub display: *mut Display,
visual_info: *mut XVisualInfo,
}
impl NativePaintingGraphicsContext {
pub fn from_metadata(metadata: &NativeGraphicsMetadata) -> NativePaintingGraphicsContext {
// FIXME(pcwalton): It would be more robust to actually have the compositor pass the
// visual.
let (compositor_visual_info, _) =
NativeCompositingGraphicsContext::compositor_visual_info(metadata.display);
NativePaintingGraphicsContext {
display: metadata.display,
visual_info: compositor_visual_info,
}
}
}
/// The display, visual info, and framebuffer configuration. This is needed in order to bind to a
/// texture on the compositor side. This holds only a *weak* reference to the display and does not
/// close it.
///
/// FIXME(pcwalton): Unchecked weak references are bad and can violate memory safety. This is hard
/// to fix because the Display is given to us by the native windowing system, but we should fix it
/// someday.
///
/// FIXME(pcwalton): Mark nonsendable.
#[derive(Copy, Clone)]
pub struct NativeCompositingGraphicsContext {
display: *mut Display,
framebuffer_configuration: Option<glx::types::GLXFBConfig>,
}
impl NativeCompositingGraphicsContext {
    /// Chooses the compositor visual info using the same algorithm that the compositor uses.
    ///
    /// Returns a null visual and `None` for the config when `display` is null (headless mode).
    ///
    /// FIXME(pcwalton): It would be more robust to actually have the compositor pass the visual.
    fn compositor_visual_info(display: *mut Display) -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
        // If display is null, we'll assume we are going to be rendering
        // in headless mode without X running.
        if display == ptr::null_mut() {
            return (ptr::null_mut(), None);
        }

        unsafe {
            // Request a non-double-buffered, RGBA, pixmap/window-capable config with an
            // 8-bit alpha channel that can be bound as a texture
            // (GLX_EXT_texture_from_pixmap). The attribute list is 0-terminated.
            let fbconfig_attributes = [
                glx::DOUBLEBUFFER as i32, 0,
                glx::DRAWABLE_TYPE as i32, glx::PIXMAP_BIT as i32 | glx::WINDOW_BIT as i32,
                glx::BIND_TO_TEXTURE_RGBA_EXT as i32, 1,
                glx::RENDER_TYPE as i32, glx::RGBA_BIT as i32,
                glx::ALPHA_SIZE as i32, 8,
                0
            ];

            let screen = XDefaultScreen(display);
            let mut number_of_configs = 0;
            let configs = glx::ChooseFBConfig(mem::transmute(display),
                                              screen,
                                              fbconfig_attributes.as_ptr(),
                                              &mut number_of_configs);
            NativeCompositingGraphicsContext::get_compatible_configuration(display,
                                                                           configs,
                                                                           number_of_configs)
        }
    }

    /// Picks a config (and its visual) from the candidates returned by `glXChooseFBConfig`.
    /// On drivers that need it (see `need_to_find_32_bit_depth_visual`) the first config whose
    /// visual reports a full 32-bit depth is chosen; otherwise the first candidate is used.
    ///
    /// Panics when no suitable configuration exists.
    fn get_compatible_configuration(display: *mut Display,
                                    configs: *mut glx::types::GLXFBConfig,
                                    number_of_configs: i32)
                                    -> (*mut XVisualInfo, Option<glx::types::GLXFBConfig>) {
        unsafe {
            if number_of_configs == 0 {
                panic!("glx::ChooseFBConfig returned no configurations.");
            }

            if !NativeCompositingGraphicsContext::need_to_find_32_bit_depth_visual(display) {
                let config = *configs.offset(0);
                let visual = glx::GetVisualFromFBConfig(mem::transmute(display), config);

                return (mem::transmute(visual), Some(config));
            }

            // NVidia (and AMD/ATI) drivers have RGBA configurations that use 24-bit
            // XVisual, not capable of representing an alpha-channel in Pixmap form,
            // so we look for the configuration with a full set of 32 bits.
            for i in 0..number_of_configs as isize {
                let config = *configs.offset(i);
                let visual: *mut XVisualInfo =
                    mem::transmute(glx::GetVisualFromFBConfig(mem::transmute(display), config));
                if (*visual).depth == 32 {
                    return (mem::transmute(visual), Some(config));
                }
                // Not a match - free the XVisualInfo that GLX allocated for us.
                XFree(mem::transmute(visual));
            }

            panic!("Could not find 32-bit visual.");
        }
    }

    /// Returns true when the GLX client vendor string contains "nvidia" or "ati",
    /// i.e. when the 32-bit-depth visual search above is required.
    fn need_to_find_32_bit_depth_visual(display: *mut Display) -> bool {
        unsafe {
            // NOTE(review): the dynamic lookup below only asserts that the symbol
            // exists; the actual call goes through glx::GetClientString.
            let glXGetClientString: extern "C" fn(*mut Display, c_int) -> *const c_char =
                mem::transmute(glx::GetProcAddress(mem::transmute(&"glXGetClientString\x00".as_bytes()[0])));
            assert!(glXGetClientString as *mut c_void != ptr::null_mut());

            let glx_vendor = glx::GetClientString(mem::transmute(display), glx::VENDOR as i32);
            if glx_vendor == ptr::null() {
                panic!("Could not determine GLX vendor.");
            }
            let glx_vendor =
                str::from_utf8(CStr::from_ptr(glx_vendor).to_bytes())
                    .ok()
                    .expect("GLX client vendor string not in UTF-8 format.");
            let glx_vendor = String::from_str(glx_vendor).into_ascii_lowercase();
            glx_vendor.contains("nvidia") || glx_vendor.contains("ati")
        }
    }

    /// Creates a native graphics context from the given X display connection. This uses GLX. Only
    /// the compositor is allowed to call this.
    pub fn from_display(display: *mut Display) -> NativeCompositingGraphicsContext {
        let (_, fbconfig) = NativeCompositingGraphicsContext::compositor_visual_info(display);
        NativeCompositingGraphicsContext {
            display: display,
            framebuffer_configuration: fbconfig,
        }
    }
}
/// The X display.
#[derive(Clone, Copy)]
pub struct NativeGraphicsMetadata {
pub display: *mut Display,
}
unsafe impl Send for NativeGraphicsMetadata {}
impl NativeGraphicsMetadata {
    /// Creates graphics metadata from a metadata descriptor by opening a fresh
    /// connection to the X display named in the descriptor.
    ///
    /// Panics when the display cannot be opened.
    pub fn from_descriptor(descriptor: &NativeGraphicsMetadataDescriptor)
                           -> NativeGraphicsMetadata {
        // WARNING: We currently rely on the X display connection being the
        // same in both the Painting and Compositing contexts, as otherwise
        // the X Pixmap will not be sharable across them. Using this
        // method breaks that assumption.
        unsafe {
            let c_str = CString::new(descriptor.display.as_bytes()).unwrap();
            let display = XOpenDisplay(c_str.as_ptr() as *mut _);
            if display.is_null() {
                panic!("XOpenDisplay() failed!");
            }
            NativeGraphicsMetadata {
                display: display,
            }
        }
    }
}
/// A sendable form of the X display string.
#[derive(Clone, RustcDecodable, RustcEncodable)]
pub struct NativeGraphicsMetadataDescriptor {
display: String,
}
impl NativeGraphicsMetadataDescriptor {
    /// Creates a metadata descriptor from metadata by capturing the display's
    /// connection string (XDisplayString), which - unlike the raw pointer - is
    /// sendable across process boundaries.
    ///
    /// Panics when the display string is not valid UTF-8.
    pub fn from_metadata(metadata: NativeGraphicsMetadata) -> NativeGraphicsMetadataDescriptor {
        unsafe {
            let c_str = XDisplayString(metadata.display) as *const _;
            let bytes = CStr::from_ptr(c_str).to_bytes();
            NativeGraphicsMetadataDescriptor {
                display: str::from_utf8(bytes).unwrap().to_string(),
            }
        }
    }
}
#[derive(RustcDecodable, RustcEncodable)]
pub struct PixmapNativeSurface {
/// The pixmap.
pixmap: Pixmap,
/// Whether this pixmap will leak if the destructor runs. This is for debugging purposes.
will_leak: bool,
}
impl Drop for PixmapNativeSurface {
fn drop(&mut self) {
if self.will_leak |
}
}
impl PixmapNativeSurface {
    /// Wraps an existing X pixmap. The surface is initially marked as
    /// will-leak until ownership is settled (see `mark_wont_leak`/`destroy`).
    fn from_pixmap(pixmap: Pixmap) -> PixmapNativeSurface {
        PixmapNativeSurface {
            pixmap: pixmap,
            will_leak: true,
        }
    }

    /// Takes ownership of the surface backing a Skia shared GL context.
    pub fn from_skia_shared_gl_context(context: SkiaSkNativeSharedGLContextRef)
                                       -> PixmapNativeSurface {
        unsafe {
            let surface = SkiaSkNativeSharedGLContextStealSurface(context);
            PixmapNativeSurface::from_pixmap(mem::transmute(surface))
        }
    }

    /// Allocates a fresh 32-bit-deep pixmap of the given size on the painting
    /// display's root window. `_stride` is unused for X pixmaps.
    pub fn new(native_context: &NativePaintingGraphicsContext, size: Size2D<i32>, _stride: i32)
               -> PixmapNativeSurface {
        unsafe {
            // Create the pixmap.
            let screen = XDefaultScreen(native_context.display);
            let window = XRootWindow(native_context.display, screen);
            // The X server we use for testing on build machines always returns
            // visuals that report 24 bit depth. But creating a 32 bit pixmap does work, so
            // hard code the depth here.
            let pixmap = XCreatePixmap(native_context.display,
                                       window,
                                       size.width as c_uint,
                                       size.height as c_uint,
                                       32);
            PixmapNativeSurface::from_pixmap(pixmap)
        }
    }

    /// This may only be called on the compositor side.
    ///
    /// Binds the pixmap's contents to the currently configured GL texture via
    /// GLX_EXT_texture_from_pixmap (glXBindTexImageEXT); the temporary GLX
    /// pixmap is destroyed again before returning.
    pub fn bind_to_texture(&self,
                           native_context: &NativeCompositingGraphicsContext,
                           texture: &Texture,
                           size: Size2D<isize>) {
        // Create the GLX pixmap.
        //
        // FIXME(pcwalton): RAII for exception safety?
        unsafe {
            // 0-terminated attribute list: bind as a 2D RGBA texture.
            let pixmap_attributes = [
                glx::TEXTURE_TARGET_EXT as i32, glx::TEXTURE_2D_EXT as i32,
                glx::TEXTURE_FORMAT_EXT as i32, glx::TEXTURE_FORMAT_RGBA_EXT as i32,
                0
            ];

            let glx_display = mem::transmute(native_context.display);

            let glx_pixmap = glx::CreatePixmap(glx_display,
                                               native_context.framebuffer_configuration.expect(
                                                   "GLX 1.3 should have a framebuffer_configuration"),
                                               self.pixmap,
                                               pixmap_attributes.as_ptr());

            let glXBindTexImageEXT: extern "C" fn(*mut Display, glx::types::GLXDrawable, c_int, *mut c_int) =
                mem::transmute(glx::GetProcAddress(mem::transmute(&"glXBindTexImageEXT\x00".as_bytes()[0])));
            assert!(glXBindTexImageEXT as *mut c_void != ptr::null_mut());
            let _bound = texture.bind();
            glXBindTexImageEXT(native_context.display,
                               mem::transmute(glx_pixmap),
                               glx::FRONT_EXT as i32,
                               ptr::null_mut());
            assert_eq!(gl::GetError(), gl::NO_ERROR);

            // FIXME(pcwalton): Recycle these for speed?
            glx::DestroyPixmap(glx_display, glx_pixmap);
        }
    }

    /// This may only be called on the painting side.
    ///
    /// Uploads raw image `data` into the pixmap with XPutImage; the pixmap's
    /// geometry (width/height/depth) is queried via XGetGeometry rather than
    /// trusted from the caller.
    pub fn upload(&mut self, graphics_context: &NativePaintingGraphicsContext, data: &[u8]) {
        unsafe {
            // Ensure that we're running on the render task. Take the display.
            let pixmap = self.pixmap;

            // Figure out the width, height, and depth of the pixmap.
            let mut root_window = 0;
            let mut x = 0;
            let mut y = 0;
            let mut width = 0;
            let mut height = 0;
            let mut border_width = 0;
            let mut depth = 0;
            let _ = XGetGeometry(graphics_context.display,
                                 mem::transmute(pixmap),
                                 &mut root_window,
                                 &mut x,
                                 &mut y,
                                 &mut width,
                                 &mut height,
                                 &mut border_width,
                                 &mut depth);

            // Create the image.
            let image = XCreateImage(graphics_context.display,
                                     (*graphics_context.visual_info).visual,
                                     depth,
                                     ZPixmap,
                                     0,
                                     mem::transmute(&data[0]),
                                     width as c_uint,
                                     height as c_uint,
                                     32,
                                     0);

            // Create the X graphics context.
            let gc = XCreateGC(graphics_context.display, pixmap, 0, ptr::null_mut());

            // Draw the image.
            let _ = XPutImage(graphics_context.display,
                              pixmap,
                              gc,
                              image,
                              0,
                              0,
                              0,
                              0,
                              width,
                              height);
        }
    }

    /// Returns the raw pixmap XID as an opaque surface identifier.
    pub fn get_id(&self) -> isize {
        self.pixmap as isize
    }

    /// Frees the underlying X pixmap and clears the leak flag.
    pub fn destroy(&mut self, graphics_context: &NativePaintingGraphicsContext) {
        unsafe {
            assert!(self.pixmap != 0);
            XFreePixmap(graphics_context.display, self.pixmap);
            self.mark_wont_leak()
        }
    }

    /// Flags this surface so that dropping it without destroy() is an error.
    pub fn mark_will_leak(&mut self) {
        self.will_leak = true;
    }

    /// Clears the leak flag (ownership transferred or pixmap already freed).
    pub fn mark_wont_leak(&mut self) {
        self.will_leak = false;
    }
}
| {
panic!("You should have disposed of the pixmap properly with destroy()! This pixmap \
will leak!");
} | conditional_block |
mountfs.go | // Package fs provides mountpath and FQN abstractions and methods to resolve/map stored content
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package fs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/ios"
"github.com/OneOfOne/xxhash"
)
const (
pkgName = "fs"
uQuantum = 10 // each GET adds a "quantum" of utilization to the mountpath
)
// mountpath lifecycle-change enum
const (
Add = "add-mp"
Remove = "remove-mp"
Enable = "enable-mp"
Disable = "disable-mp"
)
// globals
var (
Mountpaths *MountedFS
)
// Terminology:
// - a mountpath is equivalent to (configurable) fspath - both terms are used interchangeably;
// - each mountpath is, simply, a local directory that is serviced by a local filesystem;
// - there's a 1-to-1 relationship between a mountpath and a local filesystem
// (different mountpaths map onto different filesystems, and vise versa);
// - mountpaths of the form <filesystem-mountpoint>/a/b/c are supported.
type (
MPI map[string]*MountpathInfo
PathRunGroup interface {
Reg(r PathRunner)
Unreg(r PathRunner)
}
// As a rule, running xactions are aborted and restarted on any mountpath change.
// But for a few xactions it can be too harsh. E.g, aborting and restarting
// `download` xaction results in waste of time and network traffic to
// redownload objects. These xactions should subscribe to mountpath changes
// as a `PathRunner`s to `PathRunGroup` events and adapt on the fly.
PathRunner interface {
cmn.Runner
Name() string
ReqAddMountpath(mpath string)
ReqRemoveMountpath(mpath string)
ReqEnableMountpath(mpath string)
ReqDisableMountpath(mpath string)
}
MountpathInfo struct {
Path string // Cleaned OrigPath
OrigPath string // As entered by the user, must be used for logging / returning errors
Fsid syscall.Fsid
FileSystem string
PathDigest uint64
// atomic, only increasing counter to prevent name conflicts
// see: FastRemoveDir method
removeDirCounter atomic.Uint64
// LOM caches
lomCaches cmn.MultiSyncMap
}
// MountedFS holds all mountpaths for the target.
MountedFS struct {
mu sync.Mutex
// fsIDs is set in which we store fsids of mountpaths. This allows for
// determining if there are any duplications of file system - we allow
// only one mountpath per file system.
fsIDs map[syscall.Fsid]string
// checkFsID determines if we should actually check FSID when adding new
// mountpath. By default it is set to true.
checkFsID bool
// Available mountpaths - mountpaths which are used to store the data.
available atomic.Pointer
// Disabled mountpaths - mountpaths which for some reason did not pass
// the health check and cannot be used for a moment.
disabled atomic.Pointer
// Cached pointer to mountpathInfo used to store BMD
xattrMpath atomic.Pointer
// Iostats for the available mountpaths
ios ios.IOStater
}
ChangeReq struct {
Action string // MountPath action enum (above)
Path string // path
}
)
func MountpathAdd(p string) ChangeReq { return ChangeReq{Action: Add, Path: p} }
func MountpathRem(p string) ChangeReq { return ChangeReq{Action: Remove, Path: p} }
func MountpathEnb(p string) ChangeReq { return ChangeReq{Action: Enable, Path: p} }
func MountpathDis(p string) ChangeReq { return ChangeReq{Action: Disable, Path: p} }
//
// MountpathInfo
//
// newMountpath constructs a MountpathInfo for the given (already validated and
// cleaned) path. PathDigest is an xxhash of the clean path, used for HRW-style
// selection (see MpathForMetadata). The remove-dir counter is seeded with the
// current time to reduce the chance of temporary-directory name collisions
// after a mountpath is removed and re-added (see FastRemoveDir).
func newMountpath(cleanPath, origPath string, fsid syscall.Fsid, fs string) *MountpathInfo {
	mi := &MountpathInfo{
		Path:       cleanPath,
		OrigPath:   origPath,
		Fsid:       fsid,
		FileSystem: fs,
		PathDigest: xxhash.ChecksumString64S(cleanPath, cmn.MLCG32),
	}
	// Seed with wall-clock time so a re-added mountpath does not reuse the
	// counter values of its previous incarnation.
	mi.removeDirCounter.Store(uint64(time.Now().UnixNano()))
	return mi
}
func (mi *MountpathInfo) LomCache(idx int) *sync.Map { return mi.lomCaches.Get(idx) }
// evictLomCache drops every entry from each of this mountpath's LOM caches.
func (mi *MountpathInfo) evictLomCache() {
	for i := range mi.lomCaches.M {
		c := mi.LomCache(i)
		c.Range(func(k interface{}, _ interface{}) bool {
			c.Delete(k)
			return true
		})
	}
}
// FastRemoveDir removes directory in steps:
// 1. Synchronously gets temporary directory name
// 2. Synchronously renames old folder to temporary directory
// 3. Asynchronously deletes temporary directory
//
// The rename is cheap (same filesystem), so callers observe the directory
// disappearing immediately while the expensive recursive delete proceeds in
// the background.
func (mi *MountpathInfo) FastRemoveDir(dir string) error {
	// `dir` will be renamed to non-existing bucket. Then we will
	// try to remove it asynchronously. In case of power cycle we expect that
	// LRU will take care of removing the rest of the bucket.
	var (
		counter        = mi.removeDirCounter.Inc()
		nonExistingDir = fmt.Sprintf("$removing-%d", counter)
		tmpDir         = filepath.Join(mi.Path, nonExistingDir)
	)
	// loose assumption: removing something which doesn't exist is fine
	if err := Access(dir); err != nil && os.IsNotExist(err) {
		return nil
	}
	if err := cmn.CreateDir(filepath.Dir(tmpDir)); err != nil {
		return err
	}
	if err := os.Rename(dir, tmpDir); err != nil {
		if os.IsExist(err) {
			// Slow path - `tmpDir` (or rather `nonExistingDir`) for some reason already exists...
			//
			// Even though `nonExistingDir` should not exist we cannot fully be sure.
			// There are at least two cases when this might not be true:
			//  1. `nonExistingDir` is leftover after target crash.
			//  2. Mountpath was removed and then added again. The counter
			//     will be reset and if we will be removing dirs quickly enough
			//     the counter will catch up with counter of the previous mountpath.
			//     If the directories from the previous mountpath were not yet removed
			//     (slow disk or filesystem) we can end up with the same name.
			// For now we try to fight this with randomizing the initial counter.

			// In background remove leftover directory.
			go func() {
				glog.Errorf("%s already exists, removing...", tmpDir)
				if err := os.RemoveAll(tmpDir); err != nil {
					glog.Errorf("removing leftover %s failed, err: %v", tmpDir, err)
				}
			}()

			// This time generate fully unique name...
			tmpDir, err = ioutil.TempDir(mi.Path, nonExistingDir)
			if err != nil {
				return err
			}
			// Retry renaming - hopefully it should succeed now.
			err = os.Rename(dir, tmpDir)
		}
		// Someone removed dir before os.Rename, nothing more to do.
		if os.IsNotExist(err) {
			return nil
		}
		if err != nil {
			return err
		}
	}
	// Schedule removing temporary directory which is our old `dir`
	go func() {
		// TODO: in the future, the actual operation must be delegated to LRU
		// that'd take of care of it while pacing itself with regards to the
		// current disk utilization and space availability.
		if err := os.RemoveAll(tmpDir); err != nil {
			glog.Errorf("RemoveAll for %q failed with %v", tmpDir, err)
		}
	}()
	return nil
}
// IsIdle reports whether this mountpath's current disk utilization is below
// the configured low watermark. A nil config falls back to the global one.
// A negative utilization (stats not available) yields false.
func (mi *MountpathInfo) IsIdle(config *cmn.Config, timestamp time.Time) bool {
	if config == nil {
		config = cmn.GCO.Get()
	}
	curr := Mountpaths.ios.GetMpathUtil(mi.Path, timestamp)
	return curr >= 0 && curr < config.Disk.DiskUtilLowWM
}
// String implements fmt.Stringer: clean path plus owning filesystem.
func (mi *MountpathInfo) String() string {
	return fmt.Sprintf("mp[%s, fs=%s]", mi.Path, mi.FileSystem)
}
///////////////
// make-path //
///////////////
// makePathBuf builds the byte representation of the on-disk path for the
// given (bucket, content-type) pair rooted at this mountpath. The layout is:
// mountpath, then provider, then optional namespace, then optional bucket
// name, then optional content type - each component preceded by the path
// separator and the corresponding pref* marker byte.
// `extra` reserves additional capacity (e.g. for an object name to be
// appended by the caller - see MakePathFQN).
func (mi *MountpathInfo) makePathBuf(bck cmn.Bck, contentType string, extra int) (buf []byte) {
	var (
		nsLen, bckNameLen, ctLen int

		provLen = 1 + 1 + len(bck.Provider) // separator + marker + provider
	)
	if !bck.Ns.IsGlobal() {
		nsLen = 1 // separator
		if bck.Ns.IsRemote() {
			nsLen += 1 + len(bck.Ns.UUID)
		}
		nsLen += 1 + len(bck.Ns.Name)
	}
	if bck.Name != "" {
		bckNameLen = 1 + len(bck.Name) // separator + name
	}
	if contentType != "" {
		// Content type only makes sense inside a named bucket, and must have
		// the fixed well-known length.
		cmn.Assert(bckNameLen > 0)
		cmn.Assert(len(contentType) == contentTypeLen)
		ctLen = 1 + 1 + contentTypeLen
	}

	// Pre-size the buffer exactly so the appends below never reallocate.
	buf = make([]byte, 0, len(mi.Path)+provLen+nsLen+bckNameLen+ctLen+extra)
	buf = append(buf, mi.Path...)
	buf = append(buf, filepath.Separator, prefProvider)
	buf = append(buf, bck.Provider...)
	if nsLen > 0 {
		buf = append(buf, filepath.Separator)
		if bck.Ns.IsRemote() {
			buf = append(buf, prefNsUUID)
			buf = append(buf, bck.Ns.UUID...)
		}
		buf = append(buf, prefNsName)
		buf = append(buf, bck.Ns.Name...)
	}
	if bckNameLen > 0 {
		buf = append(buf, filepath.Separator)
		buf = append(buf, bck.Name...)
	}
	if ctLen > 0 {
		buf = append(buf, filepath.Separator, prefCT)
		buf = append(buf, contentType...)
	}
	return
}
// MakePathBck returns the bucket's root directory on this mountpath.
// The zero-copy []byte->string conversion is safe here: buf is not mutated
// after the conversion and never escapes as a slice.
func (mi *MountpathInfo) MakePathBck(bck cmn.Bck) string {
	buf := mi.makePathBuf(bck, "", 0)
	return *(*string)(unsafe.Pointer(&buf))
}
// MakePathCT returns the content-type subdirectory of the given bucket on
// this mountpath (zero-copy string conversion - see MakePathBck).
func (mi *MountpathInfo) MakePathCT(bck cmn.Bck, contentType string) string {
	cmn.AssertMsg(bck.Valid(), bck.String())
	cmn.Assert(contentType != "")
	buf := mi.makePathBuf(bck, contentType, 0)
	return *(*string)(unsafe.Pointer(&buf))
}
// MakePathFQN returns the fully-qualified name of the object within the given
// bucket and content type on this mountpath.
func (mi *MountpathInfo) MakePathFQN(bck cmn.Bck, contentType, objName string) string {
	cmn.AssertMsg(bck.Valid(), bck.String())
	cmn.Assert(contentType != "" && objName != "")
	// Reserve room for separator + object name so the appends below do not grow the buffer.
	buf := mi.makePathBuf(bck, contentType, 1+len(objName))
	buf = append(buf, filepath.Separator)
	buf = append(buf, objName...)
	return *(*string)(unsafe.Pointer(&buf))
}
//
// MountedFS
//
// LoadBalanceGET selects, among the object and its mirror copies, the FQN
// residing on the least loaded mountpath, and returns it. The decision
// combines current disk utilization with a per-mountpath GET round-robin
// counter: each in-flight GET adds a uQuantum of "virtual" utilization.
// Falls back to the original object when its mountpath is unknown to iostats.
func (mfs *MountedFS) LoadBalanceGET(objfqn, objmpath string, copies MPI, now time.Time) (fqn string) {
	var (
		mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
		objutil, ok          = mpathUtils[objmpath]
		rr, _                = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
		util                 = objutil
		r                    = rr
	)
	fqn = objfqn
	if !ok {
		// Unknown mountpath - debug-assert and serve the original object.
		cmn.DassertMsg(false, objmpath, pkgName)
		return
	}
	for copyfqn, copympi := range copies {
		var (
			u        int64
			c, rrcnt int32
		)
		if u, ok = mpathUtils[copympi.Path]; !ok {
			continue
		}
		if r, ok = mpathRRs[copympi.Path]; !ok {
			// No RR counter for this copy's mountpath - compare utilization only.
			if u < util {
				fqn, util, rr = copyfqn, u, r
			}
			continue
		}
		c = r.Load()
		if rr != nil {
			rrcnt = rr.Load()
		}
		if u < util && c <= rrcnt { // the obvious choice
			fqn, util, rr = copyfqn, u, r
			continue
		}
		if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
			fqn, util, rr = copyfqn, u, r
		}
	}
	// NOTE: the counter could've been already inc-ed
	// could keep track of the second best and use CAS to recerve-inc and compare
	// can wait though
	if rr != nil {
		rr.Inc()
	}
	return
}
// ios delegators
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
return mfs.ios.GetMpathUtil(mpath, now)
}
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
utils, _ = mfs.ios.GetAllMpathUtils(now)
return
}
func (mfs *MountedFS) LogAppend(lines []string) []string {
return mfs.ios.LogAppend(lines)
}
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
	if len(fsPaths) == 0 {
		// (usability) not to clutter the log with backtraces when starting up and validating config
		return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
	}
	for _, fspath := range fsPaths {
		err := mfs.Add(fspath)
		if err != nil {
			return err
		}
	}
	return nil
}
// Add adds new mountpath to the target's mountpaths. It validates
// accessibility, resolves the owning filesystem, and rejects duplicates
// (same path, or same FSID when checkFsID is enabled).
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return err
	}
	if err := Access(cleanMpath); err != nil {
		// NOTE: wrap with %w (consistent with the statfs case below) so that
		// callers can inspect the cause via errors.Is/errors.As.
		return fmt.Errorf("fspath %q %s, err: %w", mpath, cmn.DoesNotExist, err)
	}
	statfs := syscall.Statfs_t{}
	if err := syscall.Statfs(cleanMpath, &statfs); err != nil {
		return fmt.Errorf("cannot statfs fspath %q, err: %w", mpath, err)
	}
	fs, err := fqn2fsAtStartup(cleanMpath)
	if err != nil {
		return fmt.Errorf("cannot get filesystem: %w", err)
	}
	mp := newMountpath(cleanMpath, mpath, statfs.Fsid, fs)

	mfs.mu.Lock()
	defer mfs.mu.Unlock()

	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if _, exists := availablePaths[mp.Path]; exists {
		return fmt.Errorf("tried to add already registered mountpath: %v", mp.Path)
	}
	if existingPath, exists := mfs.fsIDs[mp.Fsid]; exists && mfs.checkFsID {
		return fmt.Errorf("tried to add path %v but same fsid (%v) was already registered by %v", mpath, mp.Fsid, existingPath)
	}
	// Register with iostats and publish the updated sets atomically.
	mfs.ios.AddMpath(mp.Path, mp.FileSystem)
	availablePaths[mp.Path] = mp
	mfs.fsIDs[mp.Fsid] = cleanMpath
	mfs.updatePaths(availablePaths, disabledPaths)
	return nil
}
// Remove removes mountpaths from the target's mountpaths. It searches
// for the mountpath in available and disabled (if the mountpath is not found
// in available).
func (mfs *MountedFS) Remove(mpath string) error {
	var (
		mp     *MountpathInfo
		exists bool
	)

	mfs.mu.Lock()
	defer mfs.mu.Unlock()

	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return err
	}
	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if mp, exists = availablePaths[cleanMpath]; !exists {
		// Not available - try the disabled set. No iostats bookkeeping here:
		// the mountpath was already unregistered when it was disabled.
		if mp, exists = disabledPaths[cleanMpath]; !exists {
			return fmt.Errorf("tried to remove non-existing mountpath: %v", mpath)
		}
		delete(disabledPaths, cleanMpath)
		delete(mfs.fsIDs, mp.Fsid)
		mfs.updatePaths(availablePaths, disabledPaths)
		return nil
	}

	delete(availablePaths, cleanMpath)
	mfs.ios.RemoveMpath(cleanMpath)
	delete(mfs.fsIDs, mp.Fsid)
	// Evict the (potentially large) LOM cache asynchronously.
	go mp.evictLomCache()

	if l := len(availablePaths); l == 0 {
		glog.Errorf("removed the last available mountpath %s", mp)
	} else {
		glog.Infof("removed mountpath %s (%d remain(s) active)", mp, l)
	}

	mfs.updatePaths(availablePaths, disabledPaths)
	return nil
}
// Enable enables previously disabled mountpath. enabled is set to
// true if mountpath has been moved from disabled to available and exists is
// set to true if such mountpath even exists.
func (mfs *MountedFS) Enable(mpath string) (enabled bool, err error) {
	mfs.mu.Lock()
	defer mfs.mu.Unlock()

	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return false, err
	}
	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if _, alreadyEnabled := availablePaths[cleanMpath]; alreadyEnabled {
		// Already in service - nothing to do.
		return false, nil
	}
	mpathInfo, found := disabledPaths[cleanMpath]
	if !found {
		return false, cmn.NewNoMountpathError(mpath)
	}
	// Move the mountpath from the disabled set back into the available one
	// and resume collecting its iostats.
	availablePaths[cleanMpath] = mpathInfo
	mfs.ios.AddMpath(cleanMpath, mpathInfo.FileSystem)
	delete(disabledPaths, cleanMpath)
	mfs.updatePaths(availablePaths, disabledPaths)
	return true, nil
}
// Disable disables an available mountpath. disabled is set to true if
// mountpath has been moved from available to disabled and exists is set to
// true if such mountpath even exists.
func (mfs *MountedFS) Disable(mpath string) (disabled bool, err error) {
	mfs.mu.Lock()
	defer mfs.mu.Unlock()

	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return false, err
	}
	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if mpathInfo, ok := availablePaths[cleanMpath]; ok {
		// Move available => disabled and stop collecting its iostats.
		disabledPaths[cleanMpath] = mpathInfo
		mfs.ios.RemoveMpath(cleanMpath)
		delete(availablePaths, cleanMpath)
		mfs.updatePaths(availablePaths, disabledPaths)
		if l := len(availablePaths); l == 0 {
			glog.Errorf("disabled the last available mountpath %s", mpathInfo)
		} else {
			glog.Infof("disabled mountpath %s (%d remain(s) active)", mpathInfo, l)
		}
		// Evict the (potentially large) LOM cache asynchronously.
		go mpathInfo.evictLomCache()
		return true, nil
	}

	if _, ok := disabledPaths[cleanMpath]; ok {
		// Already disabled - nothing to do.
		return false, nil
	}

	return false, cmn.NewNoMountpathError(mpath)
}
// Returns number of available mountpaths
// NOTE: guards against being called before the first updatePaths - the
// sibling Get() applies the same nil check, NumAvail previously did not.
func (mfs *MountedFS) NumAvail() int {
	if availablePaths := (*MPI)(mfs.available.Load()); availablePaths != nil {
		return len(*availablePaths)
	}
	return 0
}
// Mountpaths returns both available and disabled mountpaths.
func (mfs *MountedFS) Get() (MPI, MPI) |
// DisableFsIDCheck disables fsid checking when adding new mountpath
func (mfs *MountedFS) DisableFsIDCheck() { mfs.checkFsID = false }
// CreateBuckets creates, on every available mountpath, the per-content-type
// directories for each of the specified buckets; `op` is used for logging
// only. Per-bucket failures are accumulated into the returned slice; when
// no such failure occurred but fewer directories were created than expected,
// a single summary error is returned instead.
func (mfs *MountedFS) CreateBuckets(op string, bcks ...cmn.Bck) (errs []error) {
	var (
		availablePaths, _ = mfs.Get()
		totalDirs         = len(availablePaths) * len(bcks) * len(CSM.RegisteredContentTypes)
		totalCreatedDirs  int
	)
	for _, mi := range availablePaths {
		for _, bck := range bcks {
			num, err := mi.createBckDirs(bck)
			if err != nil {
				errs = append(errs, err)
			} else {
				totalCreatedDirs += num
			}
		}
	}
	// NOTE: the count check (and the success log) run only when no per-bucket
	// error occurred above.
	if errs == nil && totalCreatedDirs != totalDirs {
		errs = append(errs, fmt.Errorf("failed to create %d out of %d buckets' directories: %v",
			totalDirs-totalCreatedDirs, totalDirs, bcks))
	}
	if errs == nil && glog.FastV(4, glog.SmoduleFS) {
		glog.Infof("%s(create bucket dirs): %v, num=%d", op, bcks, totalDirs)
	}
	return
}
// DestroyBuckets removes the directories of the specified buckets from all
// available mountpaths (via FastRemoveDir: rename now, delete asynchronously).
// Individual failures are logged; a single summary error is returned when not
// all directories could be destroyed.
func (mfs *MountedFS) DestroyBuckets(op string, bcks ...cmn.Bck) error {
	const destroyStr = "destroy-ais-bucket-dir"
	var (
		availablePaths, _  = mfs.Get()
		totalDirs          = len(availablePaths) * len(bcks)
		totalDestroyedDirs = 0
	)
	for _, mpathInfo := range availablePaths {
		for _, bck := range bcks {
			dir := mpathInfo.MakePathBck(bck)
			if err := mpathInfo.FastRemoveDir(dir); err != nil {
				glog.Errorf("%s: failed to %s (dir: %q, err: %v)", op, destroyStr, dir, err)
			} else {
				totalDestroyedDirs++
			}
		}
	}
	if totalDestroyedDirs != totalDirs {
		return fmt.Errorf("failed to destroy %d out of %d buckets' directories: %v", totalDirs-totalDestroyedDirs, totalDirs, bcks)
	}
	if glog.FastV(4, glog.SmoduleFS) {
		glog.Infof("%s: %s (buckets %v, num dirs %d)", op, destroyStr, bcks, totalDirs)
	}
	return nil
}
// FetchFSInfo aggregates used and total capacity across the filesystems
// backing the available mountpaths. Each distinct filesystem (keyed by its
// Fsid) is counted exactly once even when several mountpaths reside on it;
// statfs failures are logged and skipped.
func (mfs *MountedFS) FetchFSInfo() cmn.FSInfo {
	var (
		fsInfo            = cmn.FSInfo{}
		availablePaths, _ = mfs.Get()
		visitedFS         = make(map[syscall.Fsid]struct{})
	)
	for mpath := range availablePaths {
		statfs := &syscall.Statfs_t{}

		if err := syscall.Statfs(mpath, statfs); err != nil {
			glog.Errorf("Failed to statfs mp %q, err: %v", mpath, err)
			continue
		}
		if _, ok := visitedFS[statfs.Fsid]; ok {
			continue
		}
		visitedFS[statfs.Fsid] = struct{}{}
		fsInfo.FSUsed += (statfs.Blocks - statfs.Bavail) * uint64(statfs.Bsize)
		fsInfo.FSCapacity += statfs.Blocks * uint64(statfs.Bsize)
	}
	if fsInfo.FSCapacity > 0 {
		// FIXME: assuming all mountpaths have approx. the same capacity
		fsInfo.PctFSUsed = float64(fsInfo.FSUsed*100) / float64(fsInfo.FSCapacity)
	}
	return fsInfo
}
// RenameBucketDirs renames bucket directories bckFrom => bckTo on every
// available mountpath. On the first failure it best-effort rolls back the
// renames that already succeeded and returns the original error.
func (mfs *MountedFS) RenameBucketDirs(bckFrom, bckTo cmn.Bck) (err error) {
	availablePaths, _ := mfs.Get()
	renamed := make([]*MountpathInfo, 0, len(availablePaths))
	for _, mpathInfo := range availablePaths {
		fromPath := mpathInfo.MakePathBck(bckFrom)
		toPath := mpathInfo.MakePathBck(bckTo)

		// os.Rename fails when renaming to a directory which already exists.
		// We should remove destination bucket directory before rename. It's reasonable to do so
		// as all targets agreed to rename and rename was committed in BMD.
		os.RemoveAll(toPath)
		if err = os.Rename(fromPath, toPath); err != nil {
			break
		}
		renamed = append(renamed, mpathInfo)
	}

	if err == nil {
		return
	}
	// Rollback: undo the renames that succeeded (best effort; errors logged only).
	for _, mpathInfo := range renamed {
		fromPath := mpathInfo.MakePathBck(bckTo)
		toPath := mpathInfo.MakePathBck(bckFrom)
		if erd := os.Rename(fromPath, toPath); erd != nil {
			glog.Error(erd)
		}
	}
	return
}
// CreateMissingBckDirs creates, for the given bucket, any content-type
// directories that do not yet exist on this mountpath. Existing directories
// are left untouched whether empty or not (compare with createBckDirs).
func (mi *MountpathInfo) CreateMissingBckDirs(bck cmn.Bck) (err error) {
	for contentType := range CSM.RegisteredContentTypes {
		dir := mi.MakePathCT(bck, contentType)
		if err = Access(dir); err == nil {
			continue
		}
		if err = cmn.CreateDir(dir); err != nil {
			return
		}
	}
	return
}
//
// private methods
//
// updatePaths atomically publishes the new available/disabled sets and
// invalidates the cached BMD mountpath (see MpathForMetadata).
// Callers must hold mfs.mu.
func (mfs *MountedFS) updatePaths(available, disabled MPI) {
	mfs.available.Store(unsafe.Pointer(&available))
	mfs.disabled.Store(unsafe.Pointer(&disabled))
	mfs.xattrMpath.Store(unsafe.Pointer(nil))
}
// Creates all CT directories for a given (mountpath, bck)
// NOTE handling of empty dirs: a pre-existing, non-empty CT directory is an
// error for every content type except workfiles, where it is only warned about.
// Returns the number of directories processed (created or validated).
func (mi *MountpathInfo) createBckDirs(bck cmn.Bck) (num int, err error) {
	for contentType := range CSM.RegisteredContentTypes {
		dir := mi.MakePathCT(bck, contentType)
		if err := Access(dir); err == nil {
			// NOTE: `err` from the if-initializer shadows the named result;
			// the assignment below targets the shadowed variable, which is
			// then returned explicitly.
			names, empty, errEmpty := IsDirEmpty(dir)
			if errEmpty != nil {
				return num, errEmpty
			}
			if !empty {
				err = fmt.Errorf("bucket %s: directory %s already exists and is not empty (%v...)", bck, dir, names)
				if contentType != WorkfileType {
					return num, err
				}
				glog.Warning(err)
			}
		} else if err := cmn.CreateDir(dir); err != nil {
			return num, fmt.Errorf("bucket %s: failed to create directory %s: %w", bck, dir, err)
		}
		num++
	}
	return num, nil
}
// mountpathsCopy returns a shallow copy of current mountpaths: fresh maps
// sharing the *MountpathInfo values, so callers can mutate the sets and
// publish them back via updatePaths without racing concurrent readers.
func (mfs *MountedFS) mountpathsCopy() (MPI, MPI) {
	availablePaths, disabledPaths := mfs.Get()
	availableCopy := make(MPI, len(availablePaths))
	// FIX: the disabled copy was previously pre-sized with len(availablePaths).
	disabledCopy := make(MPI, len(disabledPaths))

	for mpath, mpathInfo := range availablePaths {
		availableCopy[mpath] = mpathInfo
	}
	for mpath, mpathInfo := range disabledPaths {
		disabledCopy[mpath] = mpathInfo
	}
	return availableCopy, disabledCopy
}
// String lists the available mountpaths, one per line, preceded by a newline.
func (mfs *MountedFS) String() string {
	availablePaths, _ := mfs.Get()
	var sb strings.Builder
	sb.WriteString("\n")
	for _, mpathInfo := range availablePaths {
		sb.WriteString(mpathInfo.String())
		sb.WriteString("\n")
	}
	return strings.TrimSuffix(sb.String(), "\n")
}
// Select a "random" mountpath using HRW algorithm to store/load bucket metadata
func (mfs *MountedFS) MpathForMetadata() (mpath *MountpathInfo, err error) {
	// fast path: return the cached selection (invalidated by updatePaths)
	mp := mfs.xattrMpath.Load()
	if mp != nil {
		return (*MountpathInfo)(mp), nil
	}

	// slow path: pick the available mountpath with the highest path digest (HRW)
	avail := (*MPI)(mfs.available.Load())
	if len(*avail) == 0 {
		return nil, fmt.Errorf("no mountpath available")
	}
	maxVal := uint64(0)
	for _, m := range *avail {
		if m.PathDigest > maxVal {
			maxVal = m.PathDigest
			mpath = m
		}
	}
	if mpath == nil {
		return nil, fmt.Errorf("failed to choose a mountpath")
	}
	if glog.FastV(4, glog.SmoduleFS) {
		glog.Infof("Mountpath %q selected for storing BMD in xattrs", mpath.Path)
	}
	// Cache for subsequent calls. Benign race: concurrent callers may each
	// compute and store the same deterministic result.
	mfs.xattrMpath.Store(unsafe.Pointer(mpath))
	return mpath, nil
}
| {
var (
availablePaths = (*MPI)(mfs.available.Load())
disabledPaths = (*MPI)(mfs.disabled.Load())
)
if availablePaths == nil {
tmp := make(MPI, 10)
availablePaths = &tmp
}
if disabledPaths == nil {
tmp := make(MPI, 10)
disabledPaths = &tmp
}
return *availablePaths, *disabledPaths
} | identifier_body |
mountfs.go | // Package fs provides mountpath and FQN abstractions and methods to resolve/map stored content
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package fs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/ios"
"github.com/OneOfOne/xxhash"
)
const (
pkgName = "fs"
uQuantum = 10 // each GET adds a "quantum" of utilization to the mountpath
)
// mountpath lifecycle-change enum
const (
Add = "add-mp"
Remove = "remove-mp"
Enable = "enable-mp"
Disable = "disable-mp"
)
// globals
var (
Mountpaths *MountedFS
)
// Terminology:
// - a mountpath is equivalent to (configurable) fspath - both terms are used interchangeably;
// - each mountpath is, simply, a local directory that is serviced by a local filesystem;
// - there's a 1-to-1 relationship between a mountpath and a local filesystem
// (different mountpaths map onto different filesystems, and vise versa);
// - mountpaths of the form <filesystem-mountpoint>/a/b/c are supported.
type (
MPI map[string]*MountpathInfo
PathRunGroup interface {
Reg(r PathRunner)
Unreg(r PathRunner)
}
// As a rule, running xactions are aborted and restarted on any mountpath change.
// But for a few xactions it can be too harsh. E.g, aborting and restarting
// `download` xaction results in waste of time and network traffic to
// redownload objects. These xactions should subscribe to mountpath changes
// as a `PathRunner`s to `PathRunGroup` events and adapt on the fly.
PathRunner interface {
cmn.Runner
Name() string
ReqAddMountpath(mpath string)
ReqRemoveMountpath(mpath string)
ReqEnableMountpath(mpath string)
ReqDisableMountpath(mpath string)
}
MountpathInfo struct {
Path string // Cleaned OrigPath
OrigPath string // As entered by the user, must be used for logging / returning errors
Fsid syscall.Fsid
FileSystem string
PathDigest uint64
// atomic, only increasing counter to prevent name conflicts
// see: FastRemoveDir method
removeDirCounter atomic.Uint64
// LOM caches
lomCaches cmn.MultiSyncMap
}
// MountedFS holds all mountpaths for the target.
MountedFS struct {
mu sync.Mutex
// fsIDs is set in which we store fsids of mountpaths. This allows for
// determining if there are any duplications of file system - we allow
// only one mountpath per file system.
fsIDs map[syscall.Fsid]string
// checkFsID determines if we should actually check FSID when adding new
// mountpath. By default it is set to true.
checkFsID bool
// Available mountpaths - mountpaths which are used to store the data.
available atomic.Pointer
// Disabled mountpaths - mountpaths which for some reason did not pass
// the health check and cannot be used for a moment.
disabled atomic.Pointer
// Cached pointer to mountpathInfo used to store BMD
xattrMpath atomic.Pointer
// Iostats for the available mountpaths
ios ios.IOStater
}
ChangeReq struct {
Action string // MountPath action enum (above)
Path string // path
}
)
func MountpathAdd(p string) ChangeReq { return ChangeReq{Action: Add, Path: p} }
func MountpathRem(p string) ChangeReq { return ChangeReq{Action: Remove, Path: p} }
func MountpathEnb(p string) ChangeReq { return ChangeReq{Action: Enable, Path: p} }
func MountpathDis(p string) ChangeReq { return ChangeReq{Action: Disable, Path: p} }
//
// MountpathInfo
//
func newMountpath(cleanPath, origPath string, fsid syscall.Fsid, fs string) *MountpathInfo {
mi := &MountpathInfo{
Path: cleanPath,
OrigPath: origPath,
Fsid: fsid,
FileSystem: fs,
PathDigest: xxhash.ChecksumString64S(cleanPath, cmn.MLCG32),
}
mi.removeDirCounter.Store(uint64(time.Now().UnixNano()))
return mi
}
func (mi *MountpathInfo) LomCache(idx int) *sync.Map { return mi.lomCaches.Get(idx) }
func (mi *MountpathInfo) evictLomCache() {
for idx := range mi.lomCaches.M {
cache := mi.LomCache(idx)
cache.Range(func(key interface{}, _ interface{}) bool {
cache.Delete(key)
return true
})
}
}
// FastRemoveDir removes directory in steps:
// 1. Synchronously gets temporary directory name
// 2. Synchronously renames old folder to temporary directory
// 3. Asynchronously deletes temporary directory
func (mi *MountpathInfo) FastRemoveDir(dir string) error {
	// `dir` will be renamed to non-existing bucket. Then we will
	// try to remove it asynchronously. In case of power cycle we expect that
	// LRU will take care of removing the rest of the bucket.
	var (
		counter        = mi.removeDirCounter.Inc()
		nonExistingDir = fmt.Sprintf("$removing-%d", counter)
		tmpDir         = filepath.Join(mi.Path, nonExistingDir)
	)
	// loose assumption: removing something which doesn't exist is fine
	if err := Access(dir); err != nil && os.IsNotExist(err) {
		return nil
	}
	if err := cmn.CreateDir(filepath.Dir(tmpDir)); err != nil {
		return err
	}
	if err := os.Rename(dir, tmpDir); err != nil {
		if os.IsExist(err) {
			// Slow path - `tmpDir` (or rather `nonExistingDir`) for some reason already exists...
			//
			// Even though `nonExistingDir` should not exist we cannot fully be sure.
			// There are at least two cases when this might not be true:
			//  1. `nonExistingDir` is leftover after target crash.
			//  2. Mountpath was removed and then added again. The counter
			//     will be reset and if we will be removing dirs quickly enough
			//     the counter will catch up with counter of the previous mountpath.
			//     If the directories from the previous mountpath were not yet removed
			//     (slow disk or filesystem) we can end up with the same name.
			//     For now we try to fight this with randomizing the initial counter.
			// In background remove leftover directory.
			go func() {
				glog.Errorf("%s already exists, removing...", tmpDir)
				if err := os.RemoveAll(tmpDir); err != nil {
					glog.Errorf("removing leftover %s failed, err: %v", tmpDir, err)
				}
			}()
			// This time generate fully unique name...
			tmpDir, err = ioutil.TempDir(mi.Path, nonExistingDir)
			if err != nil {
				return err
			}
			// Retry renaming - hopefully it should succeed now.
			err = os.Rename(dir, tmpDir)
		}
		// Someone removed dir before os.Rename, nothing more to do.
		if os.IsNotExist(err) {
			return nil
		}
		if err != nil {
			return err
		}
	}
	// Schedule removing temporary directory which is our old `dir`
	go func() {
		// TODO: in the future, the actual operation must be delegated to LRU
		// that'd take of care of it while pacing itself with regards to the
		// current disk utilization and space availability.
		if err := os.RemoveAll(tmpDir); err != nil {
			glog.Errorf("RemoveAll for %q failed with %v", tmpDir, err)
		}
	}()
	return nil
}
func (mi *MountpathInfo) IsIdle(config *cmn.Config, timestamp time.Time) bool {
if config == nil {
config = cmn.GCO.Get()
}
curr := Mountpaths.ios.GetMpathUtil(mi.Path, timestamp)
return curr >= 0 && curr < config.Disk.DiskUtilLowWM
}
func (mi *MountpathInfo) String() string {
return fmt.Sprintf("mp[%s, fs=%s]", mi.Path, mi.FileSystem)
}
///////////////
// make-path //
///////////////
func (mi *MountpathInfo) makePathBuf(bck cmn.Bck, contentType string, extra int) (buf []byte) {
var (
nsLen, bckNameLen, ctLen int
provLen = 1 + 1 + len(bck.Provider)
)
if !bck.Ns.IsGlobal() {
nsLen = 1
if bck.Ns.IsRemote() {
nsLen += 1 + len(bck.Ns.UUID)
}
nsLen += 1 + len(bck.Ns.Name)
}
if bck.Name != "" {
bckNameLen = 1 + len(bck.Name)
}
if contentType != "" {
cmn.Assert(bckNameLen > 0)
cmn.Assert(len(contentType) == contentTypeLen)
ctLen = 1 + 1 + contentTypeLen
}
buf = make([]byte, 0, len(mi.Path)+provLen+nsLen+bckNameLen+ctLen+extra)
buf = append(buf, mi.Path...)
buf = append(buf, filepath.Separator, prefProvider)
buf = append(buf, bck.Provider...)
if nsLen > 0 {
buf = append(buf, filepath.Separator)
if bck.Ns.IsRemote() {
buf = append(buf, prefNsUUID)
buf = append(buf, bck.Ns.UUID...)
}
buf = append(buf, prefNsName)
buf = append(buf, bck.Ns.Name...)
}
if bckNameLen > 0 {
buf = append(buf, filepath.Separator)
buf = append(buf, bck.Name...)
}
if ctLen > 0 {
buf = append(buf, filepath.Separator, prefCT)
buf = append(buf, contentType...)
}
return
}
func (mi *MountpathInfo) MakePathBck(bck cmn.Bck) string {
buf := mi.makePathBuf(bck, "", 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathCT(bck cmn.Bck, contentType string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "")
buf := mi.makePathBuf(bck, contentType, 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathFQN(bck cmn.Bck, contentType, objName string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "" && objName != "")
buf := mi.makePathBuf(bck, contentType, 1+len(objName))
buf = append(buf, filepath.Separator)
buf = append(buf, objName...)
return *(*string)(unsafe.Pointer(&buf))
}
//
// MountedFS
//
func (mfs *MountedFS) LoadBalanceGET(objfqn, objmpath string, copies MPI, now time.Time) (fqn string) {
var (
mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
objutil, ok = mpathUtils[objmpath]
rr, _ = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
util = objutil
r = rr
)
fqn = objfqn
if !ok {
cmn.DassertMsg(false, objmpath, pkgName)
return
}
for copyfqn, copympi := range copies {
var (
u int64
c, rrcnt int32
)
if u, ok = mpathUtils[copympi.Path]; !ok {
continue
}
if r, ok = mpathRRs[copympi.Path]; !ok {
if u < util {
fqn, util, rr = copyfqn, u, r
}
continue
}
c = r.Load()
if rr != nil {
rrcnt = rr.Load()
}
if u < util && c <= rrcnt { // the obvious choice
fqn, util, rr = copyfqn, u, r
continue
}
if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
fqn, util, rr = copyfqn, u, r
}
}
// NOTE: the counter could've been already inc-ed
// could keep track of the second best and use CAS to recerve-inc and compare
// can wait though
if rr != nil {
rr.Inc()
}
return
}
// ios delegators
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
return mfs.ios.GetMpathUtil(mpath, now)
}
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
utils, _ = mfs.ios.GetAllMpathUtils(now)
return
}
func (mfs *MountedFS) LogAppend(lines []string) []string {
return mfs.ios.LogAppend(lines)
}
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
if len(fsPaths) == 0 {
// (usability) not to clutter the log with backtraces when starting up and validating config
return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
}
for _, path := range fsPaths {
if err := mfs.Add(path); err != nil {
return err
}
}
return nil
}
// Add adds new mountpath to the target's mountpaths.
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
if err := Access(cleanMpath); err != nil {
return fmt.Errorf("fspath %q %s, err: %v", mpath, cmn.DoesNotExist, err)
}
statfs := syscall.Statfs_t{}
if err := syscall.Statfs(cleanMpath, &statfs); err != nil {
return fmt.Errorf("cannot statfs fspath %q, err: %w", mpath, err)
}
fs, err := fqn2fsAtStartup(cleanMpath)
if err != nil {
return fmt.Errorf("cannot get filesystem: %v", err)
}
mp := newMountpath(cleanMpath, mpath, statfs.Fsid, fs)
mfs.mu.Lock()
defer mfs.mu.Unlock()
availablePaths, disabledPaths := mfs.mountpathsCopy()
if _, exists := availablePaths[mp.Path]; exists {
return fmt.Errorf("tried to add already registered mountpath: %v", mp.Path)
}
if existingPath, exists := mfs.fsIDs[mp.Fsid]; exists && mfs.checkFsID {
return fmt.Errorf("tried to add path %v but same fsid (%v) was already registered by %v", mpath, mp.Fsid, existingPath)
}
mfs.ios.AddMpath(mp.Path, mp.FileSystem)
availablePaths[mp.Path] = mp
mfs.fsIDs[mp.Fsid] = cleanMpath
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
// Remove removes mountpaths from the target's mountpaths. It searches
// for the mountpath in available and disabled (if the mountpath is not found
// in available).
func (mfs *MountedFS) Remove(mpath string) error {
var (
mp *MountpathInfo
exists bool
)
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if mp, exists = availablePaths[cleanMpath]; !exists {
if mp, exists = disabledPaths[cleanMpath]; !exists {
return fmt.Errorf("tried to remove non-existing mountpath: %v", mpath)
}
delete(disabledPaths, cleanMpath)
delete(mfs.fsIDs, mp.Fsid)
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
delete(availablePaths, cleanMpath)
mfs.ios.RemoveMpath(cleanMpath)
delete(mfs.fsIDs, mp.Fsid)
go mp.evictLomCache()
if l := len(availablePaths); l == 0 {
glog.Errorf("removed the last available mountpath %s", mp)
} else {
glog.Infof("removed mountpath %s (%d remain(s) active)", mp, l)
}
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
// Enable enables previously disabled mountpath. enabled is set to
// true if mountpath has been moved from disabled to available and exists is
// set to true if such mountpath even exists.
func (mfs *MountedFS) Enable(mpath string) (enabled bool, err error) {
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return false, err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if _, ok := availablePaths[cleanMpath]; ok {
return false, nil
}
if mp, ok := disabledPaths[cleanMpath]; ok {
availablePaths[cleanMpath] = mp
mfs.ios.AddMpath(cleanMpath, mp.FileSystem)
delete(disabledPaths, cleanMpath)
mfs.updatePaths(availablePaths, disabledPaths)
return true, nil
}
return false, cmn.NewNoMountpathError(mpath)
}
// Disable disables an available mountpath. disabled is set to true if
// mountpath has been moved from available to disabled and exists is set to
// true if such mountpath even exists.
func (mfs *MountedFS) Disable(mpath string) (disabled bool, err error) {
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return false, err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if mpathInfo, ok := availablePaths[cleanMpath]; ok {
disabledPaths[cleanMpath] = mpathInfo
mfs.ios.RemoveMpath(cleanMpath)
delete(availablePaths, cleanMpath)
mfs.updatePaths(availablePaths, disabledPaths)
if l := len(availablePaths); l == 0 {
glog.Errorf("disabled the last available mountpath %s", mpathInfo)
} else {
glog.Infof("disabled mountpath %s (%d remain(s) active)", mpathInfo, l)
}
go mpathInfo.evictLomCache()
return true, nil
}
if _, ok := disabledPaths[cleanMpath]; ok {
return false, nil
}
return false, cmn.NewNoMountpathError(mpath)
}
// Returns number of available mountpaths
func (mfs *MountedFS) NumAvail() int {
availablePaths := (*MPI)(mfs.available.Load())
return len(*availablePaths)
}
// Mountpaths returns both available and disabled mountpaths.
func (mfs *MountedFS) Get() (MPI, MPI) {
var (
availablePaths = (*MPI)(mfs.available.Load())
disabledPaths = (*MPI)(mfs.disabled.Load())
)
if availablePaths == nil {
tmp := make(MPI, 10)
availablePaths = &tmp
}
if disabledPaths == nil {
tmp := make(MPI, 10)
disabledPaths = &tmp
}
return *availablePaths, *disabledPaths
}
// DisableFsIDCheck disables fsid checking when adding new mountpath
func (mfs *MountedFS) DisableFsIDCheck() { mfs.checkFsID = false }
// CreateBuckets creates, for every given bucket, all registered content-type
// directories on every available mountpath. Per-bucket errors are
// accumulated in errs; if no error occurred but fewer directories were
// created than expected, a single summary error is appended instead.
func (mfs *MountedFS) CreateBuckets(op string, bcks ...cmn.Bck) (errs []error) {
	var (
		availablePaths, _ = mfs.Get()
		totalDirs         = len(availablePaths) * len(bcks) * len(CSM.RegisteredContentTypes)
		totalCreatedDirs  int
	)
	for _, mi := range availablePaths {
		for _, bck := range bcks {
			num, err := mi.createBckDirs(bck)
			if err != nil {
				errs = append(errs, err)
			} else {
				totalCreatedDirs += num
			}
		}
	}
	// note: counts directories handled, not only newly created ones (see createBckDirs)
	if errs == nil && totalCreatedDirs != totalDirs {
		errs = append(errs, fmt.Errorf("failed to create %d out of %d buckets' directories: %v",
			totalDirs-totalCreatedDirs, totalDirs, bcks))
	}
	if errs == nil && glog.FastV(4, glog.SmoduleFS) {
		glog.Infof("%s(create bucket dirs): %v, num=%d", op, bcks, totalDirs)
	}
	return
}
// DestroyBuckets removes the given buckets' directories from every available
// mountpath (via FastRemoveDir, i.e. rename-then-async-delete). Individual
// failures are logged; a single summary error is returned when not all
// directories could be destroyed.
func (mfs *MountedFS) DestroyBuckets(op string, bcks ...cmn.Bck) error {
	const destroyStr = "destroy-ais-bucket-dir"
	var (
		availablePaths, _  = mfs.Get()
		totalDirs          = len(availablePaths) * len(bcks)
		totalDestroyedDirs = 0
	)
	for _, mpathInfo := range availablePaths {
		for _, bck := range bcks {
			dir := mpathInfo.MakePathBck(bck)
			if err := mpathInfo.FastRemoveDir(dir); err != nil {
				// best effort: log and keep going
				glog.Errorf("%s: failed to %s (dir: %q, err: %v)", op, destroyStr, dir, err)
			} else {
				totalDestroyedDirs++
			}
		}
	}
	if totalDestroyedDirs != totalDirs {
		return fmt.Errorf("failed to destroy %d out of %d buckets' directories: %v", totalDirs-totalDestroyedDirs, totalDirs, bcks)
	}
	if glog.FastV(4, glog.SmoduleFS) {
		glog.Infof("%s: %s (buckets %v, num dirs %d)", op, destroyStr, bcks, totalDirs)
	}
	return nil
}
// FetchFSInfo aggregates used and total capacity across the distinct
// filesystems backing the available mountpaths. Mountpaths that share an
// FSID are counted exactly once; statfs failures are logged and skipped.
func (mfs *MountedFS) FetchFSInfo() cmn.FSInfo {
	var (
		fsInfo            = cmn.FSInfo{}
		availablePaths, _ = mfs.Get()
		visitedFS         = make(map[syscall.Fsid]struct{})
	)
	for mpath := range availablePaths {
		statfs := &syscall.Statfs_t{}
		if err := syscall.Statfs(mpath, statfs); err != nil {
			glog.Errorf("Failed to statfs mp %q, err: %v", mpath, err)
			continue
		}
		// count each underlying filesystem exactly once
		if _, ok := visitedFS[statfs.Fsid]; ok {
			continue
		}
		visitedFS[statfs.Fsid] = struct{}{}
		fsInfo.FSUsed += (statfs.Blocks - statfs.Bavail) * uint64(statfs.Bsize)
		fsInfo.FSCapacity += statfs.Blocks * uint64(statfs.Bsize)
	}
	if fsInfo.FSCapacity > 0 {
		// FIXME: assuming all mountpaths have approx. the same capacity
		fsInfo.PctFSUsed = float64(fsInfo.FSUsed*100) / float64(fsInfo.FSCapacity)
	}
	return fsInfo
}
// RenameBucketDirs renames bckFrom's directory to bckTo's on every available
// mountpath. On the first failure it stops and rolls back (best effort,
// logging rollback errors) the renames that already succeeded, returning
// the original error.
func (mfs *MountedFS) RenameBucketDirs(bckFrom, bckTo cmn.Bck) (err error) {
	availablePaths, _ := mfs.Get()
	renamed := make([]*MountpathInfo, 0, len(availablePaths))
	for _, mpathInfo := range availablePaths {
		fromPath := mpathInfo.MakePathBck(bckFrom)
		toPath := mpathInfo.MakePathBck(bckTo)
		// os.Rename fails when renaming to a directory which already exists.
		// We should remove destination bucket directory before rename. It's reasonable to do so
		// as all targets agreed to rename and rename was committed in BMD.
		os.RemoveAll(toPath)
		if err = os.Rename(fromPath, toPath); err != nil {
			break
		}
		renamed = append(renamed, mpathInfo)
	}
	if err == nil {
		return
	}
	// rollback: undo the renames that succeeded before the failure
	for _, mpathInfo := range renamed {
		fromPath := mpathInfo.MakePathBck(bckTo)
		toPath := mpathInfo.MakePathBck(bckFrom)
		if erd := os.Rename(fromPath, toPath); erd != nil {
			glog.Error(erd)
		}
	}
	return
}
// CreateMissingBckDirs creates, for every registered content type, the
// corresponding bucket directory on this mountpath - unless it already
// exists. Returns the first creation error encountered.
func (mi *MountpathInfo) CreateMissingBckDirs(bck cmn.Bck) (err error) {
	for contentType := range CSM.RegisteredContentTypes {
		dir := mi.MakePathCT(bck, contentType)
		if accessErr := Access(dir); accessErr == nil {
			continue // already present
		}
		if err = cmn.CreateDir(dir); err != nil {
			return err
		}
	}
	return nil
}
//
// private methods
//
// updatePaths atomically publishes the new available/disabled mountpath maps
// and invalidates the cached BMD ("xattr") mountpath.
// All callers in this file (Add, Remove, Enable, Disable) invoke it while
// holding mfs.mu.
func (mfs *MountedFS) updatePaths(available, disabled MPI) {
	mfs.available.Store(unsafe.Pointer(&available))
	mfs.disabled.Store(unsafe.Pointer(&disabled))
	// force MpathForMetadata to re-select on next call
	mfs.xattrMpath.Store(unsafe.Pointer(nil))
}
// createBckDirs creates all content-type (CT) directories for the given
// (mountpath, bck) pair and returns the number of directories handled
// (pre-existing ones included).
// NOTE handling of empty dirs: a pre-existing non-empty CT directory is an
// error - except for workfiles, where it is downgraded to a warning.
func (mi *MountpathInfo) createBckDirs(bck cmn.Bck) (num int, err error) {
	for contentType := range CSM.RegisteredContentTypes {
		dir := mi.MakePathCT(bck, contentType)
		// the `err :=` below intentionally shadows the named result;
		// all returns inside the loop are explicit
		if err := Access(dir); err == nil {
			names, empty, errEmpty := IsDirEmpty(dir)
			if errEmpty != nil {
				return num, errEmpty
			}
			if !empty {
				err = fmt.Errorf("bucket %s: directory %s already exists and is not empty (%v...)", bck, dir, names)
				if contentType != WorkfileType {
					return num, err
				}
				glog.Warning(err)
			}
		} else if err := cmn.CreateDir(dir); err != nil {
			return num, fmt.Errorf("bucket %s: failed to create directory %s: %w", bck, dir, err)
		}
		num++
	}
	return num, nil
}
// mountpathsCopy returns shallow copies of the current available and
// disabled mountpath maps (the *MountpathInfo values themselves are shared).
// Fix: the disabled copy was pre-sized with len(availablePaths) -
// use len(disabledPaths) for the capacity hint.
func (mfs *MountedFS) mountpathsCopy() (MPI, MPI) {
	availablePaths, disabledPaths := mfs.Get()
	availableCopy := make(MPI, len(availablePaths))
	disabledCopy := make(MPI, len(disabledPaths))
	for mpath, mpathInfo := range availablePaths {
		availableCopy[mpath] = mpathInfo
	}
	for mpath, mpathInfo := range disabledPaths {
		disabledCopy[mpath] = mpathInfo
	}
	return availableCopy, disabledCopy
}
// String lists all available mountpaths, one per line, starting with a
// leading newline (or returns "" when there are none).
func (mfs *MountedFS) String() string {
	availablePaths, _ := mfs.Get()
	var sb strings.Builder
	sb.WriteString("\n")
	for _, mpathInfo := range availablePaths {
		sb.WriteString(mpathInfo.String())
		sb.WriteString("\n")
	}
	return strings.TrimSuffix(sb.String(), "\n")
}
// Select a "random" mountpath using HRW algorithm to store/load bucket metadata
func (mfs *MountedFS) MpathForMetadata() (mpath *MountpathInfo, err error) {
// fast path
mp := mfs.xattrMpath.Load()
if mp != nil {
return (*MountpathInfo)(mp), nil
}
// slow path
avail := (*MPI)(mfs.available.Load())
if len(*avail) == 0 {
return nil, fmt.Errorf("no mountpath available")
}
maxVal := uint64(0)
for _, m := range *avail {
if m.PathDigest > maxVal {
maxVal = m.PathDigest
mpath = m
}
}
if mpath == nil {
return nil, fmt.Errorf("failed to choose a mountpath")
}
if glog.FastV(4, glog.SmoduleFS) {
glog.Infof("Mountpath %q selected for storing BMD in xattrs", mpath.Path)
}
mfs.xattrMpath.Store(unsafe.Pointer(mpath))
return mpath, nil
} | // There are at least two cases when this might not be true:
// 1. `nonExistingDir` is leftover after target crash.
// 2. Mountpath was removed and then added again. The counter
// will be reset and if we will be removing dirs quickly enough | random_line_split |
mountfs.go | // Package fs provides mountpath and FQN abstractions and methods to resolve/map stored content
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package fs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/ios"
"github.com/OneOfOne/xxhash"
)
const (
pkgName = "fs"
uQuantum = 10 // each GET adds a "quantum" of utilization to the mountpath
)
// mountpath lifecycle-change enum
const (
Add = "add-mp"
Remove = "remove-mp"
Enable = "enable-mp"
Disable = "disable-mp"
)
// globals
var (
Mountpaths *MountedFS
)
// Terminology:
// - a mountpath is equivalent to (configurable) fspath - both terms are used interchangeably;
// - each mountpath is, simply, a local directory that is serviced by a local filesystem;
// - there's a 1-to-1 relationship between a mountpath and a local filesystem
// (different mountpaths map onto different filesystems, and vise versa);
// - mountpaths of the form <filesystem-mountpoint>/a/b/c are supported.
type (
MPI map[string]*MountpathInfo
PathRunGroup interface {
Reg(r PathRunner)
Unreg(r PathRunner)
}
// As a rule, running xactions are aborted and restarted on any mountpath change.
// But for a few xactions it can be too harsh. E.g, aborting and restarting
// `download` xaction results in waste of time and network traffic to
// redownload objects. These xactions should subscribe to mountpath changes
// as a `PathRunner`s to `PathRunGroup` events and adapt on the fly.
PathRunner interface {
cmn.Runner
Name() string
ReqAddMountpath(mpath string)
ReqRemoveMountpath(mpath string)
ReqEnableMountpath(mpath string)
ReqDisableMountpath(mpath string)
}
MountpathInfo struct {
Path string // Cleaned OrigPath
OrigPath string // As entered by the user, must be used for logging / returning errors
Fsid syscall.Fsid
FileSystem string
PathDigest uint64
// atomic, only increasing counter to prevent name conflicts
// see: FastRemoveDir method
removeDirCounter atomic.Uint64
// LOM caches
lomCaches cmn.MultiSyncMap
}
// MountedFS holds all mountpaths for the target.
MountedFS struct {
mu sync.Mutex
// fsIDs is set in which we store fsids of mountpaths. This allows for
// determining if there are any duplications of file system - we allow
// only one mountpath per file system.
fsIDs map[syscall.Fsid]string
// checkFsID determines if we should actually check FSID when adding new
// mountpath. By default it is set to true.
checkFsID bool
// Available mountpaths - mountpaths which are used to store the data.
available atomic.Pointer
// Disabled mountpaths - mountpaths which for some reason did not pass
// the health check and cannot be used for a moment.
disabled atomic.Pointer
// Cached pointer to mountpathInfo used to store BMD
xattrMpath atomic.Pointer
// Iostats for the available mountpaths
ios ios.IOStater
}
ChangeReq struct {
Action string // MountPath action enum (above)
Path string // path
}
)
// Constructors for the mountpath lifecycle-change requests (enum above).
func MountpathAdd(p string) ChangeReq {
	return ChangeReq{Action: Add, Path: p}
}

func MountpathRem(p string) ChangeReq {
	return ChangeReq{Action: Remove, Path: p}
}

func MountpathEnb(p string) ChangeReq {
	return ChangeReq{Action: Enable, Path: p}
}

func MountpathDis(p string) ChangeReq {
	return ChangeReq{Action: Disable, Path: p}
}
//
// MountpathInfo
//
// newMountpath constructs a MountpathInfo: cleanPath is the canonicalized
// path, origPath is what the user entered (kept for logging and error
// reporting), fsid identifies the underlying filesystem, and fs is its name.
func newMountpath(cleanPath, origPath string, fsid syscall.Fsid, fs string) *MountpathInfo {
	mi := &MountpathInfo{
		Path:       cleanPath,
		OrigPath:   origPath,
		Fsid:       fsid,
		FileSystem: fs,
		// digest of the clean path (used e.g. by MpathForMetadata selection)
		PathDigest: xxhash.ChecksumString64S(cleanPath, cmn.MLCG32),
	}
	// Randomize the initial FastRemoveDir counter to reduce the chance of
	// colliding with "$removing-*" directories left by a previous
	// incarnation of this mountpath (see FastRemoveDir).
	mi.removeDirCounter.Store(uint64(time.Now().UnixNano()))
	return mi
}
// LomCache returns the idx-th LOM cache shard of this mountpath.
func (mi *MountpathInfo) LomCache(idx int) *sync.Map { return mi.lomCaches.Get(idx) }

// evictLomCache deletes every entry from all LOM cache shards.
// sync.Map has no bulk clear - entries are removed one by one while ranging.
func (mi *MountpathInfo) evictLomCache() {
	for idx := range mi.lomCaches.M {
		cache := mi.LomCache(idx)
		cache.Range(func(key interface{}, _ interface{}) bool {
			cache.Delete(key)
			return true
		})
	}
}
// FastRemoveDir removes directory in steps:
//  1. Synchronously gets temporary directory name
//  2. Synchronously renames old folder to temporary directory
//  3. Asynchronously deletes temporary directory
//
// The rename makes the directory disappear from its visible location
// immediately; the (potentially slow) recursive delete then proceeds in
// the background.
func (mi *MountpathInfo) FastRemoveDir(dir string) error {
	// `dir` will be renamed to non-existing bucket. Then we will
	// try to remove it asynchronously. In case of power cycle we expect that
	// LRU will take care of removing the rest of the bucket.
	var (
		counter        = mi.removeDirCounter.Inc()
		nonExistingDir = fmt.Sprintf("$removing-%d", counter)
		tmpDir         = filepath.Join(mi.Path, nonExistingDir)
	)
	// loose assumption: removing something which doesn't exist is fine
	if err := Access(dir); err != nil && os.IsNotExist(err) {
		return nil
	}
	if err := cmn.CreateDir(filepath.Dir(tmpDir)); err != nil {
		return err
	}
	if err := os.Rename(dir, tmpDir); err != nil {
		if os.IsExist(err) {
			// Slow path - `tmpDir` (or rather `nonExistingDir`) for some reason already exists...
			//
			// Even though `nonExistingDir` should not exist we cannot fully be sure.
			// There are at least two cases when this might not be true:
			//  1. `nonExistingDir` is leftover after target crash.
			//  2. Mountpath was removed and then added again. The counter
			//     will be reset and if we will be removing dirs quickly enough
			//     the counter will catch up with counter of the previous mountpath.
			//     If the directories from the previous mountpath were not yet removed
			//     (slow disk or filesystem) we can end up with the same name.
			//     For now we try to fight this with randomizing the initial counter.
			// In background remove leftover directory.
			go func() {
				glog.Errorf("%s already exists, removing...", tmpDir)
				if err := os.RemoveAll(tmpDir); err != nil {
					glog.Errorf("removing leftover %s failed, err: %v", tmpDir, err)
				}
			}()
			// This time generate fully unique name...
			tmpDir, err = ioutil.TempDir(mi.Path, nonExistingDir)
			if err != nil {
				return err
			}
			// Retry renaming - hopefully it should succeed now.
			err = os.Rename(dir, tmpDir)
		}
		// Someone removed dir before os.Rename, nothing more to do.
		if os.IsNotExist(err) {
			return nil
		}
		if err != nil {
			return err
		}
	}
	// Schedule removing temporary directory which is our old `dir`
	go func() {
		// TODO: in the future, the actual operation must be delegated to LRU
		// that'd take of care of it while pacing itself with regards to the
		// current disk utilization and space availability.
		if err := os.RemoveAll(tmpDir); err != nil {
			glog.Errorf("RemoveAll for %q failed with %v", tmpDir, err)
		}
	}()
	return nil
}
// IsIdle reports whether this mountpath's current disk utilization is
// non-negative and below the configured low watermark. A nil config
// defaults to the global configuration.
func (mi *MountpathInfo) IsIdle(config *cmn.Config, timestamp time.Time) bool {
	cfg := config
	if cfg == nil {
		cfg = cmn.GCO.Get()
	}
	util := Mountpaths.ios.GetMpathUtil(mi.Path, timestamp)
	return util >= 0 && util < cfg.Disk.DiskUtilLowWM
}
// String implements fmt.Stringer: "mp[<path>, fs=<filesystem>]".
func (mi *MountpathInfo) String() string {
	return "mp[" + mi.Path + ", fs=" + mi.FileSystem + "]"
}
///////////////
// make-path //
///////////////
// makePathBuf builds the on-disk path for (mountpath, bucket[, content type])
// into a single exactly-sized byte buffer:
//
//	<mi.Path>/<prefProvider><provider>[/[<prefNsUUID><uuid>]<prefNsName><ns-name>][/<bck-name>][/<prefCT><content-type>]
//
// extra reserves additional capacity for the caller (e.g. "/objname" in
// MakePathFQN), so the buffer never reallocates.
func (mi *MountpathInfo) makePathBuf(bck cmn.Bck, contentType string, extra int) (buf []byte) {
	// first pass: compute the exact buffer size
	var (
		nsLen, bckNameLen, ctLen int
		provLen                  = 1 + 1 + len(bck.Provider) // separator + prefix + provider
	)
	if !bck.Ns.IsGlobal() {
		nsLen = 1
		if bck.Ns.IsRemote() {
			nsLen += 1 + len(bck.Ns.UUID)
		}
		nsLen += 1 + len(bck.Ns.Name)
	}
	if bck.Name != "" {
		bckNameLen = 1 + len(bck.Name)
	}
	if contentType != "" {
		// a content type requires a bucket name and has a fixed length
		cmn.Assert(bckNameLen > 0)
		cmn.Assert(len(contentType) == contentTypeLen)
		ctLen = 1 + 1 + contentTypeLen
	}
	// second pass: append the components
	buf = make([]byte, 0, len(mi.Path)+provLen+nsLen+bckNameLen+ctLen+extra)
	buf = append(buf, mi.Path...)
	buf = append(buf, filepath.Separator, prefProvider)
	buf = append(buf, bck.Provider...)
	if nsLen > 0 {
		buf = append(buf, filepath.Separator)
		if bck.Ns.IsRemote() {
			buf = append(buf, prefNsUUID)
			buf = append(buf, bck.Ns.UUID...)
		}
		buf = append(buf, prefNsName)
		buf = append(buf, bck.Ns.Name...)
	}
	if bckNameLen > 0 {
		buf = append(buf, filepath.Separator)
		buf = append(buf, bck.Name...)
	}
	if ctLen > 0 {
		buf = append(buf, filepath.Separator, prefCT)
		buf = append(buf, contentType...)
	}
	return
}
// MakePathBck returns the bucket directory path on this mountpath.
// The zero-copy []byte->string conversion is safe here because buf does not
// escape and is never mutated afterwards.
func (mi *MountpathInfo) MakePathBck(bck cmn.Bck) string {
	buf := mi.makePathBuf(bck, "", 0)
	return *(*string)(unsafe.Pointer(&buf))
}

// MakePathCT returns the content-type directory path for (bck, contentType).
// Requires a valid bucket and a non-empty content type.
func (mi *MountpathInfo) MakePathCT(bck cmn.Bck, contentType string) string {
	cmn.AssertMsg(bck.Valid(), bck.String())
	cmn.Assert(contentType != "")
	buf := mi.makePathBuf(bck, contentType, 0)
	return *(*string)(unsafe.Pointer(&buf))
}

// MakePathFQN returns the object's fully-qualified name:
// the content-type path (see makePathBuf) plus "/objName".
// The extra capacity reserved via makePathBuf guarantees the final append
// does not reallocate.
func (mi *MountpathInfo) MakePathFQN(bck cmn.Bck, contentType, objName string) string {
	cmn.AssertMsg(bck.Valid(), bck.String())
	cmn.Assert(contentType != "" && objName != "")
	buf := mi.makePathBuf(bck, contentType, 1+len(objName))
	buf = append(buf, filepath.Separator)
	buf = append(buf, objName...)
	return *(*string)(unsafe.Pointer(&buf))
}
//
// MountedFS
//
// LoadBalanceGET selects the "best" FQN to serve a GET from, among the
// object (objfqn on objmpath) and its copies: lowest current mountpath
// utilization wins, tie-broken by a per-mountpath GET round-robin counter
// (with uQuantum weighting as a heuristic). The winner's counter is
// incremented before returning.
// Fix: `rr, _ = m[k]` replaced with the single-value map read (staticcheck S1005).
func (mfs *MountedFS) LoadBalanceGET(objfqn, objmpath string, copies MPI, now time.Time) (fqn string) {
	var (
		mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
		objutil, ok          = mpathUtils[objmpath]
		rr                   = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
		util                 = objutil
		r                    = rr
	)
	fqn = objfqn
	if !ok {
		cmn.DassertMsg(false, objmpath, pkgName)
		return
	}
	for copyfqn, copympi := range copies {
		var (
			u        int64
			c, rrcnt int32
		)
		if u, ok = mpathUtils[copympi.Path]; !ok {
			continue
		}
		if r, ok = mpathRRs[copympi.Path]; !ok {
			// no RR counter for this mountpath - decide on utilization alone
			if u < util {
				fqn, util, rr = copyfqn, u, r
			}
			continue
		}
		c = r.Load()
		if rr != nil {
			rrcnt = rr.Load()
		}
		if u < util && c <= rrcnt { // the obvious choice
			fqn, util, rr = copyfqn, u, r
			continue
		}
		if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
			fqn, util, rr = copyfqn, u, r
		}
	}
	// NOTE: the counter could've been already inc-ed
	// could keep track of the second best and use CAS to recerve-inc and compare
	// can wait though
	if rr != nil {
		rr.Inc()
	}
	return
}
// ios delegators - thin wrappers over the iostat runner (mfs.ios).

// GetMpathUtil returns the given mountpath's current disk utilization.
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
	return mfs.ios.GetMpathUtil(mpath, now)
}

// GetAllMpathUtils returns the per-mountpath utilization map
// (the round-robin counters are dropped).
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
	utils, _ = mfs.ios.GetAllMpathUtils(now)
	return
}

// LogAppend delegates to the iostat runner.
func (mfs *MountedFS) LogAppend(lines []string) []string {
	return mfs.ios.LogAppend(lines)
}

// GetSelectedDiskStats delegates to the iostat runner.
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
	return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
if len(fsPaths) == 0 {
// (usability) not to clutter the log with backtraces when starting up and validating config
return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
}
for _, path := range fsPaths |
return nil
}
// Add adds a new mountpath to the target's available mountpaths:
// validates and cleans the path, stats the underlying filesystem, rejects
// duplicates (same path, or - unless checkFsID is disabled - same FSID),
// registers the mountpath with iostats, and publishes the updated maps.
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return err
	}
	if err := Access(cleanMpath); err != nil {
		return fmt.Errorf("fspath %q %s, err: %v", mpath, cmn.DoesNotExist, err)
	}
	statfs := syscall.Statfs_t{}
	if err := syscall.Statfs(cleanMpath, &statfs); err != nil {
		return fmt.Errorf("cannot statfs fspath %q, err: %w", mpath, err)
	}
	fs, err := fqn2fsAtStartup(cleanMpath)
	if err != nil {
		return fmt.Errorf("cannot get filesystem: %v", err)
	}
	mp := newMountpath(cleanMpath, mpath, statfs.Fsid, fs)
	mfs.mu.Lock()
	defer mfs.mu.Unlock()
	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if _, exists := availablePaths[mp.Path]; exists {
		return fmt.Errorf("tried to add already registered mountpath: %v", mp.Path)
	}
	// enforce one mountpath per filesystem (see checkFsID)
	if existingPath, exists := mfs.fsIDs[mp.Fsid]; exists && mfs.checkFsID {
		return fmt.Errorf("tried to add path %v but same fsid (%v) was already registered by %v", mpath, mp.Fsid, existingPath)
	}
	mfs.ios.AddMpath(mp.Path, mp.FileSystem)
	availablePaths[mp.Path] = mp
	mfs.fsIDs[mp.Fsid] = cleanMpath
	mfs.updatePaths(availablePaths, disabledPaths)
	return nil
}
// Remove removes a mountpath from the target. The mountpath is searched
// among the available ones first and then, if not found, among the disabled
// ones. Removing an available mountpath also unregisters it from iostats
// and evicts its LOM caches (asynchronously).
func (mfs *MountedFS) Remove(mpath string) error {
	var (
		mp     *MountpathInfo
		exists bool
	)
	mfs.mu.Lock()
	defer mfs.mu.Unlock()
	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return err
	}
	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if mp, exists = availablePaths[cleanMpath]; !exists {
		// not available - try disabled
		if mp, exists = disabledPaths[cleanMpath]; !exists {
			return fmt.Errorf("tried to remove non-existing mountpath: %v", mpath)
		}
		delete(disabledPaths, cleanMpath)
		delete(mfs.fsIDs, mp.Fsid)
		mfs.updatePaths(availablePaths, disabledPaths)
		return nil
	}
	delete(availablePaths, cleanMpath)
	mfs.ios.RemoveMpath(cleanMpath)
	delete(mfs.fsIDs, mp.Fsid)
	go mp.evictLomCache()
	if l := len(availablePaths); l == 0 {
		glog.Errorf("removed the last available mountpath %s", mp)
	} else {
		glog.Infof("removed mountpath %s (%d remain(s) active)", mp, l)
	}
	mfs.updatePaths(availablePaths, disabledPaths)
	return nil
}
// Enable moves a previously disabled mountpath back to available.
// Returns (true, nil) if the mountpath was moved, (false, nil) if it is
// already available, and an error if it is unknown altogether.
func (mfs *MountedFS) Enable(mpath string) (enabled bool, err error) {
	mfs.mu.Lock()
	defer mfs.mu.Unlock()
	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return false, err
	}
	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if _, ok := availablePaths[cleanMpath]; ok {
		// already enabled - nothing to do
		return false, nil
	}
	if mp, ok := disabledPaths[cleanMpath]; ok {
		availablePaths[cleanMpath] = mp
		mfs.ios.AddMpath(cleanMpath, mp.FileSystem)
		delete(disabledPaths, cleanMpath)
		mfs.updatePaths(availablePaths, disabledPaths)
		return true, nil
	}
	return false, cmn.NewNoMountpathError(mpath)
}
// Disable moves an available mountpath to disabled.
// Returns (true, nil) if the mountpath was moved, (false, nil) if it is
// already disabled, and an error if it is unknown altogether.
// Disabling also unregisters the mountpath from iostats and evicts its
// LOM caches (asynchronously).
func (mfs *MountedFS) Disable(mpath string) (disabled bool, err error) {
	mfs.mu.Lock()
	defer mfs.mu.Unlock()
	cleanMpath, err := cmn.ValidateMpath(mpath)
	if err != nil {
		return false, err
	}
	availablePaths, disabledPaths := mfs.mountpathsCopy()
	if mpathInfo, ok := availablePaths[cleanMpath]; ok {
		disabledPaths[cleanMpath] = mpathInfo
		mfs.ios.RemoveMpath(cleanMpath)
		delete(availablePaths, cleanMpath)
		mfs.updatePaths(availablePaths, disabledPaths)
		if l := len(availablePaths); l == 0 {
			glog.Errorf("disabled the last available mountpath %s", mpathInfo)
		} else {
			glog.Infof("disabled mountpath %s (%d remain(s) active)", mpathInfo, l)
		}
		go mpathInfo.evictLomCache()
		return true, nil
	}
	if _, ok := disabledPaths[cleanMpath]; ok {
		return false, nil
	}
	return false, cmn.NewNoMountpathError(mpath)
}
// Returns number of available mountpaths
func (mfs *MountedFS) NumAvail() int {
availablePaths := (*MPI)(mfs.available.Load())
return len(*availablePaths)
}
// Mountpaths returns both available and disabled mountpaths.
func (mfs *MountedFS) Get() (MPI, MPI) {
var (
availablePaths = (*MPI)(mfs.available.Load())
disabledPaths = (*MPI)(mfs.disabled.Load())
)
if availablePaths == nil {
tmp := make(MPI, 10)
availablePaths = &tmp
}
if disabledPaths == nil {
tmp := make(MPI, 10)
disabledPaths = &tmp
}
return *availablePaths, *disabledPaths
}
// DisableFsIDCheck disables fsid checking when adding new mountpath
func (mfs *MountedFS) DisableFsIDCheck() { mfs.checkFsID = false }
func (mfs *MountedFS) CreateBuckets(op string, bcks ...cmn.Bck) (errs []error) {
var (
availablePaths, _ = mfs.Get()
totalDirs = len(availablePaths) * len(bcks) * len(CSM.RegisteredContentTypes)
totalCreatedDirs int
)
for _, mi := range availablePaths {
for _, bck := range bcks {
num, err := mi.createBckDirs(bck)
if err != nil {
errs = append(errs, err)
} else {
totalCreatedDirs += num
}
}
}
if errs == nil && totalCreatedDirs != totalDirs {
errs = append(errs, fmt.Errorf("failed to create %d out of %d buckets' directories: %v",
totalDirs-totalCreatedDirs, totalDirs, bcks))
}
if errs == nil && glog.FastV(4, glog.SmoduleFS) {
glog.Infof("%s(create bucket dirs): %v, num=%d", op, bcks, totalDirs)
}
return
}
func (mfs *MountedFS) DestroyBuckets(op string, bcks ...cmn.Bck) error {
const destroyStr = "destroy-ais-bucket-dir"
var (
availablePaths, _ = mfs.Get()
totalDirs = len(availablePaths) * len(bcks)
totalDestroyedDirs = 0
)
for _, mpathInfo := range availablePaths {
for _, bck := range bcks {
dir := mpathInfo.MakePathBck(bck)
if err := mpathInfo.FastRemoveDir(dir); err != nil {
glog.Errorf("%s: failed to %s (dir: %q, err: %v)", op, destroyStr, dir, err)
} else {
totalDestroyedDirs++
}
}
}
if totalDestroyedDirs != totalDirs {
return fmt.Errorf("failed to destroy %d out of %d buckets' directories: %v", totalDirs-totalDestroyedDirs, totalDirs, bcks)
}
if glog.FastV(4, glog.SmoduleFS) {
glog.Infof("%s: %s (buckets %v, num dirs %d)", op, destroyStr, bcks, totalDirs)
}
return nil
}
func (mfs *MountedFS) FetchFSInfo() cmn.FSInfo {
var (
fsInfo = cmn.FSInfo{}
availablePaths, _ = mfs.Get()
visitedFS = make(map[syscall.Fsid]struct{})
)
for mpath := range availablePaths {
statfs := &syscall.Statfs_t{}
if err := syscall.Statfs(mpath, statfs); err != nil {
glog.Errorf("Failed to statfs mp %q, err: %v", mpath, err)
continue
}
if _, ok := visitedFS[statfs.Fsid]; ok {
continue
}
visitedFS[statfs.Fsid] = struct{}{}
fsInfo.FSUsed += (statfs.Blocks - statfs.Bavail) * uint64(statfs.Bsize)
fsInfo.FSCapacity += statfs.Blocks * uint64(statfs.Bsize)
}
if fsInfo.FSCapacity > 0 {
// FIXME: assuming all mountpaths have approx. the same capacity
fsInfo.PctFSUsed = float64(fsInfo.FSUsed*100) / float64(fsInfo.FSCapacity)
}
return fsInfo
}
func (mfs *MountedFS) RenameBucketDirs(bckFrom, bckTo cmn.Bck) (err error) {
availablePaths, _ := mfs.Get()
renamed := make([]*MountpathInfo, 0, len(availablePaths))
for _, mpathInfo := range availablePaths {
fromPath := mpathInfo.MakePathBck(bckFrom)
toPath := mpathInfo.MakePathBck(bckTo)
// os.Rename fails when renaming to a directory which already exists.
// We should remove destination bucket directory before rename. It's reasonable to do so
// as all targets agreed to rename and rename was committed in BMD.
os.RemoveAll(toPath)
if err = os.Rename(fromPath, toPath); err != nil {
break
}
renamed = append(renamed, mpathInfo)
}
if err == nil {
return
}
for _, mpathInfo := range renamed {
fromPath := mpathInfo.MakePathBck(bckTo)
toPath := mpathInfo.MakePathBck(bckFrom)
if erd := os.Rename(fromPath, toPath); erd != nil {
glog.Error(erd)
}
}
return
}
func (mi *MountpathInfo) CreateMissingBckDirs(bck cmn.Bck) (err error) {
for contentType := range CSM.RegisteredContentTypes {
dir := mi.MakePathCT(bck, contentType)
if err = Access(dir); err == nil {
continue
}
if err = cmn.CreateDir(dir); err != nil {
return
}
}
return
}
//
// private methods
//
func (mfs *MountedFS) updatePaths(available, disabled MPI) {
mfs.available.Store(unsafe.Pointer(&available))
mfs.disabled.Store(unsafe.Pointer(&disabled))
mfs.xattrMpath.Store(unsafe.Pointer(nil))
}
// Creates all CT directories for a given (mountpath, bck)
// NOTE handling of empty dirs
func (mi *MountpathInfo) createBckDirs(bck cmn.Bck) (num int, err error) {
for contentType := range CSM.RegisteredContentTypes {
dir := mi.MakePathCT(bck, contentType)
if err := Access(dir); err == nil {
names, empty, errEmpty := IsDirEmpty(dir)
if errEmpty != nil {
return num, errEmpty
}
if !empty {
err = fmt.Errorf("bucket %s: directory %s already exists and is not empty (%v...)", bck, dir, names)
if contentType != WorkfileType {
return num, err
}
glog.Warning(err)
}
} else if err := cmn.CreateDir(dir); err != nil {
return num, fmt.Errorf("bucket %s: failed to create directory %s: %w", bck, dir, err)
}
num++
}
return num, nil
}
// mountpathsCopy returns a shallow copy of current mountpaths
func (mfs *MountedFS) mountpathsCopy() (MPI, MPI) {
availablePaths, disabledPaths := mfs.Get()
availableCopy := make(MPI, len(availablePaths))
disabledCopy := make(MPI, len(availablePaths))
for mpath, mpathInfo := range availablePaths {
availableCopy[mpath] = mpathInfo
}
for mpath, mpathInfo := range disabledPaths {
disabledCopy[mpath] = mpathInfo
}
return availableCopy, disabledCopy
}
func (mfs *MountedFS) String() string {
availablePaths, _ := mfs.Get()
s := "\n"
for _, mpathInfo := range availablePaths {
s += mpathInfo.String() + "\n"
}
return strings.TrimSuffix(s, "\n")
}
// Select a "random" mountpath using HRW algorithm to store/load bucket metadata
func (mfs *MountedFS) MpathForMetadata() (mpath *MountpathInfo, err error) {
// fast path
mp := mfs.xattrMpath.Load()
if mp != nil {
return (*MountpathInfo)(mp), nil
}
// slow path
avail := (*MPI)(mfs.available.Load())
if len(*avail) == 0 {
return nil, fmt.Errorf("no mountpath available")
}
maxVal := uint64(0)
for _, m := range *avail {
if m.PathDigest > maxVal {
maxVal = m.PathDigest
mpath = m
}
}
if mpath == nil {
return nil, fmt.Errorf("failed to choose a mountpath")
}
if glog.FastV(4, glog.SmoduleFS) {
glog.Infof("Mountpath %q selected for storing BMD in xattrs", mpath.Path)
}
mfs.xattrMpath.Store(unsafe.Pointer(mpath))
return mpath, nil
}
| {
if err := mfs.Add(path); err != nil {
return err
}
} | conditional_block |
mountfs.go | // Package fs provides mountpath and FQN abstractions and methods to resolve/map stored content
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package fs
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/ios"
"github.com/OneOfOne/xxhash"
)
const (
pkgName = "fs"
uQuantum = 10 // each GET adds a "quantum" of utilization to the mountpath
)
// mountpath lifecycle-change enum
const (
Add = "add-mp"
Remove = "remove-mp"
Enable = "enable-mp"
Disable = "disable-mp"
)
// globals
var (
Mountpaths *MountedFS
)
// Terminology:
// - a mountpath is equivalent to (configurable) fspath - both terms are used interchangeably;
// - each mountpath is, simply, a local directory that is serviced by a local filesystem;
// - there's a 1-to-1 relationship between a mountpath and a local filesystem
// (different mountpaths map onto different filesystems, and vise versa);
// - mountpaths of the form <filesystem-mountpoint>/a/b/c are supported.
type (
MPI map[string]*MountpathInfo
PathRunGroup interface {
Reg(r PathRunner)
Unreg(r PathRunner)
}
// As a rule, running xactions are aborted and restarted on any mountpath change.
// But for a few xactions it can be too harsh. E.g, aborting and restarting
// `download` xaction results in waste of time and network traffic to
// redownload objects. These xactions should subscribe to mountpath changes
// as a `PathRunner`s to `PathRunGroup` events and adapt on the fly.
PathRunner interface {
cmn.Runner
Name() string
ReqAddMountpath(mpath string)
ReqRemoveMountpath(mpath string)
ReqEnableMountpath(mpath string)
ReqDisableMountpath(mpath string)
}
MountpathInfo struct {
Path string // Cleaned OrigPath
OrigPath string // As entered by the user, must be used for logging / returning errors
Fsid syscall.Fsid
FileSystem string
PathDigest uint64
// atomic, only increasing counter to prevent name conflicts
// see: FastRemoveDir method
removeDirCounter atomic.Uint64
// LOM caches
lomCaches cmn.MultiSyncMap
}
// MountedFS holds all mountpaths for the target.
MountedFS struct {
mu sync.Mutex
// fsIDs is set in which we store fsids of mountpaths. This allows for
// determining if there are any duplications of file system - we allow
// only one mountpath per file system.
fsIDs map[syscall.Fsid]string
// checkFsID determines if we should actually check FSID when adding new
// mountpath. By default it is set to true.
checkFsID bool
// Available mountpaths - mountpaths which are used to store the data.
available atomic.Pointer
// Disabled mountpaths - mountpaths which for some reason did not pass
// the health check and cannot be used for a moment.
disabled atomic.Pointer
// Cached pointer to mountpathInfo used to store BMD
xattrMpath atomic.Pointer
// Iostats for the available mountpaths
ios ios.IOStater
}
ChangeReq struct {
Action string // MountPath action enum (above)
Path string // path
}
)
func MountpathAdd(p string) ChangeReq { return ChangeReq{Action: Add, Path: p} }
func MountpathRem(p string) ChangeReq { return ChangeReq{Action: Remove, Path: p} }
func MountpathEnb(p string) ChangeReq { return ChangeReq{Action: Enable, Path: p} }
func MountpathDis(p string) ChangeReq { return ChangeReq{Action: Disable, Path: p} }
//
// MountpathInfo
//
func newMountpath(cleanPath, origPath string, fsid syscall.Fsid, fs string) *MountpathInfo {
mi := &MountpathInfo{
Path: cleanPath,
OrigPath: origPath,
Fsid: fsid,
FileSystem: fs,
PathDigest: xxhash.ChecksumString64S(cleanPath, cmn.MLCG32),
}
mi.removeDirCounter.Store(uint64(time.Now().UnixNano()))
return mi
}
func (mi *MountpathInfo) LomCache(idx int) *sync.Map { return mi.lomCaches.Get(idx) }
func (mi *MountpathInfo) evictLomCache() {
for idx := range mi.lomCaches.M {
cache := mi.LomCache(idx)
cache.Range(func(key interface{}, _ interface{}) bool {
cache.Delete(key)
return true
})
}
}
// FastRemoveDir removes directory in steps:
// 1. Synchronously gets temporary directory name
// 2. Synchronously renames old folder to temporary directory
// 3. Asynchronously deletes temporary directory
func (mi *MountpathInfo) FastRemoveDir(dir string) error {
// `dir` will be renamed to non-existing bucket. Then we will
// try to remove it asynchronously. In case of power cycle we expect that
// LRU will take care of removing the rest of the bucket.
var (
counter = mi.removeDirCounter.Inc()
nonExistingDir = fmt.Sprintf("$removing-%d", counter)
tmpDir = filepath.Join(mi.Path, nonExistingDir)
)
// loose assumption: removing something which doesn't exist is fine
if err := Access(dir); err != nil && os.IsNotExist(err) {
return nil
}
if err := cmn.CreateDir(filepath.Dir(tmpDir)); err != nil {
return err
}
if err := os.Rename(dir, tmpDir); err != nil {
if os.IsExist(err) {
// Slow path - `tmpDir` (or rather `nonExistingDir`) for some reason already exists...
//
// Even though `nonExistingDir` should not exist we cannot fully be sure.
// There are at least two cases when this might not be true:
// 1. `nonExistingDir` is leftover after target crash.
// 2. Mountpath was removed and then added again. The counter
// will be reset and if we will be removing dirs quickly enough
// the counter will catch up with counter of the previous mountpath.
// If the directories from the previous mountpath were not yet removed
// (slow disk or filesystem) we can end up with the same name.
// For now we try to fight this with randomizing the initial counter.
// In background remove leftover directory.
go func() {
glog.Errorf("%s already exists, removing...", tmpDir)
if err := os.RemoveAll(tmpDir); err != nil {
glog.Errorf("removing leftover %s failed, err: %v", tmpDir, err)
}
}()
// This time generate fully unique name...
tmpDir, err = ioutil.TempDir(mi.Path, nonExistingDir)
if err != nil {
return err
}
// Retry renaming - hopefully it should succeed now.
err = os.Rename(dir, tmpDir)
}
// Someone removed dir before os.Rename, nothing more to do.
if os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
}
// Schedule removing temporary directory which is our old `dir`
go func() {
// TODO: in the future, the actual operation must be delegated to LRU
// that'd take of care of it while pacing itself with regards to the
// current disk utilization and space availability.
if err := os.RemoveAll(tmpDir); err != nil {
glog.Errorf("RemoveAll for %q failed with %v", tmpDir, err)
}
}()
return nil
}
func (mi *MountpathInfo) IsIdle(config *cmn.Config, timestamp time.Time) bool {
if config == nil {
config = cmn.GCO.Get()
}
curr := Mountpaths.ios.GetMpathUtil(mi.Path, timestamp)
return curr >= 0 && curr < config.Disk.DiskUtilLowWM
}
func (mi *MountpathInfo) String() string {
return fmt.Sprintf("mp[%s, fs=%s]", mi.Path, mi.FileSystem)
}
///////////////
// make-path //
///////////////
func (mi *MountpathInfo) makePathBuf(bck cmn.Bck, contentType string, extra int) (buf []byte) {
var (
nsLen, bckNameLen, ctLen int
provLen = 1 + 1 + len(bck.Provider)
)
if !bck.Ns.IsGlobal() {
nsLen = 1
if bck.Ns.IsRemote() {
nsLen += 1 + len(bck.Ns.UUID)
}
nsLen += 1 + len(bck.Ns.Name)
}
if bck.Name != "" {
bckNameLen = 1 + len(bck.Name)
}
if contentType != "" {
cmn.Assert(bckNameLen > 0)
cmn.Assert(len(contentType) == contentTypeLen)
ctLen = 1 + 1 + contentTypeLen
}
buf = make([]byte, 0, len(mi.Path)+provLen+nsLen+bckNameLen+ctLen+extra)
buf = append(buf, mi.Path...)
buf = append(buf, filepath.Separator, prefProvider)
buf = append(buf, bck.Provider...)
if nsLen > 0 {
buf = append(buf, filepath.Separator)
if bck.Ns.IsRemote() {
buf = append(buf, prefNsUUID)
buf = append(buf, bck.Ns.UUID...)
}
buf = append(buf, prefNsName)
buf = append(buf, bck.Ns.Name...)
}
if bckNameLen > 0 {
buf = append(buf, filepath.Separator)
buf = append(buf, bck.Name...)
}
if ctLen > 0 {
buf = append(buf, filepath.Separator, prefCT)
buf = append(buf, contentType...)
}
return
}
func (mi *MountpathInfo) MakePathBck(bck cmn.Bck) string {
buf := mi.makePathBuf(bck, "", 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathCT(bck cmn.Bck, contentType string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "")
buf := mi.makePathBuf(bck, contentType, 0)
return *(*string)(unsafe.Pointer(&buf))
}
func (mi *MountpathInfo) MakePathFQN(bck cmn.Bck, contentType, objName string) string {
cmn.AssertMsg(bck.Valid(), bck.String())
cmn.Assert(contentType != "" && objName != "")
buf := mi.makePathBuf(bck, contentType, 1+len(objName))
buf = append(buf, filepath.Separator)
buf = append(buf, objName...)
return *(*string)(unsafe.Pointer(&buf))
}
//
// MountedFS
//
func (mfs *MountedFS) LoadBalanceGET(objfqn, objmpath string, copies MPI, now time.Time) (fqn string) {
var (
mpathUtils, mpathRRs = mfs.ios.GetAllMpathUtils(now)
objutil, ok = mpathUtils[objmpath]
rr, _ = mpathRRs[objmpath] // GET round-robin counter (zeros out every iostats refresh i-val)
util = objutil
r = rr
)
fqn = objfqn
if !ok {
cmn.DassertMsg(false, objmpath, pkgName)
return
}
for copyfqn, copympi := range copies {
var (
u int64
c, rrcnt int32
)
if u, ok = mpathUtils[copympi.Path]; !ok {
continue
}
if r, ok = mpathRRs[copympi.Path]; !ok {
if u < util {
fqn, util, rr = copyfqn, u, r
}
continue
}
c = r.Load()
if rr != nil {
rrcnt = rr.Load()
}
if u < util && c <= rrcnt { // the obvious choice
fqn, util, rr = copyfqn, u, r
continue
}
if u+int64(c)*uQuantum < util+int64(rrcnt)*uQuantum { // heuristics - make uQuantum configurable?
fqn, util, rr = copyfqn, u, r
}
}
// NOTE: the counter could've been already inc-ed
// could keep track of the second best and use CAS to recerve-inc and compare
// can wait though
if rr != nil {
rr.Inc()
}
return
}
// ios delegators
func (mfs *MountedFS) GetMpathUtil(mpath string, now time.Time) int64 {
return mfs.ios.GetMpathUtil(mpath, now)
}
func (mfs *MountedFS) GetAllMpathUtils(now time.Time) (utils map[string]int64) {
utils, _ = mfs.ios.GetAllMpathUtils(now)
return
}
func (mfs *MountedFS) LogAppend(lines []string) []string {
return mfs.ios.LogAppend(lines)
}
func (mfs *MountedFS) GetSelectedDiskStats() (m map[string]*ios.SelectedDiskStats) {
return mfs.ios.GetSelectedDiskStats()
}
// Init prepares and adds provided mountpaths. Also validates the mountpaths
// for duplication and availability.
func (mfs *MountedFS) Init(fsPaths []string) error {
if len(fsPaths) == 0 {
// (usability) not to clutter the log with backtraces when starting up and validating config
return fmt.Errorf("FATAL: no fspaths - see README => Configuration and/or fspaths section in the config.sh")
}
for _, path := range fsPaths {
if err := mfs.Add(path); err != nil {
return err
}
}
return nil
}
// Add adds new mountpath to the target's mountpaths.
// FIXME: unify error messages for original and clean mountpath
func (mfs *MountedFS) Add(mpath string) error {
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
if err := Access(cleanMpath); err != nil {
return fmt.Errorf("fspath %q %s, err: %v", mpath, cmn.DoesNotExist, err)
}
statfs := syscall.Statfs_t{}
if err := syscall.Statfs(cleanMpath, &statfs); err != nil {
return fmt.Errorf("cannot statfs fspath %q, err: %w", mpath, err)
}
fs, err := fqn2fsAtStartup(cleanMpath)
if err != nil {
return fmt.Errorf("cannot get filesystem: %v", err)
}
mp := newMountpath(cleanMpath, mpath, statfs.Fsid, fs)
mfs.mu.Lock()
defer mfs.mu.Unlock()
availablePaths, disabledPaths := mfs.mountpathsCopy()
if _, exists := availablePaths[mp.Path]; exists {
return fmt.Errorf("tried to add already registered mountpath: %v", mp.Path)
}
if existingPath, exists := mfs.fsIDs[mp.Fsid]; exists && mfs.checkFsID {
return fmt.Errorf("tried to add path %v but same fsid (%v) was already registered by %v", mpath, mp.Fsid, existingPath)
}
mfs.ios.AddMpath(mp.Path, mp.FileSystem)
availablePaths[mp.Path] = mp
mfs.fsIDs[mp.Fsid] = cleanMpath
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
// Remove removes mountpaths from the target's mountpaths. It searches
// for the mountpath in available and disabled (if the mountpath is not found
// in available).
func (mfs *MountedFS) | (mpath string) error {
var (
mp *MountpathInfo
exists bool
)
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if mp, exists = availablePaths[cleanMpath]; !exists {
if mp, exists = disabledPaths[cleanMpath]; !exists {
return fmt.Errorf("tried to remove non-existing mountpath: %v", mpath)
}
delete(disabledPaths, cleanMpath)
delete(mfs.fsIDs, mp.Fsid)
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
delete(availablePaths, cleanMpath)
mfs.ios.RemoveMpath(cleanMpath)
delete(mfs.fsIDs, mp.Fsid)
go mp.evictLomCache()
if l := len(availablePaths); l == 0 {
glog.Errorf("removed the last available mountpath %s", mp)
} else {
glog.Infof("removed mountpath %s (%d remain(s) active)", mp, l)
}
mfs.updatePaths(availablePaths, disabledPaths)
return nil
}
// Enable enables previously disabled mountpath. enabled is set to
// true if mountpath has been moved from disabled to available and exists is
// set to true if such mountpath even exists.
func (mfs *MountedFS) Enable(mpath string) (enabled bool, err error) {
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return false, err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if _, ok := availablePaths[cleanMpath]; ok {
return false, nil
}
if mp, ok := disabledPaths[cleanMpath]; ok {
availablePaths[cleanMpath] = mp
mfs.ios.AddMpath(cleanMpath, mp.FileSystem)
delete(disabledPaths, cleanMpath)
mfs.updatePaths(availablePaths, disabledPaths)
return true, nil
}
return false, cmn.NewNoMountpathError(mpath)
}
// Disable disables an available mountpath. disabled is set to true if
// mountpath has been moved from available to disabled and exists is set to
// true if such mountpath even exists.
func (mfs *MountedFS) Disable(mpath string) (disabled bool, err error) {
mfs.mu.Lock()
defer mfs.mu.Unlock()
cleanMpath, err := cmn.ValidateMpath(mpath)
if err != nil {
return false, err
}
availablePaths, disabledPaths := mfs.mountpathsCopy()
if mpathInfo, ok := availablePaths[cleanMpath]; ok {
disabledPaths[cleanMpath] = mpathInfo
mfs.ios.RemoveMpath(cleanMpath)
delete(availablePaths, cleanMpath)
mfs.updatePaths(availablePaths, disabledPaths)
if l := len(availablePaths); l == 0 {
glog.Errorf("disabled the last available mountpath %s", mpathInfo)
} else {
glog.Infof("disabled mountpath %s (%d remain(s) active)", mpathInfo, l)
}
go mpathInfo.evictLomCache()
return true, nil
}
if _, ok := disabledPaths[cleanMpath]; ok {
return false, nil
}
return false, cmn.NewNoMountpathError(mpath)
}
// Returns number of available mountpaths
func (mfs *MountedFS) NumAvail() int {
availablePaths := (*MPI)(mfs.available.Load())
return len(*availablePaths)
}
// Mountpaths returns both available and disabled mountpaths.
func (mfs *MountedFS) Get() (MPI, MPI) {
var (
availablePaths = (*MPI)(mfs.available.Load())
disabledPaths = (*MPI)(mfs.disabled.Load())
)
if availablePaths == nil {
tmp := make(MPI, 10)
availablePaths = &tmp
}
if disabledPaths == nil {
tmp := make(MPI, 10)
disabledPaths = &tmp
}
return *availablePaths, *disabledPaths
}
// DisableFsIDCheck disables fsid checking when adding new mountpath
func (mfs *MountedFS) DisableFsIDCheck() { mfs.checkFsID = false }
func (mfs *MountedFS) CreateBuckets(op string, bcks ...cmn.Bck) (errs []error) {
var (
availablePaths, _ = mfs.Get()
totalDirs = len(availablePaths) * len(bcks) * len(CSM.RegisteredContentTypes)
totalCreatedDirs int
)
for _, mi := range availablePaths {
for _, bck := range bcks {
num, err := mi.createBckDirs(bck)
if err != nil {
errs = append(errs, err)
} else {
totalCreatedDirs += num
}
}
}
if errs == nil && totalCreatedDirs != totalDirs {
errs = append(errs, fmt.Errorf("failed to create %d out of %d buckets' directories: %v",
totalDirs-totalCreatedDirs, totalDirs, bcks))
}
if errs == nil && glog.FastV(4, glog.SmoduleFS) {
glog.Infof("%s(create bucket dirs): %v, num=%d", op, bcks, totalDirs)
}
return
}
func (mfs *MountedFS) DestroyBuckets(op string, bcks ...cmn.Bck) error {
const destroyStr = "destroy-ais-bucket-dir"
var (
availablePaths, _ = mfs.Get()
totalDirs = len(availablePaths) * len(bcks)
totalDestroyedDirs = 0
)
for _, mpathInfo := range availablePaths {
for _, bck := range bcks {
dir := mpathInfo.MakePathBck(bck)
if err := mpathInfo.FastRemoveDir(dir); err != nil {
glog.Errorf("%s: failed to %s (dir: %q, err: %v)", op, destroyStr, dir, err)
} else {
totalDestroyedDirs++
}
}
}
if totalDestroyedDirs != totalDirs {
return fmt.Errorf("failed to destroy %d out of %d buckets' directories: %v", totalDirs-totalDestroyedDirs, totalDirs, bcks)
}
if glog.FastV(4, glog.SmoduleFS) {
glog.Infof("%s: %s (buckets %v, num dirs %d)", op, destroyStr, bcks, totalDirs)
}
return nil
}
func (mfs *MountedFS) FetchFSInfo() cmn.FSInfo {
var (
fsInfo = cmn.FSInfo{}
availablePaths, _ = mfs.Get()
visitedFS = make(map[syscall.Fsid]struct{})
)
for mpath := range availablePaths {
statfs := &syscall.Statfs_t{}
if err := syscall.Statfs(mpath, statfs); err != nil {
glog.Errorf("Failed to statfs mp %q, err: %v", mpath, err)
continue
}
if _, ok := visitedFS[statfs.Fsid]; ok {
continue
}
visitedFS[statfs.Fsid] = struct{}{}
fsInfo.FSUsed += (statfs.Blocks - statfs.Bavail) * uint64(statfs.Bsize)
fsInfo.FSCapacity += statfs.Blocks * uint64(statfs.Bsize)
}
if fsInfo.FSCapacity > 0 {
// FIXME: assuming all mountpaths have approx. the same capacity
fsInfo.PctFSUsed = float64(fsInfo.FSUsed*100) / float64(fsInfo.FSCapacity)
}
return fsInfo
}
func (mfs *MountedFS) RenameBucketDirs(bckFrom, bckTo cmn.Bck) (err error) {
availablePaths, _ := mfs.Get()
renamed := make([]*MountpathInfo, 0, len(availablePaths))
for _, mpathInfo := range availablePaths {
fromPath := mpathInfo.MakePathBck(bckFrom)
toPath := mpathInfo.MakePathBck(bckTo)
// os.Rename fails when renaming to a directory which already exists.
// We should remove destination bucket directory before rename. It's reasonable to do so
// as all targets agreed to rename and rename was committed in BMD.
os.RemoveAll(toPath)
if err = os.Rename(fromPath, toPath); err != nil {
break
}
renamed = append(renamed, mpathInfo)
}
if err == nil {
return
}
for _, mpathInfo := range renamed {
fromPath := mpathInfo.MakePathBck(bckTo)
toPath := mpathInfo.MakePathBck(bckFrom)
if erd := os.Rename(fromPath, toPath); erd != nil {
glog.Error(erd)
}
}
return
}
func (mi *MountpathInfo) CreateMissingBckDirs(bck cmn.Bck) (err error) {
for contentType := range CSM.RegisteredContentTypes {
dir := mi.MakePathCT(bck, contentType)
if err = Access(dir); err == nil {
continue
}
if err = cmn.CreateDir(dir); err != nil {
return
}
}
return
}
//
// private methods
//
func (mfs *MountedFS) updatePaths(available, disabled MPI) {
mfs.available.Store(unsafe.Pointer(&available))
mfs.disabled.Store(unsafe.Pointer(&disabled))
mfs.xattrMpath.Store(unsafe.Pointer(nil))
}
// Creates all CT directories for a given (mountpath, bck)
// NOTE handling of empty dirs
func (mi *MountpathInfo) createBckDirs(bck cmn.Bck) (num int, err error) {
for contentType := range CSM.RegisteredContentTypes {
dir := mi.MakePathCT(bck, contentType)
if err := Access(dir); err == nil {
names, empty, errEmpty := IsDirEmpty(dir)
if errEmpty != nil {
return num, errEmpty
}
if !empty {
err = fmt.Errorf("bucket %s: directory %s already exists and is not empty (%v...)", bck, dir, names)
if contentType != WorkfileType {
return num, err
}
glog.Warning(err)
}
} else if err := cmn.CreateDir(dir); err != nil {
return num, fmt.Errorf("bucket %s: failed to create directory %s: %w", bck, dir, err)
}
num++
}
return num, nil
}
// mountpathsCopy returns a shallow copy of current mountpaths
func (mfs *MountedFS) mountpathsCopy() (MPI, MPI) {
availablePaths, disabledPaths := mfs.Get()
availableCopy := make(MPI, len(availablePaths))
disabledCopy := make(MPI, len(availablePaths))
for mpath, mpathInfo := range availablePaths {
availableCopy[mpath] = mpathInfo
}
for mpath, mpathInfo := range disabledPaths {
disabledCopy[mpath] = mpathInfo
}
return availableCopy, disabledCopy
}
func (mfs *MountedFS) String() string {
availablePaths, _ := mfs.Get()
s := "\n"
for _, mpathInfo := range availablePaths {
s += mpathInfo.String() + "\n"
}
return strings.TrimSuffix(s, "\n")
}
// Select a "random" mountpath using HRW algorithm to store/load bucket metadata
func (mfs *MountedFS) MpathForMetadata() (mpath *MountpathInfo, err error) {
// fast path
mp := mfs.xattrMpath.Load()
if mp != nil {
return (*MountpathInfo)(mp), nil
}
// slow path
avail := (*MPI)(mfs.available.Load())
if len(*avail) == 0 {
return nil, fmt.Errorf("no mountpath available")
}
maxVal := uint64(0)
for _, m := range *avail {
if m.PathDigest > maxVal {
maxVal = m.PathDigest
mpath = m
}
}
if mpath == nil {
return nil, fmt.Errorf("failed to choose a mountpath")
}
if glog.FastV(4, glog.SmoduleFS) {
glog.Infof("Mountpath %q selected for storing BMD in xattrs", mpath.Path)
}
mfs.xattrMpath.Store(unsafe.Pointer(mpath))
return mpath, nil
}
| Remove | identifier_name |
webmux.py | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import logging
import os, os.path, socket
import sys, subprocess, threading, time
import requests, re
import tornado.web
from tornado.netutil import bind_unix_socket
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.escape import json_decode
import tornado.options
import terminado
import traceback
STATIC_DIR = os.path.join(os.path.dirname(terminado.__file__), "_static")
TEMPLATE_DIR = os.path.dirname(__file__)
# This is the port we'll start handing things out at
port_base = 2023
server_list = {}
def get_global_ip():
global server_list
while server_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org' ).text()
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill all the tunnels that have come in ever, so we
don't rely upon our list, we instead ask `lsof` to look for all processes
that are listening on the first 100 ports of our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
|
return ssh_procs
class WebmuxTermManager(terminado.NamedTermManager):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(**kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# This is important lel
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except:
logging.warn("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data
server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
# Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == port_number:
return hostname
return "host on port " + port_number
"""Render the /shell/[\\d]+ pages"""
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands += """
function %s()
{
if wireguard_up; then
%s.sabanet "$@";
elif same_global_subnet "%s"; then
%s.local "$@";
else
%s.webmux "$@";
fi;
}
"""%(name, name, s['global_ip'], name, name)
self.write('\n'.join([l.lstrip() for l in commands.split('\n')]))
if __name__ == "__main__":
# Parse things like --loglevel
tornado.options.parse_command_line()
term_manager = WebmuxTermManager(shell_command=["echo"], max_terminals=100)
handlers = [
(r"/", IndexPageHandler),
(r"/bash", BashPageHandler),
(r"/reset", ResetPageHandler),
(r"/register", RegistrationPageHandler),
(r"/_websocket/(\w+)", terminado.TermSocket, {'term_manager': term_manager}),
(r"/shell/([\d]+)/?", TerminalPageHandler),
(r"/webmux_static/(.*)", tornado.web.StaticFileHandler, {'path':os.path.join(TEMPLATE_DIR,"webmux_static")}),
]
application = tornado.web.Application(handlers, static_path=STATIC_DIR,
template_path=TEMPLATE_DIR,
term_manager=term_manager, debug=True)
application.listen(8888)
try:
# If we restarted or something, then be sure to cause all tunnels to reconnect
reset_server_list()
ssh_procs = kill_all_tunnels()
logging.info("Killed %d SSH tunnels"%(len(ssh_procs)))
logging.info("All systems operational, commander")
IOLoop.current().start()
except KeyboardInterrupt:
logging.info("\nShutting down due to SIGINT")
finally:
term_manager.shutdown()
IOLoop.current().close()
| subprocess.call(["kill", p]) | conditional_block |
webmux.py | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import logging
import os, os.path, socket
import sys, subprocess, threading, time
import requests, re
import tornado.web
from tornado.netutil import bind_unix_socket
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.escape import json_decode
import tornado.options
import terminado
import traceback
STATIC_DIR = os.path.join(os.path.dirname(terminado.__file__), "_static")
TEMPLATE_DIR = os.path.dirname(__file__)
# This is the port we'll start handing things out at
port_base = 2023
server_list = {}
def get_global_ip():
global server_list
while server_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org' ).text()
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill all the tunnels that have come in ever, so we
don't rely upon our list, we instead ask `lsof` to look for all processes
that are listening on the first 100 ports of our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
subprocess.call(["kill", p])
return ssh_procs
class WebmuxTermManager(terminado.NamedTermManager):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(**kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# This is important lel
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except:
logging.warn("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data | # Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == port_number:
return hostname
return "host on port " + port_number
"""Render the /shell/[\\d]+ pages"""
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands += """
function %s()
{
if wireguard_up; then
%s.sabanet "$@";
elif same_global_subnet "%s"; then
%s.local "$@";
else
%s.webmux "$@";
fi;
}
"""%(name, name, s['global_ip'], name, name)
self.write('\n'.join([l.lstrip() for l in commands.split('\n')]))
if __name__ == "__main__":
# Parse things like --loglevel
tornado.options.parse_command_line()
term_manager = WebmuxTermManager(shell_command=["echo"], max_terminals=100)
handlers = [
(r"/", IndexPageHandler),
(r"/bash", BashPageHandler),
(r"/reset", ResetPageHandler),
(r"/register", RegistrationPageHandler),
(r"/_websocket/(\w+)", terminado.TermSocket, {'term_manager': term_manager}),
(r"/shell/([\d]+)/?", TerminalPageHandler),
(r"/webmux_static/(.*)", tornado.web.StaticFileHandler, {'path':os.path.join(TEMPLATE_DIR,"webmux_static")}),
]
application = tornado.web.Application(handlers, static_path=STATIC_DIR,
template_path=TEMPLATE_DIR,
term_manager=term_manager, debug=True)
application.listen(8888)
try:
# If we restarted or something, then be sure to cause all tunnels to reconnect
reset_server_list()
ssh_procs = kill_all_tunnels()
logging.info("Killed %d SSH tunnels"%(len(ssh_procs)))
logging.info("All systems operational, commander")
IOLoop.current().start()
except KeyboardInterrupt:
logging.info("\nShutting down due to SIGINT")
finally:
term_manager.shutdown()
IOLoop.current().close() | server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
| random_line_split |
webmux.py | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import logging
import os, os.path, socket
import sys, subprocess, threading, time
import requests, re
import tornado.web
from tornado.netutil import bind_unix_socket
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.escape import json_decode
import tornado.options
import terminado
import traceback
STATIC_DIR = os.path.join(os.path.dirname(terminado.__file__), "_static")
TEMPLATE_DIR = os.path.dirname(__file__)
# This is the port we'll start handing things out at
port_base = 2023
server_list = {}
def | ():
global server_list
while server_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org' ).text()
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill all the tunnels that have come in ever, so we
don't rely upon our list, we instead ask `lsof` to look for all processes
that are listening on the first 100 ports of our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
subprocess.call(["kill", p])
return ssh_procs
class WebmuxTermManager(terminado.NamedTermManager):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(**kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# This is important lel
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except:
logging.warn("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data
server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
# Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == port_number:
return hostname
return "host on port " + port_number
"""Render the /shell/[\\d]+ pages"""
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands += """
function %s()
{
if wireguard_up; then
%s.sabanet "$@";
elif same_global_subnet "%s"; then
%s.local "$@";
else
%s.webmux "$@";
fi;
}
"""%(name, name, s['global_ip'], name, name)
self.write('\n'.join([l.lstrip() for l in commands.split('\n')]))
if __name__ == "__main__":
# Parse things like --loglevel
tornado.options.parse_command_line()
term_manager = WebmuxTermManager(shell_command=["echo"], max_terminals=100)
handlers = [
(r"/", IndexPageHandler),
(r"/bash", BashPageHandler),
(r"/reset", ResetPageHandler),
(r"/register", RegistrationPageHandler),
(r"/_websocket/(\w+)", terminado.TermSocket, {'term_manager': term_manager}),
(r"/shell/([\d]+)/?", TerminalPageHandler),
(r"/webmux_static/(.*)", tornado.web.StaticFileHandler, {'path':os.path.join(TEMPLATE_DIR,"webmux_static")}),
]
application = tornado.web.Application(handlers, static_path=STATIC_DIR,
template_path=TEMPLATE_DIR,
term_manager=term_manager, debug=True)
application.listen(8888)
try:
# If we restarted or something, then be sure to cause all tunnels to reconnect
reset_server_list()
ssh_procs = kill_all_tunnels()
logging.info("Killed %d SSH tunnels"%(len(ssh_procs)))
logging.info("All systems operational, commander")
IOLoop.current().start()
except KeyboardInterrupt:
logging.info("\nShutting down due to SIGINT")
finally:
term_manager.shutdown()
IOLoop.current().close()
| get_global_ip | identifier_name |
webmux.py | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import logging
import os, os.path, socket
import sys, subprocess, threading, time
import requests, re
import tornado.web
from tornado.netutil import bind_unix_socket
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.escape import json_decode
import tornado.options
import terminado
import traceback
STATIC_DIR = os.path.join(os.path.dirname(terminado.__file__), "_static")
TEMPLATE_DIR = os.path.dirname(__file__)
# This is the port we'll start handing things out at
port_base = 2023
server_list = {}
def get_global_ip():
global server_list
while server_list['ivolethe']['global_ip'] == 'webmux.cflo.at':
try:
findTags = re.compile(r'<.*?>')
findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
html = requests.get('http://checkip.dyndns.org' ).text()
ipaddress = findIP.search(findTags.sub('', html))
if ipaddress is not None:
server_list['ivolethe']['global_ip'] = ipaddress.group(0)
logging.info("Found global IP to be %s"%(server_list['ivolethe']['global_ip']))
except:
pass
def get_local_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("1.1.1.1", 80))
return s.getsockname()[0]
def reset_server_list():
global server_list
server_list = {
'ivolethe': {
'hostname': 'ivolethe',
'host_port': 22,
'webmux_port': 22,
'global_ip': 'webmux.cflo.at',
'local_ip': get_local_ip(),
'user': 'sabae',
'direct': True,
'last_direct_try': 1e100,
}
}
t = threading.Thread(target=get_global_ip)
t.daemon = True
t.start()
def kill_all_tunnels():
"""
Sometimes we just need to kill all the tunnels that have come in ever, so we
don't rely upon our list, we instead ask `lsof` to look for all processes
that are listening on the first 100 ports of our port_base and kill 'em all.
"""
lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
try:
lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
return []
except:
traceback.print_exc(file=sys.stdout)
logging.warning("Unable to probe active tunnels")
return []
ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
for p in ssh_procs:
subprocess.call(["kill", p])
return ssh_procs
class WebmuxTermManager(terminado.NamedTermManager):
|
class IndexPageHandler(tornado.web.RequestHandler):
"""Render the index page"""
def get(self):
logging.info("Hit the index page")
return self.render("index.html", static=self.static_url, server_list=server_list)
class RegistrationPageHandler(tornado.web.RequestHandler):
"""Return a port number for a hostname"""
def post(self):
try:
data = json_decode(self.request.body)
except:
logging.warn("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
return
# Always update the 'global_ip'
data['global_ip'] = self.request.headers.get("X-Real-IP")
# Convert `host_port` to an integer
data['host_port'] = int(data['host_port'])
# If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
# before we put it into `server_list`.
if not data['hostname'] in server_list:
port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
data['webmux_port'] = port_number
data['direct'] = False
data['last_direct_try'] = 0
server_list[data['hostname']] = data
else:
# Otherwise update server_list with the given data
server_list[data['hostname']].update(data)
data = server_list[data['hostname']]
# Log out a little bit
logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
"""Reset all SSH connections forwarding ports"""
def get(self):
ssh_procs = kill_all_tunnels()
reset_server_list()
logging.info("Killed %d live SSH tunnels"%(len(ssh_procs)))
self.write("Killed %d live SSH tunnels"%(len(ssh_procs)))
class TerminalPageHandler(tornado.web.RequestHandler):
def get_host(self, port_number):
for hostname in server_list:
if server_list[hostname]['webmux_port'] == port_number:
return hostname
return "host on port " + port_number
"""Render the /shell/[\\d]+ pages"""
def get(self, port_number):
return self.render("term.html", static=self.static_url,
ws_url_path="/_websocket/"+port_number,
hostname=self.get_host(port_number))
def sabanetify(hostname):
import hashlib
h = hashlib.sha256(hostname.encode('utf-8')).hexdigest()[:16]
return "fd37:5040::" + ":".join([h[idx:idx+4] for idx in range(0, len(h), 4)])
class BashPageHandler(tornado.web.RequestHandler):
"""Render the /bash page"""
def get(self):
global server_list
commands = "#webmuxbash\n"
# Add some helpful tools at the beginning
commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
wireguard_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig 2>/dev/null | grep -e "^utun[^ ]: flags=.*UP[,>]" -A 4 | grep -e "inet6 fd37:5040::") ]]
else
[[ -n $(ip address show $(wg show interfaces 2>/dev/null) up 2>/dev/null) ]]
fi
}
"""
for name in server_list:
s = server_list[name]
build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
# Add .global for connecting to global host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
commands += build_command(name+".global", prog)
# Add .local for connecting to local host IP directly
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
commands += build_command(name+".local", prog)
# Add .webmux command for connecting to webmux reverse-tunnel
prog = ssh_cmd + "-p %d %s@webmux.cflo.at"%(s['webmux_port'], s['user'])
commands += build_command(name+".webmux", prog)
# Add .sabanet command for connecting over wireguard
prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
commands += build_command(name+".sabanet", prog)
commands += """
function %s()
{
if wireguard_up; then
%s.sabanet "$@";
elif same_global_subnet "%s"; then
%s.local "$@";
else
%s.webmux "$@";
fi;
}
"""%(name, name, s['global_ip'], name, name)
self.write('\n'.join([l.lstrip() for l in commands.split('\n')]))
if __name__ == "__main__":
# Parse things like --loglevel
tornado.options.parse_command_line()
term_manager = WebmuxTermManager(shell_command=["echo"], max_terminals=100)
handlers = [
(r"/", IndexPageHandler),
(r"/bash", BashPageHandler),
(r"/reset", ResetPageHandler),
(r"/register", RegistrationPageHandler),
(r"/_websocket/(\w+)", terminado.TermSocket, {'term_manager': term_manager}),
(r"/shell/([\d]+)/?", TerminalPageHandler),
(r"/webmux_static/(.*)", tornado.web.StaticFileHandler, {'path':os.path.join(TEMPLATE_DIR,"webmux_static")}),
]
application = tornado.web.Application(handlers, static_path=STATIC_DIR,
template_path=TEMPLATE_DIR,
term_manager=term_manager, debug=True)
application.listen(8888)
try:
# If we restarted or something, then be sure to cause all tunnels to reconnect
reset_server_list()
ssh_procs = kill_all_tunnels()
logging.info("Killed %d SSH tunnels"%(len(ssh_procs)))
logging.info("All systems operational, commander")
IOLoop.current().start()
except KeyboardInterrupt:
logging.info("\nShutting down due to SIGINT")
finally:
term_manager.shutdown()
IOLoop.current().close()
| """Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(WebmuxTermManager, self).__init__(**kwargs)
def get_terminal(self, port_number):
from terminado.management import MaxTerminalsReached
# This is important lel
assert port_number is not None
if port_number in self.terminals:
return self.terminals[port_number]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Find server mapped to this port
name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
s = server_list[name]
# Create new terminal
logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
self.shell_command = ["ssh", "-C", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.cflo.at"]
term = self.new_terminal()
term.term_name = port_number
self.terminals[port_number] = term
self.start_reading(term)
return term | identifier_body |
server.py | #######################################################
# Copyright (C) 2020 Sam Pickell and Aishwarya Vissom
# Last Updated: Apr. 20, 2020
# UML COMP 5610 Computer Network and Security
#
# This is a working implementation of an
# encrypted messaging client/server program
# using DES, RSA, and PKC. This is our final
# project for UML COMP 5610. The book we used for
# reference was "Introduction to Network Security:
# Theory and Practice 2nd Edition" by Jie Wang
# and Zachary Kissel.
########################################################
import random
from datetime import datetime
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import socket
def convert_M(M_List):
new_list = []
# Convert to bits
for i in range(len(M_List)):
new_list.append(format(ord(M_List[i]), '08b'))
return "".join(new_list)
def convert_K(K_List):
new_list = []
# Convert to bits
for i in range(len(K_List)):
new_list.append(format(ord(K_List[i]), '07b'))
if K_List[i].count("1") % 2 == 0:
new_list.append("0")
else:
new_list.append("1")
return "".join(new_list)
def apply_IP(M):
rev_M = list(M[len(M)::-1])
my_mat = [["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"]]
counter = 0
# Create "A" Matrix
counting_list = [6, 4, 2, 0, 7, 5, 3, 1]
for i in range(len(counting_list)):
for j in range(8):
my_mat[counting_list[i]][j] = rev_M[counter]
counter += 1
# Apply IP(M)
final_list = []
for i in range(len(counting_list)):
for j in range(len(counting_list)):
final_list.append(my_mat[counting_list[j]][counting_list[i]])
return "".join(final_list)
def apply_IP_C(C):
final_list = [C[39], C[7], C[47], C[15], C[55], C[23], C[63], C[31], C[38], C[6], C[46], C[14], C[54], C[22], C[62],
C[30],
C[37], C[5], C[45], C[13], C[53], C[21], C[61], C[29], C[36], C[4], C[44], C[12], C[52], C[20], C[60],
C[28],
C[35], C[3], C[43], C[11], C[51], C[19], C[59], C[27], C[34], C[2], C[42], C[10], C[50], C[18], C[58],
C[26],
C[33], C[1], C[41], C[9], C[49], C[17], C[57], C[25], C[32], C[0], C[40], C[8], C[48], C[16], C[56],
C[24]]
return "".join(final_list)
def apply_IPKey(my_key):
my_key_list = list(my_key)
smaller_key = []
counter = 1
# Convert every 8th bit from the key to an "8", to be removed later
for i in range(len(my_key_list)):
if counter % 8 == 0:
smaller_key.append("8")
counter = 1
else:
|
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
[[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
[[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]]
Y = []
counter = 0
# Create 8 6-bit Blocks
for i in range(8):
Y.append(my_string[counter:(counter + 6)])
counter += 6
# Convert to 4-bit Blocks
result = []
for i in range(len(Y)):
current_block = list(Y[i])
x_coord = int((current_block[0] + current_block[5]), 2)
y_coord = int((Y[i][1:5]), 2)
base_10_num = S[i][x_coord][y_coord]
val = format(base_10_num, '04b')
result.append(val)
return "".join(result)
def P_fun(my_string):
my_list = list(my_string)
order_list = [15, 6, 19, 10, 28, 11, 27, 16, 0, 14, 22, 25, 4, 17, 30, 9,
1, 7, 23, 13, 31, 26, 2, 8, 18, 12, 29, 5, 21, 10, 3, 24]
# Perform P(V) permutation
result = []
for i in range(len(order_list)):
result.append(my_list[order_list[i]])
return "".join(result)
# You must pass the message to be encrypted (string), as well as an encryption key (list of 8 characters)
# It will return the encoded string (string), as well as a decryption key (list)
def DES_Encrypt(user_input, list_K):
# Encryption
encrypted_message = []
# Make sure the message is divisible by 8
if len(user_input) % 8 != 0:
overage = 8 - (len(user_input) % 8)
for i in range(overage):
user_input = user_input + " "
# The bulk of the encryption
for i in range(len(user_input) // 8):
M = [user_input[(i * 8)], user_input[(i * 8) + 1], user_input[(i * 8) + 2], user_input[(i * 8) + 3],
user_input[(i * 8) + 4], user_input[(i * 8) + 5], user_input[(i * 8) + 6], user_input[(i * 8) + 7]]
M = convert_M(M)
K = convert_K(list_K)
IP_M = apply_IP(M)
IP_Key = apply_IPKey(K)
# Containers for U sub, V sub, K sub
U_List = [IP_Key[0:28]]
V_List = [IP_Key[28:56]]
K_List = ["-1"]
# Generate the keys we need
for j in range(1, 16):
U_List.append(string_l_shift(U_List[j - 1]))
V_List.append(string_l_shift(V_List[j - 1]))
K_List.append(P_Key(U_List[j], V_List[j]))
# Begin FCS Encryption
L0 = IP_M[0:32]
R0 = IP_M[32:len(IP_M)]
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[1])
for j in range(2, 16):
L0 = L1
R0 = R1
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[j])
encrypted_message.append(R1)
encrypted_message.append(L1)
return "".join(encrypted_message), K_List
# You must pass the encrypted message (string), as well as an decryption key (list of 8 characters)
# It will return the decrypted message
def DES_Decrypt(encrypted_message, K_List):
# Decryption
decrypted_message = []
encrypted_message = "".join(encrypted_message)
# Reverse of Encryption
for i in range((len(encrypted_message)) // 64):
L_Prime_0 = encrypted_message[(i * 64):(i * 64 + 32)]
R_Prime_0 = encrypted_message[(i * 64 + 32):(i * 64 + 64)]
for j in range(1, 16):
R_Prime_1 = XOR_Encrypt(L_Prime_0, R_Prime_0, K_List[(15 - j + 1)])
L_Prime_1 = R_Prime_0
R_Prime_0 = R_Prime_1
L_Prime_0 = L_Prime_1
decrypted_message.append(R_Prime_0)
decrypted_message.append(L_Prime_0)
decrypted_message = "".join(decrypted_message)
# Convert back to ascii
back_2_ascii = []
for i in range(len(decrypted_message) // 64):
back_2_ascii.append(apply_IP_C(decrypted_message[(i * 64):((i + 1) * 64)]))
# Convert back to message
converted_message = []
for i in range(len(back_2_ascii)):
my_char = ""
for j in range(8):
my_char += chr(int(back_2_ascii[i][(j * 8):((j + 1) * 8)], 2))
converted_message.append(my_char)
final_message = "".join(converted_message)
return final_message
# Generates a user specified length list of random characters
def rand_char_key(num):
my_list = []
for i in range(num):
my_rand = random.randint(33, 126)
my_list.append(chr(my_rand))
return my_list
# Simplified version of 3DES/2, see p.59 of the textbook listed in the header
def ThreeDESTwo(my_string):
K1 = rand_char_key(8)
enc, K2 = DES_Encrypt(my_string, K1)
dec = DES_Decrypt(enc, K2)
enc, K2 = DES_Encrypt(dec, K1)
return enc, K2
# Simplified version of the ANSI x9.17 PRNG Standard, see p.83 of the book listed in the header
def ansi_x917_std():
for i in range(4):
T = str(datetime.now())
if i == 0:
V = ThreeDESTwo("".join(rand_char_key(len(T))))
R = ThreeDESTwo(string_xor(V, ThreeDESTwo(T)))
V = ThreeDESTwo(string_xor(R, ThreeDESTwo(T)))
# The last element of the list
return R[-1][-1]
def main():
# connect to the client
s = socket.socket()
host = socket.gethostname()
print("Server Will start on host : ", host)
port = 8080
s.bind((host, port))
print("")
print("Server done binding to host and port Successfully")
print("")
print("Server is waiting for incoming Connection ")
s.listen(1)
conn, addr = s.accept()
print(addr, "Has connected to server and is now online ...")
print("")
# generate public key for server
server_key_pair = RSA.generate(1024)
s_public_key = server_key_pair.publickey()
print(s_public_key, "server public key")
# exchange the public keys with the client
# send the public key to the client
server_pub = s_public_key.exportKey("PEM")
conn.send(server_pub)
# Receive client public key
c_public_key = RSA.importKey((conn.recv(1024)).decode())
print("client public key received: ", c_public_key)
# receive secret key from the client
secret_key = ((PKCS1_OAEP.new(server_key_pair)).decrypt(conn.recv(1024))).decode()
# The decrypted secret key is used in DES.
while True:
out_mes1 = input("server: ")
out_mes, dec = DES_Encrypt(out_mes1, secret_key)
o_m = out_mes.encode()
conn.send(o_m)
for i in range(len(dec)):
dec1 = dec[i].encode()
conn.send(dec1)
# incoming message
incoming_message1 = conn.recv(2048)
in_mess = incoming_message1.decode()
dec_k2 = {}
dec_k1 = []
dec = []
for i in range(0, 16):
dec_k1 = conn.recv(2048)
dec_k2[i] = dec_k1.decode()
dec.append(dec_k2[i])
incoming_message = DES_Decrypt(in_mess, dec)
print("CLIENT: ", incoming_message)
print(" ")
main()
| smaller_key.append(my_key_list[i])
counter += 1 | conditional_block |
server.py | #######################################################
# Copyright (C) 2020 Sam Pickell and Aishwarya Vissom
# Last Updated: Apr. 20, 2020
# UML COMP 5610 Computer Network and Security
#
# This is a working implementation of an
# encrypted messaging client/server program
# using DES, RSA, and PKC. This is our final
# project for UML COMP 5610. The book we used for
# reference was "Introduction to Network Security:
# Theory and Practice 2nd Edition" by Jie Wang
# and Zachary Kissel.
########################################################
import random
from datetime import datetime
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import socket
def convert_M(M_List):
new_list = []
# Convert to bits
for i in range(len(M_List)):
new_list.append(format(ord(M_List[i]), '08b'))
return "".join(new_list)
def | (K_List):
new_list = []
# Convert to bits
for i in range(len(K_List)):
new_list.append(format(ord(K_List[i]), '07b'))
if K_List[i].count("1") % 2 == 0:
new_list.append("0")
else:
new_list.append("1")
return "".join(new_list)
def apply_IP(M):
rev_M = list(M[len(M)::-1])
my_mat = [["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"]]
counter = 0
# Create "A" Matrix
counting_list = [6, 4, 2, 0, 7, 5, 3, 1]
for i in range(len(counting_list)):
for j in range(8):
my_mat[counting_list[i]][j] = rev_M[counter]
counter += 1
# Apply IP(M)
final_list = []
for i in range(len(counting_list)):
for j in range(len(counting_list)):
final_list.append(my_mat[counting_list[j]][counting_list[i]])
return "".join(final_list)
def apply_IP_C(C):
final_list = [C[39], C[7], C[47], C[15], C[55], C[23], C[63], C[31], C[38], C[6], C[46], C[14], C[54], C[22], C[62],
C[30],
C[37], C[5], C[45], C[13], C[53], C[21], C[61], C[29], C[36], C[4], C[44], C[12], C[52], C[20], C[60],
C[28],
C[35], C[3], C[43], C[11], C[51], C[19], C[59], C[27], C[34], C[2], C[42], C[10], C[50], C[18], C[58],
C[26],
C[33], C[1], C[41], C[9], C[49], C[17], C[57], C[25], C[32], C[0], C[40], C[8], C[48], C[16], C[56],
C[24]]
return "".join(final_list)
def apply_IPKey(my_key):
my_key_list = list(my_key)
smaller_key = []
counter = 1
# Convert every 8th bit from the key to an "8", to be removed later
for i in range(len(my_key_list)):
if counter % 8 == 0:
smaller_key.append("8")
counter = 1
else:
smaller_key.append(my_key_list[i])
counter += 1
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
[[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
[[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]]
Y = []
counter = 0
# Create 8 6-bit Blocks
for i in range(8):
Y.append(my_string[counter:(counter + 6)])
counter += 6
# Convert to 4-bit Blocks
result = []
for i in range(len(Y)):
current_block = list(Y[i])
x_coord = int((current_block[0] + current_block[5]), 2)
y_coord = int((Y[i][1:5]), 2)
base_10_num = S[i][x_coord][y_coord]
val = format(base_10_num, '04b')
result.append(val)
return "".join(result)
def P_fun(my_string):
my_list = list(my_string)
order_list = [15, 6, 19, 10, 28, 11, 27, 16, 0, 14, 22, 25, 4, 17, 30, 9,
1, 7, 23, 13, 31, 26, 2, 8, 18, 12, 29, 5, 21, 10, 3, 24]
# Perform P(V) permutation
result = []
for i in range(len(order_list)):
result.append(my_list[order_list[i]])
return "".join(result)
# You must pass the message to be encrypted (string), as well as an encryption key (list of 8 characters)
# It will return the encoded string (string), as well as a decryption key (list)
def DES_Encrypt(user_input, list_K):
# Encryption
encrypted_message = []
# Make sure the message is divisible by 8
if len(user_input) % 8 != 0:
overage = 8 - (len(user_input) % 8)
for i in range(overage):
user_input = user_input + " "
# The bulk of the encryption
for i in range(len(user_input) // 8):
M = [user_input[(i * 8)], user_input[(i * 8) + 1], user_input[(i * 8) + 2], user_input[(i * 8) + 3],
user_input[(i * 8) + 4], user_input[(i * 8) + 5], user_input[(i * 8) + 6], user_input[(i * 8) + 7]]
M = convert_M(M)
K = convert_K(list_K)
IP_M = apply_IP(M)
IP_Key = apply_IPKey(K)
# Containers for U sub, V sub, K sub
U_List = [IP_Key[0:28]]
V_List = [IP_Key[28:56]]
K_List = ["-1"]
# Generate the keys we need
for j in range(1, 16):
U_List.append(string_l_shift(U_List[j - 1]))
V_List.append(string_l_shift(V_List[j - 1]))
K_List.append(P_Key(U_List[j], V_List[j]))
# Begin FCS Encryption
L0 = IP_M[0:32]
R0 = IP_M[32:len(IP_M)]
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[1])
for j in range(2, 16):
L0 = L1
R0 = R1
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[j])
encrypted_message.append(R1)
encrypted_message.append(L1)
return "".join(encrypted_message), K_List
# You must pass the encrypted message (string), as well as an decryption key (list of 8 characters)
# It will return the decrypted message
def DES_Decrypt(encrypted_message, K_List):
# Decryption
decrypted_message = []
encrypted_message = "".join(encrypted_message)
# Reverse of Encryption
for i in range((len(encrypted_message)) // 64):
L_Prime_0 = encrypted_message[(i * 64):(i * 64 + 32)]
R_Prime_0 = encrypted_message[(i * 64 + 32):(i * 64 + 64)]
for j in range(1, 16):
R_Prime_1 = XOR_Encrypt(L_Prime_0, R_Prime_0, K_List[(15 - j + 1)])
L_Prime_1 = R_Prime_0
R_Prime_0 = R_Prime_1
L_Prime_0 = L_Prime_1
decrypted_message.append(R_Prime_0)
decrypted_message.append(L_Prime_0)
decrypted_message = "".join(decrypted_message)
# Convert back to ascii
back_2_ascii = []
for i in range(len(decrypted_message) // 64):
back_2_ascii.append(apply_IP_C(decrypted_message[(i * 64):((i + 1) * 64)]))
# Convert back to message
converted_message = []
for i in range(len(back_2_ascii)):
my_char = ""
for j in range(8):
my_char += chr(int(back_2_ascii[i][(j * 8):((j + 1) * 8)], 2))
converted_message.append(my_char)
final_message = "".join(converted_message)
return final_message
# Generates a user specified length list of random characters
def rand_char_key(num):
my_list = []
for i in range(num):
my_rand = random.randint(33, 126)
my_list.append(chr(my_rand))
return my_list
# Simplified version of 3DES/2, see p.59 of the textbook listed in the header
def ThreeDESTwo(my_string):
K1 = rand_char_key(8)
enc, K2 = DES_Encrypt(my_string, K1)
dec = DES_Decrypt(enc, K2)
enc, K2 = DES_Encrypt(dec, K1)
return enc, K2
# Simplified version of the ANSI x9.17 PRNG Standard, see p.83 of the book listed in the header
def ansi_x917_std():
for i in range(4):
T = str(datetime.now())
if i == 0:
V = ThreeDESTwo("".join(rand_char_key(len(T))))
R = ThreeDESTwo(string_xor(V, ThreeDESTwo(T)))
V = ThreeDESTwo(string_xor(R, ThreeDESTwo(T)))
# The last element of the list
return R[-1][-1]
def main():
# connect to the client
s = socket.socket()
host = socket.gethostname()
print("Server Will start on host : ", host)
port = 8080
s.bind((host, port))
print("")
print("Server done binding to host and port Successfully")
print("")
print("Server is waiting for incoming Connection ")
s.listen(1)
conn, addr = s.accept()
print(addr, "Has connected to server and is now online ...")
print("")
# generate public key for server
server_key_pair = RSA.generate(1024)
s_public_key = server_key_pair.publickey()
print(s_public_key, "server public key")
# exchange the public keys with the client
# send the public key to the client
server_pub = s_public_key.exportKey("PEM")
conn.send(server_pub)
# Receive client public key
c_public_key = RSA.importKey((conn.recv(1024)).decode())
print("client public key received: ", c_public_key)
# receive secret key from the client
secret_key = ((PKCS1_OAEP.new(server_key_pair)).decrypt(conn.recv(1024))).decode()
# The decrypted secret key is used in DES.
while True:
out_mes1 = input("server: ")
out_mes, dec = DES_Encrypt(out_mes1, secret_key)
o_m = out_mes.encode()
conn.send(o_m)
for i in range(len(dec)):
dec1 = dec[i].encode()
conn.send(dec1)
# incoming message
incoming_message1 = conn.recv(2048)
in_mess = incoming_message1.decode()
dec_k2 = {}
dec_k1 = []
dec = []
for i in range(0, 16):
dec_k1 = conn.recv(2048)
dec_k2[i] = dec_k1.decode()
dec.append(dec_k2[i])
incoming_message = DES_Decrypt(in_mess, dec)
print("CLIENT: ", incoming_message)
print(" ")
main()
| convert_K | identifier_name |
server.py | #######################################################
# Copyright (C) 2020 Sam Pickell and Aishwarya Vissom
# Last Updated: Apr. 20, 2020
# UML COMP 5610 Computer Network and Security
#
# This is a working implementation of an
# encrypted messaging client/server program
# using DES, RSA, and PKC. This is our final
# project for UML COMP 5610. The book we used for
# reference was "Introduction to Network Security:
# Theory and Practice 2nd Edition" by Jie Wang
# and Zachary Kissel.
########################################################
import random
from datetime import datetime
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import socket
def convert_M(M_List):
new_list = []
# Convert to bits
for i in range(len(M_List)):
new_list.append(format(ord(M_List[i]), '08b'))
return "".join(new_list)
def convert_K(K_List):
new_list = []
# Convert to bits
for i in range(len(K_List)):
new_list.append(format(ord(K_List[i]), '07b'))
if K_List[i].count("1") % 2 == 0:
new_list.append("0")
else:
new_list.append("1")
return "".join(new_list)
def apply_IP(M):
rev_M = list(M[len(M)::-1])
my_mat = [["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"]]
counter = 0
# Create "A" Matrix
counting_list = [6, 4, 2, 0, 7, 5, 3, 1]
for i in range(len(counting_list)):
for j in range(8):
my_mat[counting_list[i]][j] = rev_M[counter]
counter += 1
# Apply IP(M)
final_list = []
for i in range(len(counting_list)):
for j in range(len(counting_list)):
final_list.append(my_mat[counting_list[j]][counting_list[i]])
return "".join(final_list)
def apply_IP_C(C):
|
def apply_IPKey(my_key):
my_key_list = list(my_key)
smaller_key = []
counter = 1
# Convert every 8th bit from the key to an "8", to be removed later
for i in range(len(my_key_list)):
if counter % 8 == 0:
smaller_key.append("8")
counter = 1
else:
smaller_key.append(my_key_list[i])
counter += 1
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
[[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
[[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]]
Y = []
counter = 0
# Create 8 6-bit Blocks
for i in range(8):
Y.append(my_string[counter:(counter + 6)])
counter += 6
# Convert to 4-bit Blocks
result = []
for i in range(len(Y)):
current_block = list(Y[i])
x_coord = int((current_block[0] + current_block[5]), 2)
y_coord = int((Y[i][1:5]), 2)
base_10_num = S[i][x_coord][y_coord]
val = format(base_10_num, '04b')
result.append(val)
return "".join(result)
def P_fun(my_string):
my_list = list(my_string)
order_list = [15, 6, 19, 10, 28, 11, 27, 16, 0, 14, 22, 25, 4, 17, 30, 9,
1, 7, 23, 13, 31, 26, 2, 8, 18, 12, 29, 5, 21, 10, 3, 24]
# Perform P(V) permutation
result = []
for i in range(len(order_list)):
result.append(my_list[order_list[i]])
return "".join(result)
# You must pass the message to be encrypted (string), as well as an encryption key (list of 8 characters)
# It will return the encoded string (string), as well as a decryption key (list)
def DES_Encrypt(user_input, list_K):
# Encryption
encrypted_message = []
# Make sure the message is divisible by 8
if len(user_input) % 8 != 0:
overage = 8 - (len(user_input) % 8)
for i in range(overage):
user_input = user_input + " "
# The bulk of the encryption
for i in range(len(user_input) // 8):
M = [user_input[(i * 8)], user_input[(i * 8) + 1], user_input[(i * 8) + 2], user_input[(i * 8) + 3],
user_input[(i * 8) + 4], user_input[(i * 8) + 5], user_input[(i * 8) + 6], user_input[(i * 8) + 7]]
M = convert_M(M)
K = convert_K(list_K)
IP_M = apply_IP(M)
IP_Key = apply_IPKey(K)
# Containers for U sub, V sub, K sub
U_List = [IP_Key[0:28]]
V_List = [IP_Key[28:56]]
K_List = ["-1"]
# Generate the keys we need
for j in range(1, 16):
U_List.append(string_l_shift(U_List[j - 1]))
V_List.append(string_l_shift(V_List[j - 1]))
K_List.append(P_Key(U_List[j], V_List[j]))
# Begin FCS Encryption
L0 = IP_M[0:32]
R0 = IP_M[32:len(IP_M)]
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[1])
for j in range(2, 16):
L0 = L1
R0 = R1
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[j])
encrypted_message.append(R1)
encrypted_message.append(L1)
return "".join(encrypted_message), K_List
# You must pass the encrypted message (string), as well as an decryption key (list of 8 characters)
# It will return the decrypted message
def DES_Decrypt(encrypted_message, K_List):
# Decryption
decrypted_message = []
encrypted_message = "".join(encrypted_message)
# Reverse of Encryption
for i in range((len(encrypted_message)) // 64):
L_Prime_0 = encrypted_message[(i * 64):(i * 64 + 32)]
R_Prime_0 = encrypted_message[(i * 64 + 32):(i * 64 + 64)]
for j in range(1, 16):
R_Prime_1 = XOR_Encrypt(L_Prime_0, R_Prime_0, K_List[(15 - j + 1)])
L_Prime_1 = R_Prime_0
R_Prime_0 = R_Prime_1
L_Prime_0 = L_Prime_1
decrypted_message.append(R_Prime_0)
decrypted_message.append(L_Prime_0)
decrypted_message = "".join(decrypted_message)
# Convert back to ascii
back_2_ascii = []
for i in range(len(decrypted_message) // 64):
back_2_ascii.append(apply_IP_C(decrypted_message[(i * 64):((i + 1) * 64)]))
# Convert back to message
converted_message = []
for i in range(len(back_2_ascii)):
my_char = ""
for j in range(8):
my_char += chr(int(back_2_ascii[i][(j * 8):((j + 1) * 8)], 2))
converted_message.append(my_char)
final_message = "".join(converted_message)
return final_message
# Generates a user specified length list of random characters
def rand_char_key(num):
my_list = []
for i in range(num):
my_rand = random.randint(33, 126)
my_list.append(chr(my_rand))
return my_list
# Simplified version of 3DES/2, see p.59 of the textbook listed in the header
def ThreeDESTwo(my_string):
K1 = rand_char_key(8)
enc, K2 = DES_Encrypt(my_string, K1)
dec = DES_Decrypt(enc, K2)
enc, K2 = DES_Encrypt(dec, K1)
return enc, K2
# Simplified version of the ANSI x9.17 PRNG Standard, see p.83 of the book listed in the header
def ansi_x917_std():
for i in range(4):
T = str(datetime.now())
if i == 0:
V = ThreeDESTwo("".join(rand_char_key(len(T))))
R = ThreeDESTwo(string_xor(V, ThreeDESTwo(T)))
V = ThreeDESTwo(string_xor(R, ThreeDESTwo(T)))
# The last element of the list
return R[-1][-1]
def main():
# connect to the client
s = socket.socket()
host = socket.gethostname()
print("Server Will start on host : ", host)
port = 8080
s.bind((host, port))
print("")
print("Server done binding to host and port Successfully")
print("")
print("Server is waiting for incoming Connection ")
s.listen(1)
conn, addr = s.accept()
print(addr, "Has connected to server and is now online ...")
print("")
# generate public key for server
server_key_pair = RSA.generate(1024)
s_public_key = server_key_pair.publickey()
print(s_public_key, "server public key")
# exchange the public keys with the client
# send the public key to the client
server_pub = s_public_key.exportKey("PEM")
conn.send(server_pub)
# Receive client public key
c_public_key = RSA.importKey((conn.recv(1024)).decode())
print("client public key received: ", c_public_key)
# receive secret key from the client
secret_key = ((PKCS1_OAEP.new(server_key_pair)).decrypt(conn.recv(1024))).decode()
# The decrypted secret key is used in DES.
while True:
out_mes1 = input("server: ")
out_mes, dec = DES_Encrypt(out_mes1, secret_key)
o_m = out_mes.encode()
conn.send(o_m)
for i in range(len(dec)):
dec1 = dec[i].encode()
conn.send(dec1)
# incoming message
incoming_message1 = conn.recv(2048)
in_mess = incoming_message1.decode()
dec_k2 = {}
dec_k1 = []
dec = []
for i in range(0, 16):
dec_k1 = conn.recv(2048)
dec_k2[i] = dec_k1.decode()
dec.append(dec_k2[i])
incoming_message = DES_Decrypt(in_mess, dec)
print("CLIENT: ", incoming_message)
print(" ")
main()
| final_list = [C[39], C[7], C[47], C[15], C[55], C[23], C[63], C[31], C[38], C[6], C[46], C[14], C[54], C[22], C[62],
C[30],
C[37], C[5], C[45], C[13], C[53], C[21], C[61], C[29], C[36], C[4], C[44], C[12], C[52], C[20], C[60],
C[28],
C[35], C[3], C[43], C[11], C[51], C[19], C[59], C[27], C[34], C[2], C[42], C[10], C[50], C[18], C[58],
C[26],
C[33], C[1], C[41], C[9], C[49], C[17], C[57], C[25], C[32], C[0], C[40], C[8], C[48], C[16], C[56],
C[24]]
return "".join(final_list) | identifier_body |
server.py | #######################################################
# Copyright (C) 2020 Sam Pickell and Aishwarya Vissom
# Last Updated: Apr. 20, 2020
# UML COMP 5610 Computer Network and Security
#
# This is a working implementation of an
# encrypted messaging client/server program
# using DES, RSA, and PKC. This is our final
# project for UML COMP 5610. The book we used for
# reference was "Introduction to Network Security:
# Theory and Practice 2nd Edition" by Jie Wang
# and Zachary Kissel.
########################################################
import random
from datetime import datetime
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import socket
def convert_M(M_List):
new_list = []
# Convert to bits
for i in range(len(M_List)):
new_list.append(format(ord(M_List[i]), '08b'))
return "".join(new_list)
def convert_K(K_List):
new_list = []
# Convert to bits
for i in range(len(K_List)):
new_list.append(format(ord(K_List[i]), '07b'))
if K_List[i].count("1") % 2 == 0:
new_list.append("0")
else: | return "".join(new_list)
def apply_IP(M):
rev_M = list(M[len(M)::-1])
my_mat = [["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0", "0", "0", "0"], ["0", "0", "0", "0", "0", "0", "0", "0"]]
counter = 0
# Create "A" Matrix
counting_list = [6, 4, 2, 0, 7, 5, 3, 1]
for i in range(len(counting_list)):
for j in range(8):
my_mat[counting_list[i]][j] = rev_M[counter]
counter += 1
# Apply IP(M)
final_list = []
for i in range(len(counting_list)):
for j in range(len(counting_list)):
final_list.append(my_mat[counting_list[j]][counting_list[i]])
return "".join(final_list)
def apply_IP_C(C):
final_list = [C[39], C[7], C[47], C[15], C[55], C[23], C[63], C[31], C[38], C[6], C[46], C[14], C[54], C[22], C[62],
C[30],
C[37], C[5], C[45], C[13], C[53], C[21], C[61], C[29], C[36], C[4], C[44], C[12], C[52], C[20], C[60],
C[28],
C[35], C[3], C[43], C[11], C[51], C[19], C[59], C[27], C[34], C[2], C[42], C[10], C[50], C[18], C[58],
C[26],
C[33], C[1], C[41], C[9], C[49], C[17], C[57], C[25], C[32], C[0], C[40], C[8], C[48], C[16], C[56],
C[24]]
return "".join(final_list)
def apply_IPKey(my_key):
my_key_list = list(my_key)
smaller_key = []
counter = 1
# Convert every 8th bit from the key to an "8", to be removed later
for i in range(len(my_key_list)):
if counter % 8 == 0:
smaller_key.append("8")
counter = 1
else:
smaller_key.append(my_key_list[i])
counter += 1
# Apply the IP Key encryption algorithm
final_list = []
next_index = 56
for i in range(28):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 65)
next_index = 62
for i in range(28, 52):
final_list.append(smaller_key[next_index])
next_index = ((next_index - 8) % 63)
next_index = 27
for i in range(52, 56):
final_list.append(smaller_key[next_index])
next_index = next_index - 8
return "".join(final_list)
def string_l_shift(my_str):
my_list = list(my_str)
first_char = my_list[0]
my_list.pop(0)
my_list.append(first_char)
return "".join(my_list)
def P_Key(my_U, my_V):
my_list = list(my_U + my_V)
final_list = []
my_perm = [13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
for i in range(len(my_perm)):
final_list.append(my_list[my_perm[i]])
return "".join(final_list)
def XOR_Encrypt(my_left, my_right, my_key):
# Perform the major part of Step 2 of DES Encryption
my_EP = string_xor(EP_fun(my_right), my_key)
my_S = S_fun(my_EP)
my_P = P_fun(my_S)
return string_xor(my_left, my_P)
def EP_fun(my_string):
my_list = list(my_string)
result = []
next_index = 31
counter = 0
# Perform expansion permutation
for i in range(48):
result.append(my_list[next_index])
counter += 1
if next_index == 31:
next_index = 0
else:
next_index += 1
if counter == 6:
counter = 0
next_index -= 2
return "".join(result)
def string_xor(s1, s2):
l1 = list(s1)
l2 = list(s2)
result = []
# Perform XOR
for i in range(len(l1)):
if l1[i] == l2[i]:
result.append("0")
else:
result.append("1")
return "".join(result)
def S_fun(my_string):
# S-Boxes (yes, all of them)
S = [[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
[[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
[[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
[[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]]
Y = []
counter = 0
# Create 8 6-bit Blocks
for i in range(8):
Y.append(my_string[counter:(counter + 6)])
counter += 6
# Convert to 4-bit Blocks
result = []
for i in range(len(Y)):
current_block = list(Y[i])
x_coord = int((current_block[0] + current_block[5]), 2)
y_coord = int((Y[i][1:5]), 2)
base_10_num = S[i][x_coord][y_coord]
val = format(base_10_num, '04b')
result.append(val)
return "".join(result)
def P_fun(my_string):
my_list = list(my_string)
order_list = [15, 6, 19, 10, 28, 11, 27, 16, 0, 14, 22, 25, 4, 17, 30, 9,
1, 7, 23, 13, 31, 26, 2, 8, 18, 12, 29, 5, 21, 10, 3, 24]
# Perform P(V) permutation
result = []
for i in range(len(order_list)):
result.append(my_list[order_list[i]])
return "".join(result)
# You must pass the message to be encrypted (string), as well as an encryption key (list of 8 characters)
# It will return the encoded string (string), as well as a decryption key (list)
def DES_Encrypt(user_input, list_K):
# Encryption
encrypted_message = []
# Make sure the message is divisible by 8
if len(user_input) % 8 != 0:
overage = 8 - (len(user_input) % 8)
for i in range(overage):
user_input = user_input + " "
# The bulk of the encryption
for i in range(len(user_input) // 8):
M = [user_input[(i * 8)], user_input[(i * 8) + 1], user_input[(i * 8) + 2], user_input[(i * 8) + 3],
user_input[(i * 8) + 4], user_input[(i * 8) + 5], user_input[(i * 8) + 6], user_input[(i * 8) + 7]]
M = convert_M(M)
K = convert_K(list_K)
IP_M = apply_IP(M)
IP_Key = apply_IPKey(K)
# Containers for U sub, V sub, K sub
U_List = [IP_Key[0:28]]
V_List = [IP_Key[28:56]]
K_List = ["-1"]
# Generate the keys we need
for j in range(1, 16):
U_List.append(string_l_shift(U_List[j - 1]))
V_List.append(string_l_shift(V_List[j - 1]))
K_List.append(P_Key(U_List[j], V_List[j]))
# Begin FCS Encryption
L0 = IP_M[0:32]
R0 = IP_M[32:len(IP_M)]
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[1])
for j in range(2, 16):
L0 = L1
R0 = R1
L1 = R0
R1 = XOR_Encrypt(L0, R0, K_List[j])
encrypted_message.append(R1)
encrypted_message.append(L1)
return "".join(encrypted_message), K_List
# You must pass the encrypted message (string), as well as an decryption key (list of 8 characters)
# It will return the decrypted message
def DES_Decrypt(encrypted_message, K_List):
# Decryption
decrypted_message = []
encrypted_message = "".join(encrypted_message)
# Reverse of Encryption
for i in range((len(encrypted_message)) // 64):
L_Prime_0 = encrypted_message[(i * 64):(i * 64 + 32)]
R_Prime_0 = encrypted_message[(i * 64 + 32):(i * 64 + 64)]
for j in range(1, 16):
R_Prime_1 = XOR_Encrypt(L_Prime_0, R_Prime_0, K_List[(15 - j + 1)])
L_Prime_1 = R_Prime_0
R_Prime_0 = R_Prime_1
L_Prime_0 = L_Prime_1
decrypted_message.append(R_Prime_0)
decrypted_message.append(L_Prime_0)
decrypted_message = "".join(decrypted_message)
# Convert back to ascii
back_2_ascii = []
for i in range(len(decrypted_message) // 64):
back_2_ascii.append(apply_IP_C(decrypted_message[(i * 64):((i + 1) * 64)]))
# Convert back to message
converted_message = []
for i in range(len(back_2_ascii)):
my_char = ""
for j in range(8):
my_char += chr(int(back_2_ascii[i][(j * 8):((j + 1) * 8)], 2))
converted_message.append(my_char)
final_message = "".join(converted_message)
return final_message
# Generates a user specified length list of random characters
def rand_char_key(num):
my_list = []
for i in range(num):
my_rand = random.randint(33, 126)
my_list.append(chr(my_rand))
return my_list
# Simplified version of 3DES/2, see p.59 of the textbook listed in the header
def ThreeDESTwo(my_string):
K1 = rand_char_key(8)
enc, K2 = DES_Encrypt(my_string, K1)
dec = DES_Decrypt(enc, K2)
enc, K2 = DES_Encrypt(dec, K1)
return enc, K2
# Simplified version of the ANSI x9.17 PRNG Standard, see p.83 of the book listed in the header
def ansi_x917_std():
for i in range(4):
T = str(datetime.now())
if i == 0:
V = ThreeDESTwo("".join(rand_char_key(len(T))))
R = ThreeDESTwo(string_xor(V, ThreeDESTwo(T)))
V = ThreeDESTwo(string_xor(R, ThreeDESTwo(T)))
# The last element of the list
return R[-1][-1]
def main():
# connect to the client
s = socket.socket()
host = socket.gethostname()
print("Server Will start on host : ", host)
port = 8080
s.bind((host, port))
print("")
print("Server done binding to host and port Successfully")
print("")
print("Server is waiting for incoming Connection ")
s.listen(1)
conn, addr = s.accept()
print(addr, "Has connected to server and is now online ...")
print("")
# generate public key for server
server_key_pair = RSA.generate(1024)
s_public_key = server_key_pair.publickey()
print(s_public_key, "server public key")
# exchange the public keys with the client
# send the public key to the client
server_pub = s_public_key.exportKey("PEM")
conn.send(server_pub)
# Receive client public key
c_public_key = RSA.importKey((conn.recv(1024)).decode())
print("client public key received: ", c_public_key)
# receive secret key from the client
secret_key = ((PKCS1_OAEP.new(server_key_pair)).decrypt(conn.recv(1024))).decode()
# The decrypted secret key is used in DES.
while True:
out_mes1 = input("server: ")
out_mes, dec = DES_Encrypt(out_mes1, secret_key)
o_m = out_mes.encode()
conn.send(o_m)
for i in range(len(dec)):
dec1 = dec[i].encode()
conn.send(dec1)
# incoming message
incoming_message1 = conn.recv(2048)
in_mess = incoming_message1.decode()
dec_k2 = {}
dec_k1 = []
dec = []
for i in range(0, 16):
dec_k1 = conn.recv(2048)
dec_k2[i] = dec_k1.decode()
dec.append(dec_k2[i])
incoming_message = DES_Decrypt(in_mess, dec)
print("CLIENT: ", incoming_message)
print(" ")
main() | new_list.append("1")
| random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.