file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher, | peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client.clone());
drop(locked_state);
let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
.map(|_| ClientEvent::GossipTick)
.map_err(|err| error!("Error in gossip tick: {:?}", err));
let close_stream = receiver
.into_stream()
.map(|_: ()| ClientEvent::Done)
.map_err(|_| ());
let (sender, receiver) = mpsc::channel(64);
let producer = gossip_stream
.select(close_stream)
.take_while(|item| Ok(*item != ClientEvent::Done))
.for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
tokio::spawn(consumer);
tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id() != current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
} | event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(), | random_line_split |
ParseEssential.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 13:05:33 2019
@author: CLARG38
"""
import string, re,sys
import os
from collections import defaultdict
import glob
from cPickle import dump,load
import pandas as pd
import platform
class Annotation():
"""The class servers as a container for some properties and will be
the parent node and container for all Gene instances"""
if platform.node().startswith("greg-clarks-macbook"):
prefix="/Users/clarkgr1/Desktop/aws"
elif platform.node().startswith("WIN"):
prefix="C:/Users/CLARG38/Downloads/PlayDate/"
def __init__(self,genelist=[]):
self.genelist=genelist
self.definedgenes=bool(len(genelist))
self.fileprefix='_partial' if len(genelist) > 0 else '_all'
self.chromosomes=list(map(lambda s: str(s),range(1,20))) + ['X','Y','MT']
self.genes={}
self.uid2mgi={}
self.symbol2mgi={}
self.protein2mgi={}
self.ensembl2mgi={}
self.reannotate=False
self.link_ids_=False
self.miRNA_=False
self.lcRNA_=False
self.encode_=False
self.viability_=False #functional and viability info
self.ppi_=False
self.regflag=False
self.seqflag=False
self.OOGE_added=False
##Collections from excel spreadsheets are put into data-frames
##Will be reconciled/normalized later
self.lncRNA=pd.DataFrame()
def link_ids(self):
filename=os.path.join(self.prefix,'Processed',self.fileprefix+'_GeneInit.pkl')
if os.path.exists(filename) and not self.reannotate:
print "Gene info exists, retrieving it..."
self.genes=load(open(filename,'rb'))
if len(self.genes) < len(self.genelist):
print "We need to add more"
else:
self.link_ids_=True
return
##We have self.genes to act as a container for Gene instances
markers=os.path.join(self.prefix,"MRK_Sequence.rpt")
ncbiInfo=os.path.join(self.prefix,"MGI_Gene_Model_Coord.rpt")
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
ensgene=re.compile("ENSMUSG[0-9]{3,15}",flags=re.I|re.X)
with open(markers) as mrk:
mrk.next()
#skip the header
for line in mrk:
data=line.split("\t")
mgi,symbol,refseq=data[0],data[1],data[-4]
##Master Annotation
self.symbol2mgi[symbol]=mgi
refseq=filter(lambda l: refprot.search(l),refseq.split("|"))
if self.definedgenes:
if mgi in self.genelist or symbol in self.genelist:
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
else:
##This means doing ALL genes
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
with open(ncbiInfo) as ncbi:
ncbi.next()
##skip the header
for line in ncbi:
data=line.strip().split("\t")
mgi,symbol,uid,ens,chrom,start,end,strand=map(lambda j: data[j],[0,2,5,10,11,12,13,14])
##We must have defined mgi via markers, or skip iteration
try: self.genes[mgi]
except KeyError: continue
##Master Annotation
self.uid2mgi[uid]=mgi
if ensgene.search(ens):
self.ensembl2mgi[ens]=mgi
self.genes[mgi].ensembl=ens
self.genes[mgi].uid=uid
self.genes[mgi].chromosome=chrom
try:
self.genes[mgi].chromosome=chrom
self.genes[mgi].start=int(start)
self.genes[mgi].end=int(end)
self.genes[mgi].length=abs(int(end)-int(start))+1
self.genes[mgi].strand=strand
except ValueError:
try:
self.genes[mgi].chromosome=data[6]
self.genes[mgi].start=int(data[7])
self.genes[mgi].end=int(data[8])
self.genes[mgi].length=abs(int(data[8])-int(data[7]))+1
self.genes[mgi].strand=data[9]
self.genes[mgi].co_ord='ensembl'
except ValueError:
##Strike abhorrent abberation
self.genes[mgi].co_ord=None
self.genes={i:j for i,j in self.genes.iteritems() if j.chromosome and j.co_ord}
if not self.definedgenes:
ioA=open(os.path.join(self.prefix,"Processed",'symbol2mgi.pkl'),'wb')
ioB=open(os.path.join(self.prefix,"Processed",'uid2mgi.pkl'),'wb')
ioC=open(os.path.join(self.prefix,"Processed",'protein2mgi.pkl'),'wb')
ioD=open(os.path.join(self.prefix,"Processed",'ensembl2mgi.pkl'),'wb')
dump(self.symbol2mgi,ioA)
dump(self.uid2mgi,ioB)
dump(self.protein2mgi,ioC)
dump(self.ensembl2mgi,ioD)
ioA.close()
ioB.close()
ioC.close()
ioD.close()
filehandler=open(filename,'wb')
dump(self.genes,filehandler)
filehandler.close()
self.link_ids_=True
def add_viability(self):
assert self.link_ids_, "Must link ids prior to annotation at gene level"
##Functional annotation from ncbi
with open(os.path.join(self.prefix,"generifs_basic")) as rif:
for line in rif:
data=line.strip().split("\t")
if data[0] == "10090": ## ncbi taxonomy id for mouse
if re.search("lethal|fatal|dead|viable|essential",data[-1]) and not re.search("protect|resistance|rescue",data[-1]):
|
##Viability information from impc
with open(os.path.join(self.prefix,"Viability.csv")) as vb:
for line in vb:
data=line.strip().split(",")
if data[-1] not in ['Viable','Subviable','Lethal']:
continue
try:
self.genes[data[-2]].viability=data[-1]
except KeyError:
pass
self.viability_=True
def add_OOGE(self):
self.OOGE_added=True
with open(os.path.join(self.prefix,"Mus_musculus_OOGE.csv")) as deg:
header=deg.next().strip().split(",")
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
for line in deg:
data=line.strip().split(",")
ensb,symbol,gclass=data[0],data[1],data[-2]
try:
mgi=self.symbol2mgi[symbol]
except KeyError:
try:
mgi=self.ensembl2mgi[ensb]
except KeyError:
mgi=None
if not mgi:
continue
try:
self.genes[mgi]
except KeyError:
continue
if gclass == "NE":
self.genes[mgi].OOGEclass=0
else:
self.genes[mgi].OOGEclass=1
def add_DEG(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##
with open(os.path.join(self.prefix,"DEG_essential_mouse.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split("\t")
ensb,gclass=data[3:5]
try:
mgi=self.ensembl2mgi[ensb]
self.genes[mgi]
if gclass == "NE":
self.genes[mgi].DEGclass=0
else:
self.genes[mgi].DEGclass=1
except KeyError:
continue
def add_OOGE_human(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
def clean_class(split):
line=split.strip("\"")
cleanline=line.split(",")
classes=list(set(cleanline))
if len(set(classes)) == 1:
if classes[0] == "NE":
return 0
elif classes[0] == "E":
return 1
else:
return classes.count("E")/float(len(classes))
with open(os.path.join(self.prefix,"Homo_sapiens_OOGE.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split(",")
#print data
symbol,gclass=data[1],data[-2]
symbol=symbol.capitalize()
try:
mgi=self.symbol2mgi[symbol]
if data[-1].strip() == "Nonessential":
gclass=0
elif data[-1].strip() == "Essential":
gclass=1
else:
dinfo=line.split("\"")[-2].split(",")
gclass=round(dinfo.count("E")/float(len(dinfo)),3)
self.genes[mgi].HS_OOGEclass=gclass
#print symbol,mgi,self.genes[mgi].HS_OOGEclass
except KeyError:
continue
class Gene(Annotation):
"""Creating a data construct that will capture all information about a gene.
However we take a protein-centric view because there are multiple proteins found
for each gene identifier"""
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
def __init__(self,symbol):
##ids obtain from ncbi and mgi
self.id=symbol
self.proteins=[]
self.mgi=None
self.symbol=None
self.uid=None
self.ensembl=None
##ncbi co-ordinates
self.co_ord='ncbi'
self.chromosome=0
self.start=0
self.end=0
self.strand=''
##metrics based on protein network
self.degree=None
self.dc=None
self.annotation=None
self.viability=None
self.regulatory=[]
self.DEGclass=None
self.OOGEclass=None
self.HS_OOGEclass=None
def grab_sequences(excelfile):
filedf=pd.read_excel(excelfile)
##get list of genes - convert to string from numpy returned unicode
symbols=list(map(lambda s: str(s),filedf['Gene Marker Symbol_x'].values))
return symbols
if __name__ == "__main__":
##If we add a list of genes, we compile for only these
#
inputfile="C:/Users/CLARG38/Downloads/PlayDate/Aggregated.xlsx"
genelist=grab_sequences(inputfile)
ann=Annotation(genelist)
ann.fileprefix='essential'
ann.link_ids()
ann.add_OOGE()
ann.add_viability()
ann.add_DEG()
ann.add_OOGE_human()
vb=[]
lt=[]
data=[]
col=['MGI_ID','Symbol','EnsemblID','Viability','DEG_essentiality','Human_Essentiality(OOGE)','GeneRif_keyword']
for gene,i in ann.genes.iteritems():
if not i.viability and (i.OOGEclass or i.HS_OOGEclass):
print gene,i.symbol,i.ensembl,i.viability,i.annotation,i.DEGclass,i.OOGEclass,i.HS_OOGEclass
data.append([gene,i.symbol,i.ensembl,i.viability,i.DEGclass,i.HS_OOGEclass,i.annotation])
df=pd.DataFrame(data,columns=col)
df.to_excel(os.path.join(ann.prefix,'Processed','GeneInformation.xlsx'),index=False)
| try:
##We have to convert from uid to mgi (our chosen ID) first
mtch=re.search("lethal|fatal|dead|viable|essential",data[-1])
#print mtch.group(0),data[-1].split()
ant=mtch.group(0)
if not ant:
print "\n\n\n\n"
print re.search("lethal|fatal|dead|viable|essential",data[-1])
lclmgi=self.uid2mgi[data[1].strip()]
self.genes[lclmgi].annotation=ant
except KeyError:
pass | conditional_block |
ParseEssential.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 13:05:33 2019
@author: CLARG38
"""
import string, re,sys
import os
from collections import defaultdict
import glob
from cPickle import dump,load
import pandas as pd
import platform
class Annotation():
"""The class servers as a container for some properties and will be
the parent node and container for all Gene instances"""
if platform.node().startswith("greg-clarks-macbook"):
prefix="/Users/clarkgr1/Desktop/aws"
elif platform.node().startswith("WIN"):
prefix="C:/Users/CLARG38/Downloads/PlayDate/"
def __init__(self,genelist=[]):
self.genelist=genelist
self.definedgenes=bool(len(genelist))
self.fileprefix='_partial' if len(genelist) > 0 else '_all'
self.chromosomes=list(map(lambda s: str(s),range(1,20))) + ['X','Y','MT']
self.genes={}
self.uid2mgi={}
self.symbol2mgi={}
self.protein2mgi={}
self.ensembl2mgi={}
self.reannotate=False
self.link_ids_=False
self.miRNA_=False
self.lcRNA_=False
self.encode_=False
self.viability_=False #functional and viability info
self.ppi_=False
self.regflag=False
self.seqflag=False
self.OOGE_added=False
##Collections from excel spreadsheets are put into data-frames
##Will be reconciled/normalized later
self.lncRNA=pd.DataFrame()
def link_ids(self):
filename=os.path.join(self.prefix,'Processed',self.fileprefix+'_GeneInit.pkl')
if os.path.exists(filename) and not self.reannotate:
print "Gene info exists, retrieving it..."
self.genes=load(open(filename,'rb'))
if len(self.genes) < len(self.genelist):
print "We need to add more"
else:
self.link_ids_=True
return
##We have self.genes to act as a container for Gene instances
markers=os.path.join(self.prefix,"MRK_Sequence.rpt")
ncbiInfo=os.path.join(self.prefix,"MGI_Gene_Model_Coord.rpt")
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
ensgene=re.compile("ENSMUSG[0-9]{3,15}",flags=re.I|re.X)
with open(markers) as mrk:
mrk.next()
#skip the header
for line in mrk:
data=line.split("\t")
mgi,symbol,refseq=data[0],data[1],data[-4]
##Master Annotation
self.symbol2mgi[symbol]=mgi
refseq=filter(lambda l: refprot.search(l),refseq.split("|"))
if self.definedgenes:
if mgi in self.genelist or symbol in self.genelist:
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
else:
##This means doing ALL genes
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
with open(ncbiInfo) as ncbi:
ncbi.next()
##skip the header
for line in ncbi:
data=line.strip().split("\t")
mgi,symbol,uid,ens,chrom,start,end,strand=map(lambda j: data[j],[0,2,5,10,11,12,13,14])
##We must have defined mgi via markers, or skip iteration
try: self.genes[mgi]
except KeyError: continue
##Master Annotation
self.uid2mgi[uid]=mgi
if ensgene.search(ens):
self.ensembl2mgi[ens]=mgi
self.genes[mgi].ensembl=ens
self.genes[mgi].uid=uid
self.genes[mgi].chromosome=chrom
try:
self.genes[mgi].chromosome=chrom
self.genes[mgi].start=int(start)
self.genes[mgi].end=int(end)
self.genes[mgi].length=abs(int(end)-int(start))+1
self.genes[mgi].strand=strand
except ValueError:
try:
self.genes[mgi].chromosome=data[6]
self.genes[mgi].start=int(data[7])
self.genes[mgi].end=int(data[8])
self.genes[mgi].length=abs(int(data[8])-int(data[7]))+1
self.genes[mgi].strand=data[9]
self.genes[mgi].co_ord='ensembl'
except ValueError:
##Strike abhorrent abberation
self.genes[mgi].co_ord=None
self.genes={i:j for i,j in self.genes.iteritems() if j.chromosome and j.co_ord}
if not self.definedgenes:
ioA=open(os.path.join(self.prefix,"Processed",'symbol2mgi.pkl'),'wb')
ioB=open(os.path.join(self.prefix,"Processed",'uid2mgi.pkl'),'wb')
ioC=open(os.path.join(self.prefix,"Processed",'protein2mgi.pkl'),'wb')
ioD=open(os.path.join(self.prefix,"Processed",'ensembl2mgi.pkl'),'wb')
dump(self.symbol2mgi,ioA)
dump(self.uid2mgi,ioB)
dump(self.protein2mgi,ioC)
dump(self.ensembl2mgi,ioD)
ioA.close()
ioB.close()
ioC.close()
ioD.close()
filehandler=open(filename,'wb')
dump(self.genes,filehandler)
filehandler.close()
self.link_ids_=True
def add_viability(self):
assert self.link_ids_, "Must link ids prior to annotation at gene level"
##Functional annotation from ncbi
with open(os.path.join(self.prefix,"generifs_basic")) as rif:
for line in rif:
data=line.strip().split("\t")
if data[0] == "10090": ## ncbi taxonomy id for mouse
if re.search("lethal|fatal|dead|viable|essential",data[-1]) and not re.search("protect|resistance|rescue",data[-1]):
try:
##We have to convert from uid to mgi (our chosen ID) first
mtch=re.search("lethal|fatal|dead|viable|essential",data[-1])
#print mtch.group(0),data[-1].split()
ant=mtch.group(0)
if not ant:
print "\n\n\n\n"
print re.search("lethal|fatal|dead|viable|essential",data[-1])
lclmgi=self.uid2mgi[data[1].strip()]
self.genes[lclmgi].annotation=ant
except KeyError:
pass
##Viability information from impc
with open(os.path.join(self.prefix,"Viability.csv")) as vb:
for line in vb:
data=line.strip().split(",")
if data[-1] not in ['Viable','Subviable','Lethal']:
continue
try:
self.genes[data[-2]].viability=data[-1]
except KeyError:
pass
self.viability_=True
def add_OOGE(self):
self.OOGE_added=True
with open(os.path.join(self.prefix,"Mus_musculus_OOGE.csv")) as deg:
header=deg.next().strip().split(",")
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
for line in deg:
data=line.strip().split(",")
ensb,symbol,gclass=data[0],data[1],data[-2]
try:
mgi=self.symbol2mgi[symbol]
except KeyError:
try:
mgi=self.ensembl2mgi[ensb]
except KeyError:
mgi=None
if not mgi:
continue
try:
self.genes[mgi]
except KeyError:
continue
if gclass == "NE":
self.genes[mgi].OOGEclass=0
else:
self.genes[mgi].OOGEclass=1
def add_DEG(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##
with open(os.path.join(self.prefix,"DEG_essential_mouse.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split("\t")
ensb,gclass=data[3:5]
try:
mgi=self.ensembl2mgi[ensb]
self.genes[mgi]
if gclass == "NE":
self.genes[mgi].DEGclass=0
else:
self.genes[mgi].DEGclass=1
except KeyError:
continue
def add_OOGE_human(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
def clean_class(split):
line=split.strip("\"")
cleanline=line.split(",")
classes=list(set(cleanline))
if len(set(classes)) == 1:
if classes[0] == "NE":
return 0
elif classes[0] == "E":
return 1
else:
return classes.count("E")/float(len(classes))
with open(os.path.join(self.prefix,"Homo_sapiens_OOGE.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split(",")
#print data
symbol,gclass=data[1],data[-2]
symbol=symbol.capitalize()
try:
mgi=self.symbol2mgi[symbol]
if data[-1].strip() == "Nonessential":
gclass=0
elif data[-1].strip() == "Essential":
gclass=1
else:
dinfo=line.split("\"")[-2].split(",")
gclass=round(dinfo.count("E")/float(len(dinfo)),3)
self.genes[mgi].HS_OOGEclass=gclass
#print symbol,mgi,self.genes[mgi].HS_OOGEclass
except KeyError:
continue
class | (Annotation):
"""Creating a data construct that will capture all information about a gene.
However we take a protein-centric view because there are multiple proteins found
for each gene identifier"""
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
def __init__(self,symbol):
##ids obtain from ncbi and mgi
self.id=symbol
self.proteins=[]
self.mgi=None
self.symbol=None
self.uid=None
self.ensembl=None
##ncbi co-ordinates
self.co_ord='ncbi'
self.chromosome=0
self.start=0
self.end=0
self.strand=''
##metrics based on protein network
self.degree=None
self.dc=None
self.annotation=None
self.viability=None
self.regulatory=[]
self.DEGclass=None
self.OOGEclass=None
self.HS_OOGEclass=None
def grab_sequences(excelfile):
filedf=pd.read_excel(excelfile)
##get list of genes - convert to string from numpy returned unicode
symbols=list(map(lambda s: str(s),filedf['Gene Marker Symbol_x'].values))
return symbols
if __name__ == "__main__":
##If we add a list of genes, we compile for only these
#
inputfile="C:/Users/CLARG38/Downloads/PlayDate/Aggregated.xlsx"
genelist=grab_sequences(inputfile)
ann=Annotation(genelist)
ann.fileprefix='essential'
ann.link_ids()
ann.add_OOGE()
ann.add_viability()
ann.add_DEG()
ann.add_OOGE_human()
vb=[]
lt=[]
data=[]
col=['MGI_ID','Symbol','EnsemblID','Viability','DEG_essentiality','Human_Essentiality(OOGE)','GeneRif_keyword']
for gene,i in ann.genes.iteritems():
if not i.viability and (i.OOGEclass or i.HS_OOGEclass):
print gene,i.symbol,i.ensembl,i.viability,i.annotation,i.DEGclass,i.OOGEclass,i.HS_OOGEclass
data.append([gene,i.symbol,i.ensembl,i.viability,i.DEGclass,i.HS_OOGEclass,i.annotation])
df=pd.DataFrame(data,columns=col)
df.to_excel(os.path.join(ann.prefix,'Processed','GeneInformation.xlsx'),index=False)
| Gene | identifier_name |
ParseEssential.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 13:05:33 2019
@author: CLARG38
"""
import string, re,sys
import os
from collections import defaultdict
import glob
from cPickle import dump,load
import pandas as pd
import platform
class Annotation():
"""The class servers as a container for some properties and will be
the parent node and container for all Gene instances"""
if platform.node().startswith("greg-clarks-macbook"):
prefix="/Users/clarkgr1/Desktop/aws"
elif platform.node().startswith("WIN"):
prefix="C:/Users/CLARG38/Downloads/PlayDate/"
def __init__(self,genelist=[]):
self.genelist=genelist
self.definedgenes=bool(len(genelist))
self.fileprefix='_partial' if len(genelist) > 0 else '_all'
self.chromosomes=list(map(lambda s: str(s),range(1,20))) + ['X','Y','MT']
self.genes={}
self.uid2mgi={}
self.symbol2mgi={}
self.protein2mgi={}
self.ensembl2mgi={}
self.reannotate=False
self.link_ids_=False
| self.viability_=False #functional and viability info
self.ppi_=False
self.regflag=False
self.seqflag=False
self.OOGE_added=False
##Collections from excel spreadsheets are put into data-frames
##Will be reconciled/normalized later
self.lncRNA=pd.DataFrame()
def link_ids(self):
filename=os.path.join(self.prefix,'Processed',self.fileprefix+'_GeneInit.pkl')
if os.path.exists(filename) and not self.reannotate:
print "Gene info exists, retrieving it..."
self.genes=load(open(filename,'rb'))
if len(self.genes) < len(self.genelist):
print "We need to add more"
else:
self.link_ids_=True
return
##We have self.genes to act as a container for Gene instances
markers=os.path.join(self.prefix,"MRK_Sequence.rpt")
ncbiInfo=os.path.join(self.prefix,"MGI_Gene_Model_Coord.rpt")
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
ensgene=re.compile("ENSMUSG[0-9]{3,15}",flags=re.I|re.X)
with open(markers) as mrk:
mrk.next()
#skip the header
for line in mrk:
data=line.split("\t")
mgi,symbol,refseq=data[0],data[1],data[-4]
##Master Annotation
self.symbol2mgi[symbol]=mgi
refseq=filter(lambda l: refprot.search(l),refseq.split("|"))
if self.definedgenes:
if mgi in self.genelist or symbol in self.genelist:
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
else:
##This means doing ALL genes
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
with open(ncbiInfo) as ncbi:
ncbi.next()
##skip the header
for line in ncbi:
data=line.strip().split("\t")
mgi,symbol,uid,ens,chrom,start,end,strand=map(lambda j: data[j],[0,2,5,10,11,12,13,14])
##We must have defined mgi via markers, or skip iteration
try: self.genes[mgi]
except KeyError: continue
##Master Annotation
self.uid2mgi[uid]=mgi
if ensgene.search(ens):
self.ensembl2mgi[ens]=mgi
self.genes[mgi].ensembl=ens
self.genes[mgi].uid=uid
self.genes[mgi].chromosome=chrom
try:
self.genes[mgi].chromosome=chrom
self.genes[mgi].start=int(start)
self.genes[mgi].end=int(end)
self.genes[mgi].length=abs(int(end)-int(start))+1
self.genes[mgi].strand=strand
except ValueError:
try:
self.genes[mgi].chromosome=data[6]
self.genes[mgi].start=int(data[7])
self.genes[mgi].end=int(data[8])
self.genes[mgi].length=abs(int(data[8])-int(data[7]))+1
self.genes[mgi].strand=data[9]
self.genes[mgi].co_ord='ensembl'
except ValueError:
##Strike abhorrent abberation
self.genes[mgi].co_ord=None
self.genes={i:j for i,j in self.genes.iteritems() if j.chromosome and j.co_ord}
if not self.definedgenes:
ioA=open(os.path.join(self.prefix,"Processed",'symbol2mgi.pkl'),'wb')
ioB=open(os.path.join(self.prefix,"Processed",'uid2mgi.pkl'),'wb')
ioC=open(os.path.join(self.prefix,"Processed",'protein2mgi.pkl'),'wb')
ioD=open(os.path.join(self.prefix,"Processed",'ensembl2mgi.pkl'),'wb')
dump(self.symbol2mgi,ioA)
dump(self.uid2mgi,ioB)
dump(self.protein2mgi,ioC)
dump(self.ensembl2mgi,ioD)
ioA.close()
ioB.close()
ioC.close()
ioD.close()
filehandler=open(filename,'wb')
dump(self.genes,filehandler)
filehandler.close()
self.link_ids_=True
def add_viability(self):
assert self.link_ids_, "Must link ids prior to annotation at gene level"
##Functional annotation from ncbi
with open(os.path.join(self.prefix,"generifs_basic")) as rif:
for line in rif:
data=line.strip().split("\t")
if data[0] == "10090": ## ncbi taxonomy id for mouse
if re.search("lethal|fatal|dead|viable|essential",data[-1]) and not re.search("protect|resistance|rescue",data[-1]):
try:
##We have to convert from uid to mgi (our chosen ID) first
mtch=re.search("lethal|fatal|dead|viable|essential",data[-1])
#print mtch.group(0),data[-1].split()
ant=mtch.group(0)
if not ant:
print "\n\n\n\n"
print re.search("lethal|fatal|dead|viable|essential",data[-1])
lclmgi=self.uid2mgi[data[1].strip()]
self.genes[lclmgi].annotation=ant
except KeyError:
pass
##Viability information from impc
with open(os.path.join(self.prefix,"Viability.csv")) as vb:
for line in vb:
data=line.strip().split(",")
if data[-1] not in ['Viable','Subviable','Lethal']:
continue
try:
self.genes[data[-2]].viability=data[-1]
except KeyError:
pass
self.viability_=True
def add_OOGE(self):
self.OOGE_added=True
with open(os.path.join(self.prefix,"Mus_musculus_OOGE.csv")) as deg:
header=deg.next().strip().split(",")
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
for line in deg:
data=line.strip().split(",")
ensb,symbol,gclass=data[0],data[1],data[-2]
try:
mgi=self.symbol2mgi[symbol]
except KeyError:
try:
mgi=self.ensembl2mgi[ensb]
except KeyError:
mgi=None
if not mgi:
continue
try:
self.genes[mgi]
except KeyError:
continue
if gclass == "NE":
self.genes[mgi].OOGEclass=0
else:
self.genes[mgi].OOGEclass=1
def add_DEG(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##
with open(os.path.join(self.prefix,"DEG_essential_mouse.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split("\t")
ensb,gclass=data[3:5]
try:
mgi=self.ensembl2mgi[ensb]
self.genes[mgi]
if gclass == "NE":
self.genes[mgi].DEGclass=0
else:
self.genes[mgi].DEGclass=1
except KeyError:
continue
def add_OOGE_human(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
def clean_class(split):
line=split.strip("\"")
cleanline=line.split(",")
classes=list(set(cleanline))
if len(set(classes)) == 1:
if classes[0] == "NE":
return 0
elif classes[0] == "E":
return 1
else:
return classes.count("E")/float(len(classes))
with open(os.path.join(self.prefix,"Homo_sapiens_OOGE.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split(",")
#print data
symbol,gclass=data[1],data[-2]
symbol=symbol.capitalize()
try:
mgi=self.symbol2mgi[symbol]
if data[-1].strip() == "Nonessential":
gclass=0
elif data[-1].strip() == "Essential":
gclass=1
else:
dinfo=line.split("\"")[-2].split(",")
gclass=round(dinfo.count("E")/float(len(dinfo)),3)
self.genes[mgi].HS_OOGEclass=gclass
#print symbol,mgi,self.genes[mgi].HS_OOGEclass
except KeyError:
continue
class Gene(Annotation):
"""Creating a data construct that will capture all information about a gene.
However we take a protein-centric view because there are multiple proteins found
for each gene identifier"""
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
def __init__(self,symbol):
##ids obtain from ncbi and mgi
self.id=symbol
self.proteins=[]
self.mgi=None
self.symbol=None
self.uid=None
self.ensembl=None
##ncbi co-ordinates
self.co_ord='ncbi'
self.chromosome=0
self.start=0
self.end=0
self.strand=''
##metrics based on protein network
self.degree=None
self.dc=None
self.annotation=None
self.viability=None
self.regulatory=[]
self.DEGclass=None
self.OOGEclass=None
self.HS_OOGEclass=None
def grab_sequences(excelfile):
filedf=pd.read_excel(excelfile)
##get list of genes - convert to string from numpy returned unicode
symbols=list(map(lambda s: str(s),filedf['Gene Marker Symbol_x'].values))
return symbols
if __name__ == "__main__":
##If we add a list of genes, we compile for only these
#
inputfile="C:/Users/CLARG38/Downloads/PlayDate/Aggregated.xlsx"
genelist=grab_sequences(inputfile)
ann=Annotation(genelist)
ann.fileprefix='essential'
ann.link_ids()
ann.add_OOGE()
ann.add_viability()
ann.add_DEG()
ann.add_OOGE_human()
vb=[]
lt=[]
data=[]
col=['MGI_ID','Symbol','EnsemblID','Viability','DEG_essentiality','Human_Essentiality(OOGE)','GeneRif_keyword']
for gene,i in ann.genes.iteritems():
if not i.viability and (i.OOGEclass or i.HS_OOGEclass):
print gene,i.symbol,i.ensembl,i.viability,i.annotation,i.DEGclass,i.OOGEclass,i.HS_OOGEclass
data.append([gene,i.symbol,i.ensembl,i.viability,i.DEGclass,i.HS_OOGEclass,i.annotation])
df=pd.DataFrame(data,columns=col)
df.to_excel(os.path.join(ann.prefix,'Processed','GeneInformation.xlsx'),index=False) | self.miRNA_=False
self.lcRNA_=False
self.encode_=False
| random_line_split |
ParseEssential.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 13:05:33 2019
@author: CLARG38
"""
import string, re,sys
import os
from collections import defaultdict
import glob
from cPickle import dump,load
import pandas as pd
import platform
class Annotation():
"""The class servers as a container for some properties and will be
the parent node and container for all Gene instances"""
if platform.node().startswith("greg-clarks-macbook"):
prefix="/Users/clarkgr1/Desktop/aws"
elif platform.node().startswith("WIN"):
prefix="C:/Users/CLARG38/Downloads/PlayDate/"
def __init__(self,genelist=[]):
self.genelist=genelist
self.definedgenes=bool(len(genelist))
self.fileprefix='_partial' if len(genelist) > 0 else '_all'
self.chromosomes=list(map(lambda s: str(s),range(1,20))) + ['X','Y','MT']
self.genes={}
self.uid2mgi={}
self.symbol2mgi={}
self.protein2mgi={}
self.ensembl2mgi={}
self.reannotate=False
self.link_ids_=False
self.miRNA_=False
self.lcRNA_=False
self.encode_=False
self.viability_=False #functional and viability info
self.ppi_=False
self.regflag=False
self.seqflag=False
self.OOGE_added=False
##Collections from excel spreadsheets are put into data-frames
##Will be reconciled/normalized later
self.lncRNA=pd.DataFrame()
def link_ids(self):
filename=os.path.join(self.prefix,'Processed',self.fileprefix+'_GeneInit.pkl')
if os.path.exists(filename) and not self.reannotate:
print "Gene info exists, retrieving it..."
self.genes=load(open(filename,'rb'))
if len(self.genes) < len(self.genelist):
print "We need to add more"
else:
self.link_ids_=True
return
##We have self.genes to act as a container for Gene instances
markers=os.path.join(self.prefix,"MRK_Sequence.rpt")
ncbiInfo=os.path.join(self.prefix,"MGI_Gene_Model_Coord.rpt")
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
ensgene=re.compile("ENSMUSG[0-9]{3,15}",flags=re.I|re.X)
with open(markers) as mrk:
mrk.next()
#skip the header
for line in mrk:
data=line.split("\t")
mgi,symbol,refseq=data[0],data[1],data[-4]
##Master Annotation
self.symbol2mgi[symbol]=mgi
refseq=filter(lambda l: refprot.search(l),refseq.split("|"))
if self.definedgenes:
if mgi in self.genelist or symbol in self.genelist:
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
else:
##This means doing ALL genes
ins=Gene(symbol)
ins.mgi=mgi
ins.symbol=symbol
ins.proteins=refseq
for protein in refseq:
self.protein2mgi[protein]=mgi
self.genes[mgi]=ins
with open(ncbiInfo) as ncbi:
ncbi.next()
##skip the header
for line in ncbi:
data=line.strip().split("\t")
mgi,symbol,uid,ens,chrom,start,end,strand=map(lambda j: data[j],[0,2,5,10,11,12,13,14])
##We must have defined mgi via markers, or skip iteration
try: self.genes[mgi]
except KeyError: continue
##Master Annotation
self.uid2mgi[uid]=mgi
if ensgene.search(ens):
self.ensembl2mgi[ens]=mgi
self.genes[mgi].ensembl=ens
self.genes[mgi].uid=uid
self.genes[mgi].chromosome=chrom
try:
self.genes[mgi].chromosome=chrom
self.genes[mgi].start=int(start)
self.genes[mgi].end=int(end)
self.genes[mgi].length=abs(int(end)-int(start))+1
self.genes[mgi].strand=strand
except ValueError:
try:
self.genes[mgi].chromosome=data[6]
self.genes[mgi].start=int(data[7])
self.genes[mgi].end=int(data[8])
self.genes[mgi].length=abs(int(data[8])-int(data[7]))+1
self.genes[mgi].strand=data[9]
self.genes[mgi].co_ord='ensembl'
except ValueError:
##Strike abhorrent abberation
self.genes[mgi].co_ord=None
self.genes={i:j for i,j in self.genes.iteritems() if j.chromosome and j.co_ord}
if not self.definedgenes:
ioA=open(os.path.join(self.prefix,"Processed",'symbol2mgi.pkl'),'wb')
ioB=open(os.path.join(self.prefix,"Processed",'uid2mgi.pkl'),'wb')
ioC=open(os.path.join(self.prefix,"Processed",'protein2mgi.pkl'),'wb')
ioD=open(os.path.join(self.prefix,"Processed",'ensembl2mgi.pkl'),'wb')
dump(self.symbol2mgi,ioA)
dump(self.uid2mgi,ioB)
dump(self.protein2mgi,ioC)
dump(self.ensembl2mgi,ioD)
ioA.close()
ioB.close()
ioC.close()
ioD.close()
filehandler=open(filename,'wb')
dump(self.genes,filehandler)
filehandler.close()
self.link_ids_=True
def add_viability(self):
assert self.link_ids_, "Must link ids prior to annotation at gene level"
##Functional annotation from ncbi
with open(os.path.join(self.prefix,"generifs_basic")) as rif:
for line in rif:
data=line.strip().split("\t")
if data[0] == "10090": ## ncbi taxonomy id for mouse
if re.search("lethal|fatal|dead|viable|essential",data[-1]) and not re.search("protect|resistance|rescue",data[-1]):
try:
##We have to convert from uid to mgi (our chosen ID) first
mtch=re.search("lethal|fatal|dead|viable|essential",data[-1])
#print mtch.group(0),data[-1].split()
ant=mtch.group(0)
if not ant:
print "\n\n\n\n"
print re.search("lethal|fatal|dead|viable|essential",data[-1])
lclmgi=self.uid2mgi[data[1].strip()]
self.genes[lclmgi].annotation=ant
except KeyError:
pass
##Viability information from impc
with open(os.path.join(self.prefix,"Viability.csv")) as vb:
for line in vb:
data=line.strip().split(",")
if data[-1] not in ['Viable','Subviable','Lethal']:
continue
try:
self.genes[data[-2]].viability=data[-1]
except KeyError:
pass
self.viability_=True
def add_OOGE(self):
self.OOGE_added=True
with open(os.path.join(self.prefix,"Mus_musculus_OOGE.csv")) as deg:
header=deg.next().strip().split(",")
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
for line in deg:
data=line.strip().split(",")
ensb,symbol,gclass=data[0],data[1],data[-2]
try:
mgi=self.symbol2mgi[symbol]
except KeyError:
try:
mgi=self.ensembl2mgi[ensb]
except KeyError:
mgi=None
if not mgi:
continue
try:
self.genes[mgi]
except KeyError:
continue
if gclass == "NE":
self.genes[mgi].OOGEclass=0
else:
self.genes[mgi].OOGEclass=1
def add_DEG(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##
with open(os.path.join(self.prefix,"DEG_essential_mouse.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split("\t")
ensb,gclass=data[3:5]
try:
mgi=self.ensembl2mgi[ensb]
self.genes[mgi]
if gclass == "NE":
self.genes[mgi].DEGclass=0
else:
self.genes[mgi].DEGclass=1
except KeyError:
continue
def add_OOGE_human(self):
assert self.OOGE_added,"Must run MM to augment gene identifier relationships"
##locus,symbols,datasets,datasetIDs,essentiality status,essentiality consensus
def clean_class(split):
|
with open(os.path.join(self.prefix,"Homo_sapiens_OOGE.csv")) as deg:
##no header here
for line in deg:
data=line.strip().split(",")
#print data
symbol,gclass=data[1],data[-2]
symbol=symbol.capitalize()
try:
mgi=self.symbol2mgi[symbol]
if data[-1].strip() == "Nonessential":
gclass=0
elif data[-1].strip() == "Essential":
gclass=1
else:
dinfo=line.split("\"")[-2].split(",")
gclass=round(dinfo.count("E")/float(len(dinfo)),3)
self.genes[mgi].HS_OOGEclass=gclass
#print symbol,mgi,self.genes[mgi].HS_OOGEclass
except KeyError:
continue
class Gene(Annotation):
"""Creating a data construct that will capture all information about a gene.
However we take a protein-centric view because there are multiple proteins found
for each gene identifier"""
refprot=re.compile("ENSMUSP[0-9]{3,15}",flags=re.I|re.X)
def __init__(self,symbol):
##ids obtain from ncbi and mgi
self.id=symbol
self.proteins=[]
self.mgi=None
self.symbol=None
self.uid=None
self.ensembl=None
##ncbi co-ordinates
self.co_ord='ncbi'
self.chromosome=0
self.start=0
self.end=0
self.strand=''
##metrics based on protein network
self.degree=None
self.dc=None
self.annotation=None
self.viability=None
self.regulatory=[]
self.DEGclass=None
self.OOGEclass=None
self.HS_OOGEclass=None
def grab_sequences(excelfile):
filedf=pd.read_excel(excelfile)
##get list of genes - convert to string from numpy returned unicode
symbols=list(map(lambda s: str(s),filedf['Gene Marker Symbol_x'].values))
return symbols
if __name__ == "__main__":
##If we add a list of genes, we compile for only these
#
inputfile="C:/Users/CLARG38/Downloads/PlayDate/Aggregated.xlsx"
genelist=grab_sequences(inputfile)
ann=Annotation(genelist)
ann.fileprefix='essential'
ann.link_ids()
ann.add_OOGE()
ann.add_viability()
ann.add_DEG()
ann.add_OOGE_human()
vb=[]
lt=[]
data=[]
col=['MGI_ID','Symbol','EnsemblID','Viability','DEG_essentiality','Human_Essentiality(OOGE)','GeneRif_keyword']
for gene,i in ann.genes.iteritems():
if not i.viability and (i.OOGEclass or i.HS_OOGEclass):
print gene,i.symbol,i.ensembl,i.viability,i.annotation,i.DEGclass,i.OOGEclass,i.HS_OOGEclass
data.append([gene,i.symbol,i.ensembl,i.viability,i.DEGclass,i.HS_OOGEclass,i.annotation])
df=pd.DataFrame(data,columns=col)
df.to_excel(os.path.join(ann.prefix,'Processed','GeneInformation.xlsx'),index=False)
| line=split.strip("\"")
cleanline=line.split(",")
classes=list(set(cleanline))
if len(set(classes)) == 1:
if classes[0] == "NE":
return 0
elif classes[0] == "E":
return 1
else:
return classes.count("E")/float(len(classes)) | identifier_body |
main.go | package main
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
"html/template"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
_ "github.com/denisenkom/go-mssqldb"
"github.com/julienschmidt/httprouter"
sendgrid "github.com/sendgrid/sendgrid-go"
"github.com/sendgrid/sendgrid-go/helpers/mail"
)
//Config file structure
type Config struct {
Server string `json:"server"`
User string `json:"user"`
Pwd string `json:"pwd"`
Db string `json:"db"`
Port int `json:"port"`
SendgridAPIkey string `json:"sendgridAPIkey"`
EmailName string `json:"emailName"`
EmailAddress string `json:"emailAddress"`
}
type report struct {
TransportID int
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
DateDiff int
ServiceType string
}
type data struct {
ReportsCollect []report
ReportsDeliver []report
ReportsCollectFuture []report
ReportsDeliverFuture []report
ReportsAll []report
ServerTime string
}
type jobdetails struct {
JobNo string `json:"jobNo"`
Department string `json:"department"`
CustomerName string `json:"customerName"`
JobDescription string `json:"jobDescription"`
}
type validation struct {
DisplayMessage string
SubmitMsg string
JobDescription string
JobNo string
Department string
Customer string
TransportDate string
ActionFlag string
ServiceType string
}
type jobEdit struct {
DisplayMessage string
TransportID string
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
ServiceType string
}
var config Config
var db *sql.DB
func init() {
// Load application configuration from settings file
file, err := os.Open("config.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
err = json.NewDecoder(file).Decode(&config)
if err != nil {
log.Fatal(err)
}
// Connect to the database and test connection
connection := fmt.Sprintf("Server=%s;User ID=%s;Password=%s;database=%s;",
config.Server,
config.User,
config.Pwd,
config.Db)
db, err = sql.Open("mssql", connection)
if err != nil {
log.Fatal(err)
}
if err = db.Ping(); err != nil {
log.Fatal(err)
}
}
func main() {
router := httprouter.New()
router.ServeFiles("/assets/*filepath", http.Dir("./assets"))
router.GET("/transport/create", transportCreate)
router.POST("/transport/create", transportCreate)
router.GET("/transport/create/:jobno", createGetJobDetails)
router.GET("/transport/complete", completeList)
router.GET("/transport/complete/:transportid", completeJob)
router.GET("/transport/edit/:transportid", editJob)
router.POST("/transport/edit", editJob)
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(config.Port), router))
}
func transportCreate(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var output validation
t, _ := template.ParseFiles("assets/templates/create.tpl")
output.ActionFlag = "C"
if r.Method == "POST" {
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
fmt.Println(output.ServiceType)
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql := `INSERT INTO transport (action_flag, department, job_no, customer, job_description, transport_date, service_type, is_active)
VALUES (?, ?, ?, ?, ?, ?, ?, 'Y')`
d, _ := time.Parse(shortForm, output.TransportDate)
fmt.Println(output.ServiceType)
_, err := db.Exec(sql, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType)
if err != nil {
log.Fatal(err)
}
output.DisplayMessage = "Record submitted"
var email bytes.Buffer
t, _ := template.ParseFiles("assets/templates/email.tpl")
err = t.Execute(&email, output)
if err != nil {
log.Fatal(err)
}
m := mail.NewV3Mail()
m.SetFrom(mail.NewEmail("Rewinds & J Windsor Ltd", "donotreply@rjweng.com"))
m.Subject = fmt.Sprintf("New transportation job")
p := mail.NewPersonalization()
tos := []*mail.Email{
mail.NewEmail(config.EmailName, config.EmailAddress),
}
p.AddTos(tos...)
m.AddPersonalizations(p)
m.AddContent(mail.NewContent("text/html", email.String()))
request := sendgrid.GetRequest(config.SendgridAPIkey, "/v3/mail/send", "https://api.sendgrid.com")
request.Method = "POST"
request.Body = mail.GetRequestBody(m)
response, err := sendgrid.API(request)
if err != nil {
log.Fatal(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
//set outputs to blank string
output.ActionFlag = ""
output.Department = ""
output.JobNo = ""
output.Customer = ""
output.JobDescription = ""
output.TransportDate = ""
output.ServiceType = ""
}
}
err := t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
func createGetJobDetails(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
JobNo := p.ByName("jobno")
if len(JobNo) > 10 {
fmt.Println("Job number greater than 10 characters")
} else if len(JobNo) < 5 {
fmt.Println("Job number less than 5 characters")
} else {
var output jobdetails
sql1 := `SELECT sc.[name], LEFT(j.contract, 2)
AS department, LTRIM(RTRIM(j.description1)) AS job_description
FROM rjw.scheme.jcmastm j
JOIN rjw.scheme.slcustm sc ON j.customer = sc.customer
WHERE j.job_code = ?`
err := db.QueryRow(sql1, JobNo).Scan(&output.CustomerName, &output.Department, &output.JobDescription)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
b, _ := json.Marshal(output)
fmt.Fprintf(w, string(b))
}
}
func completeList(w http.ResponseWriter, r *http.Request, _ httprouter.Params) |
func completeJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
sql2 := `UPDATE transport
SET is_active = 'N'
WHERE transport_id = ?`
_, err := db.Exec(sql2, TransportID)
if err != nil {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
func editJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
var output jobEdit
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, CONVERT(NVARCHAR(10), transport_date, 120), service_type
FROM transport
WHERE transport_id = ?`
err := db.QueryRow(sql1, TransportID).Scan(&output.TransportID, &output.ActionFlag, &output.JobNo, &output.Customer, &output.Department, &output.JobDescription, &output.TransportDate, &output.ServiceType)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
if r.Method == "POST" {
output.TransportID = r.FormValue("transport_id")
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql2 := `UPDATE transport
SET action_flag = ?, department = ?, job_no = ?, customer = ?, job_description = ?, transport_date = ?, service_type = ?
WHERE transport_id = ?`
d, _ := time.Parse(shortForm, output.TransportDate)
_, err := db.Exec(sql2, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType, output.TransportID)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
}
t, err := template.ParseFiles("assets/templates/edit.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
| {
var output data
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, COALESCE(CONVERT(NVARCHAR(11), transport_date, 106), '-'), service_type
FROM transport
WHERE is_active = 'Y'
ORDER BY transport_date DESC`
rows, err := db.Query(sql1)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
output.ReportsAll = make([]report, 0)
for rows.Next() {
var r report
err := rows.Scan(&r.TransportID, &r.ActionFlag, &r.JobNo, &r.Customer, &r.Department, &r.JobDescription, &r.TransportDate, &r.ServiceType)
if err != nil {
log.Fatal(err)
}
if r.ActionFlag == "C" {
r.ActionFlag = "Collect"
} else if r.ActionFlag == "D" {
r.ActionFlag = "Deliver"
} else {
r.ActionFlag = "Future"
}
if r.ServiceType == "R" {
r.ServiceType = "Regular"
} else if r.ServiceType == "E" {
r.ServiceType = "Emergency"
}
fmt.Println(r.ServiceType)
output.ReportsAll = append(output.ReportsAll, r)
}
t, err := template.ParseFiles("assets/templates/complete.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
} | identifier_body |
main.go | package main
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
"html/template"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
_ "github.com/denisenkom/go-mssqldb"
"github.com/julienschmidt/httprouter"
sendgrid "github.com/sendgrid/sendgrid-go"
"github.com/sendgrid/sendgrid-go/helpers/mail"
)
//Config file structure
type Config struct {
Server string `json:"server"`
User string `json:"user"`
Pwd string `json:"pwd"`
Db string `json:"db"`
Port int `json:"port"`
SendgridAPIkey string `json:"sendgridAPIkey"`
EmailName string `json:"emailName"`
EmailAddress string `json:"emailAddress"`
}
type report struct {
TransportID int
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
DateDiff int
ServiceType string
}
type data struct {
ReportsCollect []report
ReportsDeliver []report
ReportsCollectFuture []report
ReportsDeliverFuture []report
ReportsAll []report
ServerTime string
}
type jobdetails struct {
JobNo string `json:"jobNo"`
Department string `json:"department"`
CustomerName string `json:"customerName"`
JobDescription string `json:"jobDescription"`
}
type validation struct {
DisplayMessage string
SubmitMsg string
JobDescription string
JobNo string
Department string
Customer string
TransportDate string
ActionFlag string
ServiceType string
}
type jobEdit struct {
DisplayMessage string
TransportID string
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
ServiceType string
}
var config Config
var db *sql.DB
func init() {
// Load application configuration from settings file
file, err := os.Open("config.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
err = json.NewDecoder(file).Decode(&config)
if err != nil {
log.Fatal(err)
}
// Connect to the database and test connection
connection := fmt.Sprintf("Server=%s;User ID=%s;Password=%s;database=%s;",
config.Server,
config.User,
config.Pwd,
config.Db)
db, err = sql.Open("mssql", connection)
if err != nil {
log.Fatal(err)
}
if err = db.Ping(); err != nil {
log.Fatal(err)
}
}
func main() {
router := httprouter.New()
router.ServeFiles("/assets/*filepath", http.Dir("./assets"))
router.GET("/transport/create", transportCreate)
router.POST("/transport/create", transportCreate)
router.GET("/transport/create/:jobno", createGetJobDetails)
router.GET("/transport/complete", completeList)
router.GET("/transport/complete/:transportid", completeJob)
router.GET("/transport/edit/:transportid", editJob)
router.POST("/transport/edit", editJob)
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(config.Port), router))
}
func transportCreate(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var output validation
t, _ := template.ParseFiles("assets/templates/create.tpl")
output.ActionFlag = "C"
if r.Method == "POST" {
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
fmt.Println(output.ServiceType)
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql := `INSERT INTO transport (action_flag, department, job_no, customer, job_description, transport_date, service_type, is_active)
VALUES (?, ?, ?, ?, ?, ?, ?, 'Y')`
d, _ := time.Parse(shortForm, output.TransportDate)
fmt.Println(output.ServiceType)
_, err := db.Exec(sql, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType)
if err != nil {
log.Fatal(err)
}
output.DisplayMessage = "Record submitted"
var email bytes.Buffer
t, _ := template.ParseFiles("assets/templates/email.tpl")
err = t.Execute(&email, output)
if err != nil {
log.Fatal(err)
}
m := mail.NewV3Mail()
m.SetFrom(mail.NewEmail("Rewinds & J Windsor Ltd", "donotreply@rjweng.com"))
m.Subject = fmt.Sprintf("New transportation job")
p := mail.NewPersonalization()
tos := []*mail.Email{
mail.NewEmail(config.EmailName, config.EmailAddress),
}
p.AddTos(tos...)
m.AddPersonalizations(p)
m.AddContent(mail.NewContent("text/html", email.String()))
request := sendgrid.GetRequest(config.SendgridAPIkey, "/v3/mail/send", "https://api.sendgrid.com")
request.Method = "POST"
request.Body = mail.GetRequestBody(m)
response, err := sendgrid.API(request)
if err != nil {
log.Fatal(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
//set outputs to blank string
output.ActionFlag = ""
output.Department = ""
output.JobNo = ""
output.Customer = ""
output.JobDescription = ""
output.TransportDate = ""
output.ServiceType = ""
}
}
err := t.Execute(w, output)
if err != nil |
}
func createGetJobDetails(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
JobNo := p.ByName("jobno")
if len(JobNo) > 10 {
fmt.Println("Job number greater than 10 characters")
} else if len(JobNo) < 5 {
fmt.Println("Job number less than 5 characters")
} else {
var output jobdetails
sql1 := `SELECT sc.[name], LEFT(j.contract, 2)
AS department, LTRIM(RTRIM(j.description1)) AS job_description
FROM rjw.scheme.jcmastm j
JOIN rjw.scheme.slcustm sc ON j.customer = sc.customer
WHERE j.job_code = ?`
err := db.QueryRow(sql1, JobNo).Scan(&output.CustomerName, &output.Department, &output.JobDescription)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
b, _ := json.Marshal(output)
fmt.Fprintf(w, string(b))
}
}
func completeList(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var output data
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, COALESCE(CONVERT(NVARCHAR(11), transport_date, 106), '-'), service_type
FROM transport
WHERE is_active = 'Y'
ORDER BY transport_date DESC`
rows, err := db.Query(sql1)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
output.ReportsAll = make([]report, 0)
for rows.Next() {
var r report
err := rows.Scan(&r.TransportID, &r.ActionFlag, &r.JobNo, &r.Customer, &r.Department, &r.JobDescription, &r.TransportDate, &r.ServiceType)
if err != nil {
log.Fatal(err)
}
if r.ActionFlag == "C" {
r.ActionFlag = "Collect"
} else if r.ActionFlag == "D" {
r.ActionFlag = "Deliver"
} else {
r.ActionFlag = "Future"
}
if r.ServiceType == "R" {
r.ServiceType = "Regular"
} else if r.ServiceType == "E" {
r.ServiceType = "Emergency"
}
fmt.Println(r.ServiceType)
output.ReportsAll = append(output.ReportsAll, r)
}
t, err := template.ParseFiles("assets/templates/complete.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
func completeJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
sql2 := `UPDATE transport
SET is_active = 'N'
WHERE transport_id = ?`
_, err := db.Exec(sql2, TransportID)
if err != nil {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
func editJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
var output jobEdit
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, CONVERT(NVARCHAR(10), transport_date, 120), service_type
FROM transport
WHERE transport_id = ?`
err := db.QueryRow(sql1, TransportID).Scan(&output.TransportID, &output.ActionFlag, &output.JobNo, &output.Customer, &output.Department, &output.JobDescription, &output.TransportDate, &output.ServiceType)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
if r.Method == "POST" {
output.TransportID = r.FormValue("transport_id")
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql2 := `UPDATE transport
SET action_flag = ?, department = ?, job_no = ?, customer = ?, job_description = ?, transport_date = ?, service_type = ?
WHERE transport_id = ?`
d, _ := time.Parse(shortForm, output.TransportDate)
_, err := db.Exec(sql2, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType, output.TransportID)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
}
t, err := template.ParseFiles("assets/templates/edit.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
| {
log.Fatal(err)
} | conditional_block |
main.go | package main
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
"html/template"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
_ "github.com/denisenkom/go-mssqldb"
"github.com/julienschmidt/httprouter"
sendgrid "github.com/sendgrid/sendgrid-go"
"github.com/sendgrid/sendgrid-go/helpers/mail"
)
//Config file structure
type Config struct {
Server string `json:"server"`
User string `json:"user"`
Pwd string `json:"pwd"`
Db string `json:"db"`
Port int `json:"port"`
SendgridAPIkey string `json:"sendgridAPIkey"`
EmailName string `json:"emailName"`
EmailAddress string `json:"emailAddress"`
}
type report struct {
TransportID int
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
DateDiff int
ServiceType string
}
type data struct {
ReportsCollect []report
ReportsDeliver []report
ReportsCollectFuture []report
ReportsDeliverFuture []report
ReportsAll []report
ServerTime string
}
type jobdetails struct {
JobNo string `json:"jobNo"`
Department string `json:"department"`
CustomerName string `json:"customerName"`
JobDescription string `json:"jobDescription"`
}
type validation struct {
DisplayMessage string
SubmitMsg string
JobDescription string
JobNo string
Department string
Customer string
TransportDate string
ActionFlag string
ServiceType string
}
type jobEdit struct {
DisplayMessage string
TransportID string
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
ServiceType string
}
var config Config
var db *sql.DB
func init() {
// Load application configuration from settings file
file, err := os.Open("config.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
err = json.NewDecoder(file).Decode(&config)
if err != nil {
log.Fatal(err)
}
// Connect to the database and test connection
connection := fmt.Sprintf("Server=%s;User ID=%s;Password=%s;database=%s;",
config.Server,
config.User,
config.Pwd,
config.Db)
db, err = sql.Open("mssql", connection)
if err != nil {
log.Fatal(err)
}
if err = db.Ping(); err != nil {
log.Fatal(err)
}
}
func main() {
router := httprouter.New()
router.ServeFiles("/assets/*filepath", http.Dir("./assets"))
router.GET("/transport/create", transportCreate)
router.POST("/transport/create", transportCreate)
router.GET("/transport/create/:jobno", createGetJobDetails)
router.GET("/transport/complete", completeList)
router.GET("/transport/complete/:transportid", completeJob)
router.GET("/transport/edit/:transportid", editJob)
router.POST("/transport/edit", editJob)
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(config.Port), router))
}
func transportCreate(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var output validation
t, _ := template.ParseFiles("assets/templates/create.tpl")
output.ActionFlag = "C"
if r.Method == "POST" {
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
fmt.Println(output.ServiceType)
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql := `INSERT INTO transport (action_flag, department, job_no, customer, job_description, transport_date, service_type, is_active)
VALUES (?, ?, ?, ?, ?, ?, ?, 'Y')`
d, _ := time.Parse(shortForm, output.TransportDate)
fmt.Println(output.ServiceType)
_, err := db.Exec(sql, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType)
if err != nil {
log.Fatal(err)
}
output.DisplayMessage = "Record submitted"
var email bytes.Buffer
t, _ := template.ParseFiles("assets/templates/email.tpl")
err = t.Execute(&email, output)
if err != nil {
log.Fatal(err)
}
m := mail.NewV3Mail()
m.SetFrom(mail.NewEmail("Rewinds & J Windsor Ltd", "donotreply@rjweng.com"))
m.Subject = fmt.Sprintf("New transportation job")
p := mail.NewPersonalization()
tos := []*mail.Email{
mail.NewEmail(config.EmailName, config.EmailAddress),
}
p.AddTos(tos...)
m.AddPersonalizations(p)
m.AddContent(mail.NewContent("text/html", email.String()))
request := sendgrid.GetRequest(config.SendgridAPIkey, "/v3/mail/send", "https://api.sendgrid.com")
request.Method = "POST"
request.Body = mail.GetRequestBody(m)
response, err := sendgrid.API(request)
if err != nil {
log.Fatal(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
//set outputs to blank string
output.ActionFlag = ""
output.Department = ""
output.JobNo = ""
output.Customer = ""
output.JobDescription = ""
output.TransportDate = ""
output.ServiceType = ""
}
}
err := t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
func createGetJobDetails(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
JobNo := p.ByName("jobno")
if len(JobNo) > 10 {
fmt.Println("Job number greater than 10 characters")
} else if len(JobNo) < 5 {
fmt.Println("Job number less than 5 characters")
} else {
var output jobdetails
sql1 := `SELECT sc.[name], LEFT(j.contract, 2)
AS department, LTRIM(RTRIM(j.description1)) AS job_description
FROM rjw.scheme.jcmastm j
JOIN rjw.scheme.slcustm sc ON j.customer = sc.customer
WHERE j.job_code = ?`
err := db.QueryRow(sql1, JobNo).Scan(&output.CustomerName, &output.Department, &output.JobDescription)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
b, _ := json.Marshal(output)
fmt.Fprintf(w, string(b))
}
}
func completeList(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var output data
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, COALESCE(CONVERT(NVARCHAR(11), transport_date, 106), '-'), service_type
FROM transport
WHERE is_active = 'Y'
ORDER BY transport_date DESC`
rows, err := db.Query(sql1)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
output.ReportsAll = make([]report, 0)
for rows.Next() {
var r report
err := rows.Scan(&r.TransportID, &r.ActionFlag, &r.JobNo, &r.Customer, &r.Department, &r.JobDescription, &r.TransportDate, &r.ServiceType)
if err != nil {
log.Fatal(err)
}
if r.ActionFlag == "C" {
r.ActionFlag = "Collect"
} else if r.ActionFlag == "D" {
r.ActionFlag = "Deliver"
} else {
r.ActionFlag = "Future"
}
if r.ServiceType == "R" {
r.ServiceType = "Regular"
} else if r.ServiceType == "E" {
r.ServiceType = "Emergency"
}
fmt.Println(r.ServiceType)
output.ReportsAll = append(output.ReportsAll, r)
}
t, err := template.ParseFiles("assets/templates/complete.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
| sql2 := `UPDATE transport
SET is_active = 'N'
WHERE transport_id = ?`
_, err := db.Exec(sql2, TransportID)
if err != nil {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
func editJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
var output jobEdit
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, CONVERT(NVARCHAR(10), transport_date, 120), service_type
FROM transport
WHERE transport_id = ?`
err := db.QueryRow(sql1, TransportID).Scan(&output.TransportID, &output.ActionFlag, &output.JobNo, &output.Customer, &output.Department, &output.JobDescription, &output.TransportDate, &output.ServiceType)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
if r.Method == "POST" {
output.TransportID = r.FormValue("transport_id")
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql2 := `UPDATE transport
SET action_flag = ?, department = ?, job_no = ?, customer = ?, job_description = ?, transport_date = ?, service_type = ?
WHERE transport_id = ?`
d, _ := time.Parse(shortForm, output.TransportDate)
_, err := db.Exec(sql2, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType, output.TransportID)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
}
t, err := template.ParseFiles("assets/templates/edit.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
} |
func completeJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
| random_line_split |
main.go | package main
import (
"bytes"
"database/sql"
"encoding/json"
"fmt"
"html/template"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
_ "github.com/denisenkom/go-mssqldb"
"github.com/julienschmidt/httprouter"
sendgrid "github.com/sendgrid/sendgrid-go"
"github.com/sendgrid/sendgrid-go/helpers/mail"
)
//Config file structure
type Config struct {
Server string `json:"server"`
User string `json:"user"`
Pwd string `json:"pwd"`
Db string `json:"db"`
Port int `json:"port"`
SendgridAPIkey string `json:"sendgridAPIkey"`
EmailName string `json:"emailName"`
EmailAddress string `json:"emailAddress"`
}
type report struct {
TransportID int
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
DateDiff int
ServiceType string
}
type data struct {
ReportsCollect []report
ReportsDeliver []report
ReportsCollectFuture []report
ReportsDeliverFuture []report
ReportsAll []report
ServerTime string
}
type jobdetails struct {
JobNo string `json:"jobNo"`
Department string `json:"department"`
CustomerName string `json:"customerName"`
JobDescription string `json:"jobDescription"`
}
type validation struct {
DisplayMessage string
SubmitMsg string
JobDescription string
JobNo string
Department string
Customer string
TransportDate string
ActionFlag string
ServiceType string
}
type jobEdit struct {
DisplayMessage string
TransportID string
ActionFlag string
JobNo string
Customer string
Department string
JobDescription string
TransportDate string
ServiceType string
}
var config Config
var db *sql.DB
func init() {
// Load application configuration from settings file
file, err := os.Open("config.json")
if err != nil {
log.Fatal(err)
}
defer file.Close()
err = json.NewDecoder(file).Decode(&config)
if err != nil {
log.Fatal(err)
}
// Connect to the database and test connection
connection := fmt.Sprintf("Server=%s;User ID=%s;Password=%s;database=%s;",
config.Server,
config.User,
config.Pwd,
config.Db)
db, err = sql.Open("mssql", connection)
if err != nil {
log.Fatal(err)
}
if err = db.Ping(); err != nil {
log.Fatal(err)
}
}
func main() {
router := httprouter.New()
router.ServeFiles("/assets/*filepath", http.Dir("./assets"))
router.GET("/transport/create", transportCreate)
router.POST("/transport/create", transportCreate)
router.GET("/transport/create/:jobno", createGetJobDetails)
router.GET("/transport/complete", completeList)
router.GET("/transport/complete/:transportid", completeJob)
router.GET("/transport/edit/:transportid", editJob)
router.POST("/transport/edit", editJob)
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(config.Port), router))
}
func | (w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var output validation
t, _ := template.ParseFiles("assets/templates/create.tpl")
output.ActionFlag = "C"
if r.Method == "POST" {
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
fmt.Println(output.ServiceType)
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql := `INSERT INTO transport (action_flag, department, job_no, customer, job_description, transport_date, service_type, is_active)
VALUES (?, ?, ?, ?, ?, ?, ?, 'Y')`
d, _ := time.Parse(shortForm, output.TransportDate)
fmt.Println(output.ServiceType)
_, err := db.Exec(sql, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType)
if err != nil {
log.Fatal(err)
}
output.DisplayMessage = "Record submitted"
var email bytes.Buffer
t, _ := template.ParseFiles("assets/templates/email.tpl")
err = t.Execute(&email, output)
if err != nil {
log.Fatal(err)
}
m := mail.NewV3Mail()
m.SetFrom(mail.NewEmail("Rewinds & J Windsor Ltd", "donotreply@rjweng.com"))
m.Subject = fmt.Sprintf("New transportation job")
p := mail.NewPersonalization()
tos := []*mail.Email{
mail.NewEmail(config.EmailName, config.EmailAddress),
}
p.AddTos(tos...)
m.AddPersonalizations(p)
m.AddContent(mail.NewContent("text/html", email.String()))
request := sendgrid.GetRequest(config.SendgridAPIkey, "/v3/mail/send", "https://api.sendgrid.com")
request.Method = "POST"
request.Body = mail.GetRequestBody(m)
response, err := sendgrid.API(request)
if err != nil {
log.Fatal(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
//set outputs to blank string
output.ActionFlag = ""
output.Department = ""
output.JobNo = ""
output.Customer = ""
output.JobDescription = ""
output.TransportDate = ""
output.ServiceType = ""
}
}
err := t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
func createGetJobDetails(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
JobNo := p.ByName("jobno")
if len(JobNo) > 10 {
fmt.Println("Job number greater than 10 characters")
} else if len(JobNo) < 5 {
fmt.Println("Job number less than 5 characters")
} else {
var output jobdetails
sql1 := `SELECT sc.[name], LEFT(j.contract, 2)
AS department, LTRIM(RTRIM(j.description1)) AS job_description
FROM rjw.scheme.jcmastm j
JOIN rjw.scheme.slcustm sc ON j.customer = sc.customer
WHERE j.job_code = ?`
err := db.QueryRow(sql1, JobNo).Scan(&output.CustomerName, &output.Department, &output.JobDescription)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
b, _ := json.Marshal(output)
fmt.Fprintf(w, string(b))
}
}
func completeList(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
var output data
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, COALESCE(CONVERT(NVARCHAR(11), transport_date, 106), '-'), service_type
FROM transport
WHERE is_active = 'Y'
ORDER BY transport_date DESC`
rows, err := db.Query(sql1)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
output.ReportsAll = make([]report, 0)
for rows.Next() {
var r report
err := rows.Scan(&r.TransportID, &r.ActionFlag, &r.JobNo, &r.Customer, &r.Department, &r.JobDescription, &r.TransportDate, &r.ServiceType)
if err != nil {
log.Fatal(err)
}
if r.ActionFlag == "C" {
r.ActionFlag = "Collect"
} else if r.ActionFlag == "D" {
r.ActionFlag = "Deliver"
} else {
r.ActionFlag = "Future"
}
if r.ServiceType == "R" {
r.ServiceType = "Regular"
} else if r.ServiceType == "E" {
r.ServiceType = "Emergency"
}
fmt.Println(r.ServiceType)
output.ReportsAll = append(output.ReportsAll, r)
}
t, err := template.ParseFiles("assets/templates/complete.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
func completeJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
sql2 := `UPDATE transport
SET is_active = 'N'
WHERE transport_id = ?`
_, err := db.Exec(sql2, TransportID)
if err != nil {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
func editJob(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
TransportID := p.ByName("transportid")
var output jobEdit
sql1 := `SELECT transport_id, action_flag, job_no, customer, department, job_description, CONVERT(NVARCHAR(10), transport_date, 120), service_type
FROM transport
WHERE transport_id = ?`
err := db.QueryRow(sql1, TransportID).Scan(&output.TransportID, &output.ActionFlag, &output.JobNo, &output.Customer, &output.Department, &output.JobDescription, &output.TransportDate, &output.ServiceType)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
if r.Method == "POST" {
output.TransportID = r.FormValue("transport_id")
output.ActionFlag = r.FormValue("action_flag")
output.Department = r.FormValue("department")
output.JobNo = r.FormValue("job_no")
output.Customer = r.FormValue("customer")
output.JobDescription = r.FormValue("job_description")
output.TransportDate = r.FormValue("date")
output.ServiceType = r.FormValue("service_type")
const shortForm = "2006-01-02"
if strings.TrimSpace(output.Customer) == "" {
output.DisplayMessage = "No customer entered"
} else if strings.TrimSpace(output.Department) == "" {
output.DisplayMessage = "No Department entered"
} else if strings.TrimSpace(output.TransportDate) == "" {
output.DisplayMessage = "No date entered"
} else {
sql2 := `UPDATE transport
SET action_flag = ?, department = ?, job_no = ?, customer = ?, job_description = ?, transport_date = ?, service_type = ?
WHERE transport_id = ?`
d, _ := time.Parse(shortForm, output.TransportDate)
_, err := db.Exec(sql2, output.ActionFlag, output.Department, output.JobNo, output.Customer, output.JobDescription, d, output.ServiceType, output.TransportID)
if err != nil && err != sql.ErrNoRows {
log.Fatal(err)
}
http.Redirect(w, r, "/transport/complete", 303)
}
}
t, err := template.ParseFiles("assets/templates/edit.tpl")
if err != nil {
log.Fatal(err)
}
err = t.Execute(w, output)
if err != nil {
log.Fatal(err)
}
}
| transportCreate | identifier_name |
mod.rs | //! # Queueing Honey Badger
//!
//! This works exactly like Dynamic Honey Badger, but it has a transaction queue built in. Whenever
//! an epoch is output, it will automatically select a list of pending transactions and propose it
//! for the next one. The user can continuously add more pending transactions to the queue.
//!
//! If there are no pending transactions, no validators in the process of being added or
//! removed and not enough other nodes have proposed yet, no automatic proposal will be made: The
//! network then waits until at least _f + 1_ have any content for the next epoch.
//!
//! ## How it works
//!
//! Queueing Honey Badger runs a Dynamic Honey Badger internally, and automatically inputs a list
//! of pending transactions as its contribution at the beginning of each epoch. These are selected
//! by making a random choice of _B / N_ out of the first _B_ entries in the queue, where _B_ is the
//! configurable `batch_size` parameter, and _N_ is the current number of validators.
//!
//! After each output, the transactions that made it into the new batch are removed from the queue.
//!
//! The random choice of transactions is made to reduce redundancy even if all validators have
//! roughly the same entries in their queues. By selecting a random fraction of the first _B_
//! entries, any two nodes will likely make almost disjoint contributions instead of proposing
//! the same transaction multiple times.
use std::marker::PhantomData;
use std::{cmp, iter};
use crypto::PublicKey;
use derivative::Derivative;
use failure::Fail;
use rand::{Rand, Rng};
use serde::{de::DeserializeOwned, Serialize};
use dynamic_honey_badger::{self, Batch as DhbBatch, DynamicHoneyBadger, Message};
use transaction_queue::TransactionQueue;
use {util, Contribution, DistAlgorithm, NetworkInfo, NodeIdT};
pub use dynamic_honey_badger::{Change, ChangeState, Input};
/// Queueing honey badger error variants.
#[derive(Debug, Fail)]
pub enum Error {
/// Failed to handle input.
#[fail(display = "Input error: {}", _0)]
Input(dynamic_honey_badger::Error),
/// Failed to handle a message.
#[fail(display = "Handle message error: {}", _0)]
HandleMessage(dynamic_honey_badger::Error),
/// Failed to propose a contribution.
#[fail(display = "Propose error: {}", _0)]
Propose(dynamic_honey_badger::Error),
}
/// The result of `QueueingHoneyBadger` handling an input or message.
pub type Result<T> = ::std::result::Result<T, Error>;
/// A Queueing Honey Badger builder, to configure the parameters and create new instances of
/// `QueueingHoneyBadger`.
pub struct QueueingHoneyBadgerBuilder<T, N: Rand + Ord, Q> {
/// Shared network data.
dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
/// The target number of transactions to be included in each batch.
batch_size: usize,
/// The queue of pending transactions that haven't been output in a batch yet.
queue: Q,
_phantom: PhantomData<T>,
}
type QueueingHoneyBadgerWithStep<T, N, Q> = (QueueingHoneyBadger<T, N, Q>, Step<T, N>);
impl<T, N, Q> QueueingHoneyBadgerBuilder<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
// TODO: Make it easier to build a `QueueingHoneyBadger` with a `JoinPlan`. Handle `Step`
// conversion internally.
pub fn new(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> Self {
// TODO: Use the defaults from `HoneyBadgerBuilder`.
QueueingHoneyBadgerBuilder {
dyn_hb,
batch_size: 100,
queue: Default::default(),
_phantom: PhantomData,
}
}
/// Sets the target number of transactions per batch.
pub fn batch_size(mut self, batch_size: usize) -> Self {
self.batch_size = batch_size;
self
}
/// Sets the transaction queue object.
pub fn queue(mut self, queue: Q) -> Self {
self.queue = queue;
self
}
/// Creates a new Queueing Honey Badger instance with an empty buffer.
pub fn | <R>(self, rng: R) -> QueueingHoneyBadgerWithStep<T, N, Q>
where
R: 'static + Rng + Send + Sync,
{
self.build_with_transactions(None, rng)
.expect("building without transactions cannot fail")
}
/// Returns a new Queueing Honey Badger instance that starts with the given transactions in its
/// buffer.
pub fn build_with_transactions<TI, R>(
mut self,
txs: TI,
rng: R,
) -> Result<QueueingHoneyBadgerWithStep<T, N, Q>>
where
TI: IntoIterator<Item = T>,
R: 'static + Rng + Send + Sync,
{
self.queue.extend(txs);
let mut qhb = QueueingHoneyBadger {
dyn_hb: self.dyn_hb,
batch_size: self.batch_size,
queue: self.queue,
rng: Box::new(rng),
};
let step = qhb.propose()?;
Ok((qhb, step))
}
}
/// A Honey Badger instance that can handle adding and removing nodes and manages a transaction
/// queue.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct QueueingHoneyBadger<T, N: Rand + Ord, Q> {
/// The target number of transactions to be included in each batch.
batch_size: usize,
/// The internal managed `DynamicHoneyBadger` instance.
dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
/// The queue of pending transactions that haven't been output in a batch yet.
queue: Q,
/// Random number generator used for choosing transactions from the queue.
#[derivative(Debug(format_with = "util::fmt_rng"))]
rng: Box<dyn Rng + Send + Sync>,
}
/// A `QueueingHoneyBadger` step, possibly containing multiple outputs.
pub type Step<T, N> = ::Step<Message<N>, Batch<T, N>, N>;
impl<T, N, Q> DistAlgorithm for QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
type NodeId = N;
type Input = Input<T, N>;
type Output = Batch<T, N>;
type Message = Message<N>;
type Error = Error;
fn handle_input(&mut self, input: Self::Input) -> Result<Step<T, N>> {
// User transactions are forwarded to `HoneyBadger` right away. Internal messages are
// in addition signed and broadcast.
match input {
Input::User(tx) => self.push_transaction(tx),
Input::Change(change) => self.vote_for(change),
}
}
fn handle_message(&mut self, sender_id: &N, message: Self::Message) -> Result<Step<T, N>> {
self.handle_message(sender_id, message)
}
fn terminated(&self) -> bool {
false
}
fn our_id(&self) -> &N {
self.dyn_hb.our_id()
}
}
impl<T, N, Q> QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
pub fn builder(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> QueueingHoneyBadgerBuilder<T, N, Q> {
QueueingHoneyBadgerBuilder::new(dyn_hb)
}
/// Adds a transaction to the queue.
///
/// This can be called at any time to append to the transaction queue. The new transaction will
/// be proposed in some future epoch.
///
/// If no proposal has yet been made for the current epoch, this may trigger one. In this case,
/// a nonempty step will returned, with the corresponding messages. (Or, if we are the only
/// validator, even with the completed batch as an output.)
pub fn push_transaction(&mut self, tx: T) -> Result<Step<T, N>> {
self.queue.extend(iter::once(tx));
self.propose()
}
/// Casts a vote to change the set of validators.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_for(&mut self, change: Change<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_for(change))
}
/// Casts a vote to add a node as a validator.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_add(&mut self, node_id: N, pub_key: PublicKey) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_add(node_id, pub_key))
}
/// Casts a vote to demote a validator to observer.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_remove(&mut self, node_id: &N) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_remove(node_id))
}
/// Handles a message received from `sender_id`.
///
/// This must be called with every message we receive from another node.
pub fn handle_message(&mut self, sender_id: &N, message: Message<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.handle_message(sender_id, message))
}
/// Returns a reference to the internal managed `DynamicHoneyBadger` instance.
pub fn dyn_hb(&self) -> &DynamicHoneyBadger<Vec<T>, N> {
&self.dyn_hb
}
/// Returns the information about the node IDs in the network, and the cryptographic keys.
pub fn netinfo(&self) -> &NetworkInfo<N> {
self.dyn_hb.netinfo()
}
/// Applies a function `f` to the `DynamicHoneyBadger` instance and processes the step.
fn apply<F>(&mut self, f: F) -> Result<Step<T, N>>
where
F: FnOnce(&mut DynamicHoneyBadger<Vec<T>, N>) -> dynamic_honey_badger::Result<Step<T, N>>,
{
let step = f(&mut self.dyn_hb).map_err(Error::Input)?;
self.queue
.remove_multiple(step.output.iter().flat_map(Batch::iter));
Ok(step.join(self.propose()?))
}
/// Returns the epoch of the next batch that will be output.
pub fn next_epoch(&self) -> u64 {
self.dyn_hb.next_epoch()
}
/// Returns `true` if we are ready to propose our contribution for the next epoch, i.e. if the
/// previous epoch has completed and we have either pending transactions or we are required to
/// make a proposal to avoid stalling the network.
fn can_propose(&self) -> bool {
if self.dyn_hb.has_input() {
return false; // Previous epoch is still in progress.
}
!self.queue.is_empty() || self.dyn_hb.should_propose()
}
/// Initiates the next epoch by proposing a batch from the queue.
fn propose(&mut self) -> Result<Step<T, N>> {
let mut step = Step::default();
while self.can_propose() {
let amount = cmp::max(1, self.batch_size / self.dyn_hb.netinfo().num_nodes());
let proposal = self.queue.choose(&mut self.rng, amount, self.batch_size);
step.extend(
self.dyn_hb
.handle_input(Input::User(proposal))
.map_err(Error::Propose)?,
);
}
Ok(step)
}
}
/// A batch containing a list of transactions from at least two thirds of the validators.
pub type Batch<T, N> = DhbBatch<Vec<T>, N>;
| build | identifier_name |
mod.rs | //! # Queueing Honey Badger
//!
//! This works exactly like Dynamic Honey Badger, but it has a transaction queue built in. Whenever
//! an epoch is output, it will automatically select a list of pending transactions and propose it
//! for the next one. The user can continuously add more pending transactions to the queue.
//!
//! If there are no pending transactions, no validators in the process of being added or
//! removed and not enough other nodes have proposed yet, no automatic proposal will be made: The
//! network then waits until at least _f + 1_ have any content for the next epoch.
//!
//! ## How it works
//!
//! Queueing Honey Badger runs a Dynamic Honey Badger internally, and automatically inputs a list
//! of pending transactions as its contribution at the beginning of each epoch. These are selected
//! by making a random choice of _B / N_ out of the first _B_ entries in the queue, where _B_ is the
//! configurable `batch_size` parameter, and _N_ is the current number of validators.
//!
//! After each output, the transactions that made it into the new batch are removed from the queue.
//!
//! The random choice of transactions is made to reduce redundancy even if all validators have
//! roughly the same entries in their queues. By selecting a random fraction of the first _B_
//! entries, any two nodes will likely make almost disjoint contributions instead of proposing
//! the same transaction multiple times.
use std::marker::PhantomData;
use std::{cmp, iter};
use crypto::PublicKey;
use derivative::Derivative;
use failure::Fail;
use rand::{Rand, Rng};
use serde::{de::DeserializeOwned, Serialize};
use dynamic_honey_badger::{self, Batch as DhbBatch, DynamicHoneyBadger, Message};
use transaction_queue::TransactionQueue;
use {util, Contribution, DistAlgorithm, NetworkInfo, NodeIdT};
pub use dynamic_honey_badger::{Change, ChangeState, Input};
/// Queueing honey badger error variants.
#[derive(Debug, Fail)]
pub enum Error {
/// Failed to handle input.
#[fail(display = "Input error: {}", _0)]
Input(dynamic_honey_badger::Error),
/// Failed to handle a message.
#[fail(display = "Handle message error: {}", _0)]
HandleMessage(dynamic_honey_badger::Error),
/// Failed to propose a contribution.
#[fail(display = "Propose error: {}", _0)]
Propose(dynamic_honey_badger::Error),
}
/// The result of `QueueingHoneyBadger` handling an input or message.
pub type Result<T> = ::std::result::Result<T, Error>;
/// A Queueing Honey Badger builder, to configure the parameters and create new instances of
/// `QueueingHoneyBadger`.
pub struct QueueingHoneyBadgerBuilder<T, N: Rand + Ord, Q> {
/// Shared network data.
dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
/// The target number of transactions to be included in each batch.
batch_size: usize,
/// The queue of pending transactions that haven't been output in a batch yet.
queue: Q,
_phantom: PhantomData<T>,
}
type QueueingHoneyBadgerWithStep<T, N, Q> = (QueueingHoneyBadger<T, N, Q>, Step<T, N>);
impl<T, N, Q> QueueingHoneyBadgerBuilder<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
// TODO: Make it easier to build a `QueueingHoneyBadger` with a `JoinPlan`. Handle `Step`
// conversion internally.
pub fn new(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> Self {
// TODO: Use the defaults from `HoneyBadgerBuilder`.
QueueingHoneyBadgerBuilder {
dyn_hb,
batch_size: 100,
queue: Default::default(),
_phantom: PhantomData,
}
}
/// Sets the target number of transactions per batch.
pub fn batch_size(mut self, batch_size: usize) -> Self {
self.batch_size = batch_size;
self
}
/// Sets the transaction queue object.
pub fn queue(mut self, queue: Q) -> Self {
self.queue = queue;
self
}
/// Creates a new Queueing Honey Badger instance with an empty buffer.
pub fn build<R>(self, rng: R) -> QueueingHoneyBadgerWithStep<T, N, Q>
where
R: 'static + Rng + Send + Sync,
{
self.build_with_transactions(None, rng)
.expect("building without transactions cannot fail")
}
/// Returns a new Queueing Honey Badger instance that starts with the given transactions in its
/// buffer.
pub fn build_with_transactions<TI, R>(
mut self,
txs: TI,
rng: R,
) -> Result<QueueingHoneyBadgerWithStep<T, N, Q>>
where
TI: IntoIterator<Item = T>,
R: 'static + Rng + Send + Sync,
{
self.queue.extend(txs);
let mut qhb = QueueingHoneyBadger {
dyn_hb: self.dyn_hb,
batch_size: self.batch_size,
queue: self.queue,
rng: Box::new(rng),
};
let step = qhb.propose()?;
Ok((qhb, step))
}
}
/// A Honey Badger instance that can handle adding and removing nodes and manages a transaction
/// queue.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct QueueingHoneyBadger<T, N: Rand + Ord, Q> {
/// The target number of transactions to be included in each batch.
batch_size: usize,
/// The internal managed `DynamicHoneyBadger` instance.
dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
/// The queue of pending transactions that haven't been output in a batch yet.
queue: Q,
/// Random number generator used for choosing transactions from the queue.
#[derivative(Debug(format_with = "util::fmt_rng"))]
rng: Box<dyn Rng + Send + Sync>,
}
/// A `QueueingHoneyBadger` step, possibly containing multiple outputs.
pub type Step<T, N> = ::Step<Message<N>, Batch<T, N>, N>;
impl<T, N, Q> DistAlgorithm for QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
type NodeId = N;
type Input = Input<T, N>;
type Output = Batch<T, N>;
type Message = Message<N>;
type Error = Error;
fn handle_input(&mut self, input: Self::Input) -> Result<Step<T, N>> {
// User transactions are forwarded to `HoneyBadger` right away. Internal messages are
// in addition signed and broadcast.
match input {
Input::User(tx) => self.push_transaction(tx),
Input::Change(change) => self.vote_for(change),
}
}
fn handle_message(&mut self, sender_id: &N, message: Self::Message) -> Result<Step<T, N>> {
self.handle_message(sender_id, message)
}
fn terminated(&self) -> bool {
false
}
fn our_id(&self) -> &N {
self.dyn_hb.our_id()
}
}
impl<T, N, Q> QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
pub fn builder(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> QueueingHoneyBadgerBuilder<T, N, Q> {
QueueingHoneyBadgerBuilder::new(dyn_hb)
}
/// Adds a transaction to the queue.
///
/// This can be called at any time to append to the transaction queue. The new transaction will
/// be proposed in some future epoch.
///
/// If no proposal has yet been made for the current epoch, this may trigger one. In this case,
/// a nonempty step will returned, with the corresponding messages. (Or, if we are the only
/// validator, even with the completed batch as an output.)
pub fn push_transaction(&mut self, tx: T) -> Result<Step<T, N>> {
self.queue.extend(iter::once(tx));
self.propose()
}
/// Casts a vote to change the set of validators.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_for(&mut self, change: Change<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_for(change))
}
/// Casts a vote to add a node as a validator.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_add(&mut self, node_id: N, pub_key: PublicKey) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_add(node_id, pub_key))
}
/// Casts a vote to demote a validator to observer.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_remove(&mut self, node_id: &N) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_remove(node_id))
}
/// Handles a message received from `sender_id`.
///
/// This must be called with every message we receive from another node.
pub fn handle_message(&mut self, sender_id: &N, message: Message<N>) -> Result<Step<T, N>> |
/// Returns a reference to the internal managed `DynamicHoneyBadger` instance.
pub fn dyn_hb(&self) -> &DynamicHoneyBadger<Vec<T>, N> {
&self.dyn_hb
}
/// Returns the information about the node IDs in the network, and the cryptographic keys.
pub fn netinfo(&self) -> &NetworkInfo<N> {
self.dyn_hb.netinfo()
}
/// Applies a function `f` to the `DynamicHoneyBadger` instance and processes the step.
fn apply<F>(&mut self, f: F) -> Result<Step<T, N>>
where
F: FnOnce(&mut DynamicHoneyBadger<Vec<T>, N>) -> dynamic_honey_badger::Result<Step<T, N>>,
{
let step = f(&mut self.dyn_hb).map_err(Error::Input)?;
self.queue
.remove_multiple(step.output.iter().flat_map(Batch::iter));
Ok(step.join(self.propose()?))
}
/// Returns the epoch of the next batch that will be output.
pub fn next_epoch(&self) -> u64 {
self.dyn_hb.next_epoch()
}
/// Returns `true` if we are ready to propose our contribution for the next epoch, i.e. if the
/// previous epoch has completed and we have either pending transactions or we are required to
/// make a proposal to avoid stalling the network.
fn can_propose(&self) -> bool {
if self.dyn_hb.has_input() {
return false; // Previous epoch is still in progress.
}
!self.queue.is_empty() || self.dyn_hb.should_propose()
}
/// Initiates the next epoch by proposing a batch from the queue.
fn propose(&mut self) -> Result<Step<T, N>> {
let mut step = Step::default();
while self.can_propose() {
let amount = cmp::max(1, self.batch_size / self.dyn_hb.netinfo().num_nodes());
let proposal = self.queue.choose(&mut self.rng, amount, self.batch_size);
step.extend(
self.dyn_hb
.handle_input(Input::User(proposal))
.map_err(Error::Propose)?,
);
}
Ok(step)
}
}
/// A batch containing a list of transactions from at least two thirds of the validators.
pub type Batch<T, N> = DhbBatch<Vec<T>, N>;
| {
self.apply(|dyn_hb| dyn_hb.handle_message(sender_id, message))
} | identifier_body |
mod.rs | //! # Queueing Honey Badger
//!
//! This works exactly like Dynamic Honey Badger, but it has a transaction queue built in. Whenever
//! an epoch is output, it will automatically select a list of pending transactions and propose it
//! for the next one. The user can continuously add more pending transactions to the queue.
//!
//! If there are no pending transactions, no validators in the process of being added or
//! removed and not enough other nodes have proposed yet, no automatic proposal will be made: The
//! network then waits until at least _f + 1_ have any content for the next epoch.
//!
//! ## How it works
//!
//! Queueing Honey Badger runs a Dynamic Honey Badger internally, and automatically inputs a list
//! of pending transactions as its contribution at the beginning of each epoch. These are selected
//! by making a random choice of _B / N_ out of the first _B_ entries in the queue, where _B_ is the
//! configurable `batch_size` parameter, and _N_ is the current number of validators.
//!
//! After each output, the transactions that made it into the new batch are removed from the queue.
//!
//! The random choice of transactions is made to reduce redundancy even if all validators have
//! roughly the same entries in their queues. By selecting a random fraction of the first _B_
//! entries, any two nodes will likely make almost disjoint contributions instead of proposing
//! the same transaction multiple times.
use std::marker::PhantomData;
use std::{cmp, iter};
use crypto::PublicKey;
use derivative::Derivative;
use failure::Fail;
use rand::{Rand, Rng};
use serde::{de::DeserializeOwned, Serialize};
use dynamic_honey_badger::{self, Batch as DhbBatch, DynamicHoneyBadger, Message};
use transaction_queue::TransactionQueue;
use {util, Contribution, DistAlgorithm, NetworkInfo, NodeIdT};
pub use dynamic_honey_badger::{Change, ChangeState, Input};
/// Queueing honey badger error variants.
#[derive(Debug, Fail)]
pub enum Error {
/// Failed to handle input.
#[fail(display = "Input error: {}", _0)]
Input(dynamic_honey_badger::Error),
/// Failed to handle a message.
#[fail(display = "Handle message error: {}", _0)]
HandleMessage(dynamic_honey_badger::Error),
/// Failed to propose a contribution.
#[fail(display = "Propose error: {}", _0)]
Propose(dynamic_honey_badger::Error),
}
/// The result of `QueueingHoneyBadger` handling an input or message.
pub type Result<T> = ::std::result::Result<T, Error>;
/// A Queueing Honey Badger builder, to configure the parameters and create new instances of
/// `QueueingHoneyBadger`.
pub struct QueueingHoneyBadgerBuilder<T, N: Rand + Ord, Q> {
/// Shared network data.
dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
/// The target number of transactions to be included in each batch.
batch_size: usize,
/// The queue of pending transactions that haven't been output in a batch yet.
queue: Q,
_phantom: PhantomData<T>,
}
type QueueingHoneyBadgerWithStep<T, N, Q> = (QueueingHoneyBadger<T, N, Q>, Step<T, N>);
impl<T, N, Q> QueueingHoneyBadgerBuilder<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
// TODO: Make it easier to build a `QueueingHoneyBadger` with a `JoinPlan`. Handle `Step`
// conversion internally.
pub fn new(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> Self {
// TODO: Use the defaults from `HoneyBadgerBuilder`.
QueueingHoneyBadgerBuilder {
dyn_hb,
batch_size: 100,
queue: Default::default(),
_phantom: PhantomData,
}
}
/// Sets the target number of transactions per batch.
pub fn batch_size(mut self, batch_size: usize) -> Self {
self.batch_size = batch_size;
self
}
/// Sets the transaction queue object.
pub fn queue(mut self, queue: Q) -> Self {
self.queue = queue;
self
}
/// Creates a new Queueing Honey Badger instance with an empty buffer.
pub fn build<R>(self, rng: R) -> QueueingHoneyBadgerWithStep<T, N, Q>
where
R: 'static + Rng + Send + Sync,
{
self.build_with_transactions(None, rng)
.expect("building without transactions cannot fail")
}
/// Returns a new Queueing Honey Badger instance that starts with the given transactions in its
/// buffer.
pub fn build_with_transactions<TI, R>(
mut self,
txs: TI,
rng: R,
) -> Result<QueueingHoneyBadgerWithStep<T, N, Q>>
where
TI: IntoIterator<Item = T>,
R: 'static + Rng + Send + Sync,
{
self.queue.extend(txs);
let mut qhb = QueueingHoneyBadger {
dyn_hb: self.dyn_hb,
batch_size: self.batch_size,
queue: self.queue,
rng: Box::new(rng),
};
let step = qhb.propose()?;
Ok((qhb, step))
}
}
/// A Honey Badger instance that can handle adding and removing nodes and manages a transaction
/// queue.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct QueueingHoneyBadger<T, N: Rand + Ord, Q> {
/// The target number of transactions to be included in each batch.
batch_size: usize,
/// The internal managed `DynamicHoneyBadger` instance.
dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
/// The queue of pending transactions that haven't been output in a batch yet.
queue: Q,
/// Random number generator used for choosing transactions from the queue.
#[derivative(Debug(format_with = "util::fmt_rng"))]
rng: Box<dyn Rng + Send + Sync>,
}
/// A `QueueingHoneyBadger` step, possibly containing multiple outputs.
pub type Step<T, N> = ::Step<Message<N>, Batch<T, N>, N>;
impl<T, N, Q> DistAlgorithm for QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
type NodeId = N;
type Input = Input<T, N>;
type Output = Batch<T, N>;
type Message = Message<N>;
type Error = Error;
fn handle_input(&mut self, input: Self::Input) -> Result<Step<T, N>> {
// User transactions are forwarded to `HoneyBadger` right away. Internal messages are
// in addition signed and broadcast.
match input {
Input::User(tx) => self.push_transaction(tx),
Input::Change(change) => self.vote_for(change),
}
}
fn handle_message(&mut self, sender_id: &N, message: Self::Message) -> Result<Step<T, N>> {
self.handle_message(sender_id, message)
}
fn terminated(&self) -> bool {
false
}
| self.dyn_hb.our_id()
}
}
impl<T, N, Q> QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
pub fn builder(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> QueueingHoneyBadgerBuilder<T, N, Q> {
QueueingHoneyBadgerBuilder::new(dyn_hb)
}
/// Adds a transaction to the queue.
///
/// This can be called at any time to append to the transaction queue. The new transaction will
/// be proposed in some future epoch.
///
/// If no proposal has yet been made for the current epoch, this may trigger one. In this case,
/// a nonempty step will returned, with the corresponding messages. (Or, if we are the only
/// validator, even with the completed batch as an output.)
pub fn push_transaction(&mut self, tx: T) -> Result<Step<T, N>> {
self.queue.extend(iter::once(tx));
self.propose()
}
/// Casts a vote to change the set of validators.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_for(&mut self, change: Change<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_for(change))
}
/// Casts a vote to add a node as a validator.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_add(&mut self, node_id: N, pub_key: PublicKey) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_add(node_id, pub_key))
}
/// Casts a vote to demote a validator to observer.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_remove(&mut self, node_id: &N) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_remove(node_id))
}
/// Handles a message received from `sender_id`.
///
/// This must be called with every message we receive from another node.
pub fn handle_message(&mut self, sender_id: &N, message: Message<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.handle_message(sender_id, message))
}
/// Returns a reference to the internal managed `DynamicHoneyBadger` instance.
pub fn dyn_hb(&self) -> &DynamicHoneyBadger<Vec<T>, N> {
&self.dyn_hb
}
/// Returns the information about the node IDs in the network, and the cryptographic keys.
pub fn netinfo(&self) -> &NetworkInfo<N> {
self.dyn_hb.netinfo()
}
/// Applies a function `f` to the `DynamicHoneyBadger` instance and processes the step.
fn apply<F>(&mut self, f: F) -> Result<Step<T, N>>
where
F: FnOnce(&mut DynamicHoneyBadger<Vec<T>, N>) -> dynamic_honey_badger::Result<Step<T, N>>,
{
let step = f(&mut self.dyn_hb).map_err(Error::Input)?;
self.queue
.remove_multiple(step.output.iter().flat_map(Batch::iter));
Ok(step.join(self.propose()?))
}
/// Returns the epoch of the next batch that will be output.
pub fn next_epoch(&self) -> u64 {
self.dyn_hb.next_epoch()
}
/// Returns `true` if we are ready to propose our contribution for the next epoch, i.e. if the
/// previous epoch has completed and we have either pending transactions or we are required to
/// make a proposal to avoid stalling the network.
fn can_propose(&self) -> bool {
if self.dyn_hb.has_input() {
return false; // Previous epoch is still in progress.
}
!self.queue.is_empty() || self.dyn_hb.should_propose()
}
/// Initiates the next epoch by proposing a batch from the queue.
fn propose(&mut self) -> Result<Step<T, N>> {
let mut step = Step::default();
while self.can_propose() {
let amount = cmp::max(1, self.batch_size / self.dyn_hb.netinfo().num_nodes());
let proposal = self.queue.choose(&mut self.rng, amount, self.batch_size);
step.extend(
self.dyn_hb
.handle_input(Input::User(proposal))
.map_err(Error::Propose)?,
);
}
Ok(step)
}
}
/// A batch containing a list of transactions from at least two thirds of the validators.
pub type Batch<T, N> = DhbBatch<Vec<T>, N>; | fn our_id(&self) -> &N { | random_line_split |
install_plans.go | package recipe
import (
"context"
"fmt"
"sort"
log "github.com/sirupsen/logrus"
existinginfrav1 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/flavors/eksd"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/envcfg"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
)
const (
// PlanKey for storing plans as annotations on Nodes
PlanKey string = "wks.weave.works/node-plan"
)
// BuildBasePlan creates a plan for installing the base building blocks for the node
func BuildBasePlan(pkgType resource.PkgType) plan.Resource {
b := plan.NewBuilder()
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
// Package manager features
b.AddResource("install:yum-utils", &resource.RPM{Name: "yum-utils"})
b.AddResource("install:yum-versionlock", &resource.RPM{Name: "yum-plugin-versionlock"})
// Device Mapper
b.AddResource("install:device-mapper-persistent-data", &resource.RPM{Name: "device-mapper-persistent-data"})
b.AddResource("install:lvm2", &resource.RPM{Name: "lvm2"})
case resource.PkgTypeDeb:
// Package manager features
b.AddResource("install:gnupg", &resource.Deb{Name: "gnupg"})
// TODO(michal): Enable locking
// Device Mapper
b.AddResource("install:thin-provisioning-tools", &resource.Deb{Name: "thin-provisioning-tools"})
b.AddResource("install:lvm2", &resource.Deb{Name: "lvm2"})
}
p, err := b.Plan()
p.SetUndoCondition(func(_ plan.Runner, _ plan.State) bool { return false })
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildConfigPlan creates a plan for handling the configuration files
func BuildConfigPlan(files []*resource.File) plan.Resource {
b := plan.NewBuilder()
for idx, file := range files {
b.AddResource(fmt.Sprintf("install:config-file-%d", idx), file)
}
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildConfigMapPlan creates a plan to handle config maps
func BuildConfigMapPlan(manifests map[string][]byte, namespace string) plan.Resource {
b := plan.NewBuilder()
for name, manifest := range manifests {
remoteName := fmt.Sprintf("config-map-%s", name)
b.AddResource("install:"+remoteName, &resource.KubectlApply{Filename: object.String(remoteName), Manifest: manifest, Namespace: object.String(namespace)})
}
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildCNIPlan creates a sub-plan to install the CNI plugin.
func BuildCNIPlan(cni string, manifests [][]byte) plan.Resource {
b := plan.NewBuilder()
b.AddResource(
"install-cni:apply-manifests",
&resource.KubectlApply{Manifest: manifests[0], Filename: object.String(cni + ".yaml")},
)
if len(manifests) == 2 {
b.AddResource(
"install-cni:apply-manifests-ds",
&resource.KubectlApply{Manifest: manifests[1], Filename: object.String(cni + "-daemon-set" + ".yaml")},
plan.DependOn("install-cni:apply-manifests"))
}
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildCRIPlan creates a plan for installing a CRI. Currently, Docker is the only supported CRI
func BuildCRIPlan(ctx context.Context, criSpec *existinginfrav1.ContainerRuntime, cfg *envcfg.EnvSpecificConfig, pkgType resource.PkgType) plan.Resource {
b := plan.NewBuilder()
if criSpec.Kind != "docker" {
log.Fatalf("Unknown CRI - %s", criSpec.Kind)
}
IsDockerOnCentOS := false
// Docker runtime
switch pkgType {
case resource.PkgTypeRHEL:
b.AddResource("install:container-selinux",
&resource.Run{
Script: object.String("yum install -y http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm || true"),
UndoScript: object.String("yum remove -y container-selinux || true")})
b.AddResource("install:docker",
&resource.RPM{Name: criSpec.Package, Version: criSpec.Version},
plan.DependOn("install:container-selinux"))
// SELinux will be here along with docker and containerd-selinux packages
IsDockerOnCentOS = true
case resource.PkgTypeRPM:
b.AddResource("install:docker",
&resource.RPM{Name: criSpec.Package, Version: criSpec.Version})
// SELinux will be here along with docker and containerd-selinux packages
IsDockerOnCentOS = true
case resource.PkgTypeDeb:
// TODO(michal): Use the official docker.com repo
b.AddResource("install:docker", &resource.Deb{Name: "docker.io"})
}
if cfg.LockYUMPkgs {
b.AddResource(
"lock-package:docker",
&resource.Run{
Script: object.String("yum versionlock add docker-ce"),
// If we never installed yum-plugin-versionlock or docker, this should not fail
UndoScript: object.String("yum versionlock delete docker-ce || true")},
plan.DependOn("install:docker"))
}
// this is a special case: if SELinux is not there on RH, CentOS Linux family
// installing Docker will also installing SELinux
// then we set SELinux mode to be permissive right after the docker installation step
if IsDockerOnCentOS && cfg.SetSELinuxPermissive {
b.AddResource(
"selinux:permissive",
&resource.Run{
Script: object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
// sometime, SELinux not installed yet so || true to ignore the error
UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
},
plan.DependOn("install:docker"))
}
b.AddResource(
"systemd:daemon-reload",
&resource.Run{Script: object.String("systemctl daemon-reload")},
plan.DependOn("install:docker"),
)
b.AddResource(
"service-init:docker-service",
&resource.Service{Name: "docker", Status: "active", Enabled: true},
plan.DependOn("systemd:daemon-reload"))
p, err := b.Plan()
p.SetUndoCondition(func(r plan.Runner, _ plan.State) bool {
type AwareChanger interface {
WouldChangeState(ctx context.Context, r plan.Runner) (bool, error)
}
chg, err := p.GetResource("install:docker").(AwareChanger).WouldChangeState(ctx, r)
return chg || (err != nil)
})
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BinInstaller creates a function to install binaries based on package type and cluster flavors
func | (pkgType resource.PkgType, f *eksd.EKSD) (func(string, string) plan.Resource, error) {
if f != nil {
log.Debugf("Using flavor %+v", f)
return func(binName, version string) plan.Resource {
// TODO (Mark) logic for the architecture
binURL, sha256, err := f.KubeBinURL(binName)
if err != nil {
log.Fatalf("%v", err)
return nil
}
binPath := "/usr/bin/" + binName
return &resource.Run{
Script: object.String(fmt.Sprintf("curl -o %s %s && openssl dgst -sha256 %s | grep \"%s\" > /dev/null && chmod 755 %s", binPath, binURL, binPath, sha256, binPath)),
UndoScript: object.String(fmt.Sprintf("pkill --uid 0 %s && rm %s || true", binName, binPath))}
}, nil
}
if pkgType == resource.PkgTypeDeb {
return func(binName, version string) plan.Resource {
return &resource.Deb{Name: binName, Suffix: "=" + version + "-00"}
}, nil
}
return func(binName, version string) plan.Resource {
return &resource.RPM{Name: binName, Version: version, DisableExcludes: "kubernetes"}
}, nil
}
// BuildK8SPlan creates a plan for running kubernetes on a node
func BuildK8SPlan(kubernetesVersion string, kubeletNodeIP string, seLinuxInstalled, setSELinuxPermissive, disableSwap, lockYUMPkgs bool, pkgType resource.PkgType, cloudProvider string, extraArgs map[string]string, binInstaller func(string, string) plan.Resource, flavor *eksd.EKSD) plan.Resource {
b := plan.NewBuilder()
// Kubernetes repos
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
// do nothing
case resource.PkgTypeDeb:
// XXX: Workaround for https://github.com/weaveworks/wksctl/issues/654 : *.gpg is a binary format, and currently wks is unable to handle
// binary files in the configuration configmap. Therefore, I needed to supply the *.gpg contents base64-encoded.
// In a world without that bug, one could just use the "!!binary"" YAML format in the configmap and store the *.gpg there directly.
b.AddResource("configure:kubernetes-repo-key", &resource.Run{
Script: object.String("base64 -d /tmp/cloud-google-com.gpg.b64 > /etc/apt/trusted.gpg.d/cloud-google-com.gpg"),
})
repoLine := "deb https://apt.kubernetes.io/ kubernetes-xenial main"
repoFile := "/etc/apt/sources.list.d/wks-google.list"
sedExpr := fmt.Sprintf(`\!%s!d`, repoLine) // same as '/%s/d' but allows '/' in %s
b.AddResource("configure:kubernetes-repo", &resource.Run{
Script: object.String(fmt.Sprintf("echo %q | tee -a %q", repoLine, repoFile)),
UndoScript: object.String(fmt.Sprintf(`test ! -f %q || sed -i '%s' %q`, repoFile, sedExpr, repoFile)),
}, plan.DependOn("configure:kubernetes-repo-key"))
}
// If SELinux is already installed and we need to set SELinux to permissive mode, do it
if seLinuxInstalled && setSELinuxPermissive {
b.AddResource(
"selinux:permissive",
&resource.Run{
Script: object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
})
}
// Install k8s packages
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
if flavor != nil {
b.AddResource("install:kubelet-package", &resource.RPM{Name: "kubelet", Version: kubernetesVersion, DisableExcludes: "kubernetes"})
b.AddResource(
"cleanup:kubelet",
&resource.Run{Script: object.String("pkill kubelet | true")},
plan.DependOn("install:kubelet-package"))
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
} else {
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion))
}
b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion))
b.AddResource("install:kubeadm",
binInstaller("kubeadm", kubernetesVersion),
plan.DependOn("install:kubectl"),
plan.DependOn("install:kubelet"),
)
case resource.PkgTypeDeb:
// TODO(michal): Install the newest release version by default instead of hardcoding "-00".
if flavor != nil {
b.AddResource("install:kubelet-package", &resource.Deb{Name: "kubelet", Suffix: "=" + kubernetesVersion + "-00"},
plan.DependOn("configure:kubernetes-repo"))
b.AddResource(
"cleanup:kubelet",
&resource.Run{Script: object.String("pkill kubelet | true")},
plan.DependOn("install:kubelet-package"))
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
} else {
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
}
b.AddResource("install:kubeadm", binInstaller("kubeadm", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"), plan.DependOn("install:kubelet"))
b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
}
if lockYUMPkgs {
b.AddResource(
"lock-package:kubernetes",
&resource.Run{
Script: object.String("yum versionlock add 'kube*'"),
// If we never installed yum-plugin-versionlock or kubernetes, this should not fail
UndoScript: object.String("yum versionlock delete 'kube*' || true")},
plan.DependOn("install:kubectl"),
)
}
b.AddResource(
"create-dir:kubelet.service.d",
&resource.Dir{Path: object.String("/etc/systemd/system/kubelet.service.d")},
)
b.AddResource(
"install:kubeadm-conf",
&resource.File{Content: `# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS`,
Destination: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"},
plan.DependOn("create-dir:kubelet.service.d"))
kubeletDeps := []string{"install:kubeadm-conf"}
processCloudProvider := func(cmdline string) string {
if cloudProvider != "" {
log.WithField("cloudProvider", cloudProvider).Debug("using cloud provider")
return fmt.Sprintf("%s --cloud-provider=%s\n", cmdline, cloudProvider)
}
return cmdline + "\n"
}
processAdditionalArgs := func(cmdline string) string {
result := cmdline
strs := []string{}
for name, value := range extraArgs {
strs = append(strs, fmt.Sprintf("--%s='%s'", name, value))
}
sort.Strings(strs)
for _, str := range strs {
result = fmt.Sprintf("%s %s", result, str)
}
return processCloudProvider(result)
}
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
if disableSwap {
swapDisable := "configure:kubernetes-swap-disable"
kubeletDeps = append(kubeletDeps, swapDisable)
b.AddResource(
swapDisable,
buildDisableSwapPlan(),
plan.DependOn("create-dir:kubelet.service.d"))
kubeletSysconfig := "configure:kubelet-sysconfig"
b.AddResource(
kubeletSysconfig,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
kubeletDeps = append(kubeletDeps, kubeletSysconfig)
} else {
kubeletSysconfig := "configure:kubelet-sysconfig"
kubeletDeps = append(kubeletDeps, kubeletSysconfig)
b.AddResource(
kubeletSysconfig,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
}
case resource.PkgTypeDeb:
if disableSwap {
swapDisable := "configure:kubernetes-swap-disable"
kubeletDeps = append(kubeletDeps, swapDisable)
b.AddResource(
swapDisable,
buildDisableSwapPlan(),
plan.DependOn("create-dir:kubelet.service.d"))
kubeletDefault := "configure:kubelet-default"
kubeletDeps = append(kubeletDeps, kubeletDefault)
b.AddResource(
kubeletDefault,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
} else {
kubeletDefault := "configure:kubelet-default"
kubeletDeps = append(kubeletDeps, kubeletDefault)
b.AddResource(
kubeletDefault,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
}
}
b.AddResource(
"systemd:daemon-reload",
&resource.Run{Script: object.String("systemctl daemon-reload")},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
b.AddResource(
"service-init:kubelet",
&resource.Service{Name: "kubelet", Status: "active", Enabled: true},
plan.DependOn("systemd:daemon-reload", kubeletDeps...))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// buildDisableSwapPlan turns off swap and removes swap entries from /etc/fstab
// so swap will remain disabled on reboot.
// (Doc comment fixed to begin with the actual, unexported identifier.)
func buildDisableSwapPlan() plan.Resource {
	b := plan.NewBuilder()
	// Disable swap for the running session immediately.
	b.AddResource("configure:disable-swap-in-session", &resource.Run{Script: object.String("/sbin/swapoff -a")})
	b.AddResource(
		"configure:disable-swap-going-forward",
		&resource.Run{Script: object.String(
			// The ";" instead of "&&" below is because we want to copy the empty temp file over /etc/fstab if /etc/fstab only contains swap entries
			// and the "egrep" will fail on an empty file
			`tmpfile=$(mktemp /tmp/disable-swap.XXXXXX) && egrep -v '\s*\S*\s*\S*\s*swap.*' /etc/fstab > $tmpfile; mv $tmpfile /etc/fstab`)},
		plan.DependOn("configure:disable-swap-in-session"))
	p, err := b.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &p
}
// BuildKubeadmPrejoinPlan creates a sub-plan to prepare for running
// kubeadm join.
func BuildKubeadmPrejoinPlan(useIPTables bool) plan.Resource {
	builder := plan.NewBuilder()
	// When pod traffic is routed through iptables, bridged traffic must be
	// made visible to iptables before kubeadm join runs.
	if useIPTables {
		builder.AddResource(
			"configure:net.bridge",
			&resource.Run{Script: object.String("sysctl net.bridge.bridge-nf-call-iptables=1")},
		)
	}
	// Wipe any state a previous kubeadm run may have left on this node.
	builder.AddResource(
		"configure:kubeadm-force-reset",
		&resource.Run{Script: object.String("kubeadm reset --force")},
	)
	prejoinPlan, err := builder.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &prejoinPlan
}
// BuildSealedSecretPlan creates a sub-plan to install sealed secrets so we can check secrets into GitHub for GitOps
// NOTE(review): sealedSecretVersion is accepted but never read in this body — confirm whether it can be dropped.
func BuildSealedSecretPlan(sealedSecretVersion, crdManifest, keyManifest, controllerManifest []byte) plan.Resource {
b := plan.NewBuilder()
// Apply the CRD and wait until it reports condition=Established.
b.AddResource("install:sealed-secret-crd",
&resource.KubectlApply{Manifest: crdManifest, Filename: object.String("SealedSecretCRD.yaml"),
WaitCondition: "condition=Established"})
b.AddResource("install:sealed-secrets-key", &resource.KubectlApply{Manifest: keyManifest})
// The controller is only applied after the key so it can pick the key up on startup.
b.AddResource("install:sealed-secrets-controller",
&resource.KubectlApply{Manifest: controllerManifest, Filename: object.String("SealedSecretController.yaml")},
plan.DependOn("install:sealed-secrets-key"))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
| BinInstaller | identifier_name |
install_plans.go | package recipe
import (
"context"
"fmt"
"sort"
log "github.com/sirupsen/logrus"
existinginfrav1 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/flavors/eksd"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/envcfg"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
)
const (
// PlanKey for storing plans as annotations on Nodes
PlanKey string = "wks.weave.works/node-plan"
)
// BuildBasePlan creates a plan for installing the base building blocks for the node
// (package-manager helpers and device-mapper/LVM packages, per package type).
func BuildBasePlan(pkgType resource.PkgType) plan.Resource {
	b := plan.NewBuilder()
	switch pkgType {
	case resource.PkgTypeRPM, resource.PkgTypeRHEL:
		// Package manager features
		b.AddResource("install:yum-utils", &resource.RPM{Name: "yum-utils"})
		b.AddResource("install:yum-versionlock", &resource.RPM{Name: "yum-plugin-versionlock"})
		// Device Mapper
		b.AddResource("install:device-mapper-persistent-data", &resource.RPM{Name: "device-mapper-persistent-data"})
		b.AddResource("install:lvm2", &resource.RPM{Name: "lvm2"})
	case resource.PkgTypeDeb:
		// Package manager features
		b.AddResource("install:gnupg", &resource.Deb{Name: "gnupg"})
		// TODO(michal): Enable locking
		// Device Mapper
		b.AddResource("install:thin-provisioning-tools", &resource.Deb{Name: "thin-provisioning-tools"})
		b.AddResource("install:lvm2", &resource.Deb{Name: "lvm2"})
	}
	p, err := b.Plan()
	// Check the error BEFORE touching p: on failure p may be the zero value.
	// (Previously SetUndoCondition ran before this check.)
	if err != nil {
		log.Fatalf("%v", err)
	}
	// Base packages are never undone automatically.
	p.SetUndoCondition(func(_ plan.Runner, _ plan.State) bool { return false })
	return &p
}
// BuildConfigPlan creates a plan for handling the configuration files
func BuildConfigPlan(files []*resource.File) plan.Resource {
	builder := plan.NewBuilder()
	// Each file becomes its own resource, keyed by its position in the slice.
	for idx := range files {
		builder.AddResource(fmt.Sprintf("install:config-file-%d", idx), files[idx])
	}
	configPlan, err := builder.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &configPlan
}
// BuildConfigMapPlan creates a plan to handle config maps
func BuildConfigMapPlan(manifests map[string][]byte, namespace string) plan.Resource {
	builder := plan.NewBuilder()
	for name, manifest := range manifests {
		remoteName := fmt.Sprintf("config-map-%s", name)
		apply := &resource.KubectlApply{
			Filename:  object.String(remoteName),
			Manifest:  manifest,
			Namespace: object.String(namespace),
		}
		builder.AddResource("install:"+remoteName, apply)
	}
	cmPlan, err := builder.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &cmPlan
}
// BuildCNIPlan creates a sub-plan to install the CNI plugin.
func BuildCNIPlan(cni string, manifests [][]byte) plan.Resource |
// BuildCRIPlan creates a plan for installing a CRI. Currently, Docker is the only supported CRI
func BuildCRIPlan(ctx context.Context, criSpec *existinginfrav1.ContainerRuntime, cfg *envcfg.EnvSpecificConfig, pkgType resource.PkgType) plan.Resource {
	b := plan.NewBuilder()
	if criSpec.Kind != "docker" {
		log.Fatalf("Unknown CRI - %s", criSpec.Kind)
	}
	IsDockerOnCentOS := false
	// Docker runtime
	switch pkgType {
	case resource.PkgTypeRHEL:
		// RHEL needs container-selinux before docker; "|| true" keeps an
		// already-present package from failing the step.
		b.AddResource("install:container-selinux",
			&resource.Run{
				Script:     object.String("yum install -y http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm || true"),
				UndoScript: object.String("yum remove -y container-selinux || true")})
		b.AddResource("install:docker",
			&resource.RPM{Name: criSpec.Package, Version: criSpec.Version},
			plan.DependOn("install:container-selinux"))
		// SELinux will be here along with docker and containerd-selinux packages
		IsDockerOnCentOS = true
	case resource.PkgTypeRPM:
		b.AddResource("install:docker",
			&resource.RPM{Name: criSpec.Package, Version: criSpec.Version})
		// SELinux will be here along with docker and containerd-selinux packages
		IsDockerOnCentOS = true
	case resource.PkgTypeDeb:
		// TODO(michal): Use the official docker.com repo
		b.AddResource("install:docker", &resource.Deb{Name: "docker.io"})
	}
	if cfg.LockYUMPkgs {
		b.AddResource(
			"lock-package:docker",
			&resource.Run{
				Script: object.String("yum versionlock add docker-ce"),
				// If we never installed yum-plugin-versionlock or docker, this should not fail
				UndoScript: object.String("yum versionlock delete docker-ce || true")},
			plan.DependOn("install:docker"))
	}
	// this is a special case: if SELinux is not there on RH, CentOS Linux family
	// installing Docker will also installing SELinux
	// then we set SELinux mode to be permissive right after the docker installation step
	if IsDockerOnCentOS && cfg.SetSELinuxPermissive {
		b.AddResource(
			"selinux:permissive",
			&resource.Run{
				Script: object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
				// sometime, SELinux not installed yet so || true to ignore the error
				UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
			},
			plan.DependOn("install:docker"))
	}
	b.AddResource(
		"systemd:daemon-reload",
		&resource.Run{Script: object.String("systemctl daemon-reload")},
		plan.DependOn("install:docker"),
	)
	b.AddResource(
		"service-init:docker-service",
		&resource.Service{Name: "docker", Status: "active", Enabled: true},
		plan.DependOn("systemd:daemon-reload"))
	p, err := b.Plan()
	// Check the error BEFORE using p: on failure p may be the zero value and the
	// GetResource call inside the undo condition would misbehave.
	// (Previously SetUndoCondition ran before this check.)
	if err != nil {
		log.Fatalf("%v", err)
	}
	// Undo the plan whenever the docker install step would change state, or when
	// that cannot be determined. NOTE(review): the type assertion below panics if
	// the docker resource stops implementing WouldChangeState — confirm intended.
	p.SetUndoCondition(func(r plan.Runner, _ plan.State) bool {
		type AwareChanger interface {
			WouldChangeState(ctx context.Context, r plan.Runner) (bool, error)
		}
		chg, err := p.GetResource("install:docker").(AwareChanger).WouldChangeState(ctx, r)
		return chg || (err != nil)
	})
	return &p
}
// BinInstaller creates a function to install binaries based on package type and cluster flavors
// The returned closure maps (binName, version) to a plan resource. The error result is
// currently always nil in every branch.
func BinInstaller(pkgType resource.PkgType, f *eksd.EKSD) (func(string, string) plan.Resource, error) {
if f != nil {
log.Debugf("Using flavor %+v", f)
// Flavor build: download the binary from the flavor's URL and verify its sha256.
// NOTE(review): the version argument is ignored in this branch — the flavor decides the URL.
return func(binName, version string) plan.Resource {
// TODO (Mark) logic for the architecture
binURL, sha256, err := f.KubeBinURL(binName)
if err != nil {
// log.Fatalf terminates the process, so the return nil below is unreachable in practice.
log.Fatalf("%v", err)
return nil
}
binPath := "/usr/bin/" + binName
return &resource.Run{
Script: object.String(fmt.Sprintf("curl -o %s %s && openssl dgst -sha256 %s | grep \"%s\" > /dev/null && chmod 755 %s", binPath, binURL, binPath, sha256, binPath)),
UndoScript: object.String(fmt.Sprintf("pkill --uid 0 %s && rm %s || true", binName, binPath))}
}, nil
}
// Debian: pin the exact package version ("-00" is the repository's package revision).
if pkgType == resource.PkgTypeDeb {
return func(binName, version string) plan.Resource {
return &resource.Deb{Name: binName, Suffix: "=" + version + "-00"}
}, nil
}
// RPM family: install via yum, bypassing any "kubernetes" repo excludes.
return func(binName, version string) plan.Resource {
return &resource.RPM{Name: binName, Version: version, DisableExcludes: "kubernetes"}
}, nil
}
// BuildK8SPlan creates a plan for running kubernetes on a node: repo setup,
// kubelet/kubeadm/kubectl installation, the kubeadm systemd drop-in, swap
// handling, and finally enabling the kubelet service.
func BuildK8SPlan(kubernetesVersion string, kubeletNodeIP string, seLinuxInstalled, setSELinuxPermissive, disableSwap, lockYUMPkgs bool, pkgType resource.PkgType, cloudProvider string, extraArgs map[string]string, binInstaller func(string, string) plan.Resource, flavor *eksd.EKSD) plan.Resource {
	b := plan.NewBuilder()
	// Kubernetes repos
	switch pkgType {
	case resource.PkgTypeRPM, resource.PkgTypeRHEL:
		// do nothing
	case resource.PkgTypeDeb:
		// XXX: Workaround for https://github.com/weaveworks/wksctl/issues/654 : *.gpg is a binary format, and currently wks is unable to handle
		// binary files in the configuration configmap. Therefore, I needed to supply the *.gpg contents base64-encoded.
		// In a world without that bug, one could just use the "!!binary"" YAML format in the configmap and store the *.gpg there directly.
		b.AddResource("configure:kubernetes-repo-key", &resource.Run{
			Script: object.String("base64 -d /tmp/cloud-google-com.gpg.b64 > /etc/apt/trusted.gpg.d/cloud-google-com.gpg"),
		})
		repoLine := "deb https://apt.kubernetes.io/ kubernetes-xenial main"
		repoFile := "/etc/apt/sources.list.d/wks-google.list"
		sedExpr := fmt.Sprintf(`\!%s!d`, repoLine) // same as '/%s/d' but allows '/' in %s
		b.AddResource("configure:kubernetes-repo", &resource.Run{
			Script:     object.String(fmt.Sprintf("echo %q | tee -a %q", repoLine, repoFile)),
			UndoScript: object.String(fmt.Sprintf(`test ! -f %q || sed -i '%s' %q`, repoFile, sedExpr, repoFile)),
		}, plan.DependOn("configure:kubernetes-repo-key"))
	}
	// If SELinux is already installed and we need to set SELinux to permissive mode, do it
	if seLinuxInstalled && setSELinuxPermissive {
		b.AddResource(
			"selinux:permissive",
			&resource.Run{
				Script:     object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
				UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
			})
	}
	// Install k8s packages
	switch pkgType {
	case resource.PkgTypeRPM, resource.PkgTypeRHEL:
		if flavor != nil {
			// With a flavor, install the distro package first, stop any running
			// kubelet, then overwrite the binary via the flavor's binInstaller.
			b.AddResource("install:kubelet-package", &resource.RPM{Name: "kubelet", Version: kubernetesVersion, DisableExcludes: "kubernetes"})
			b.AddResource(
				"cleanup:kubelet",
				// "|| true" (fixed from "| true") so an absent kubelet process does not fail the step.
				&resource.Run{Script: object.String("pkill kubelet || true")},
				plan.DependOn("install:kubelet-package"))
			b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
		} else {
			b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion))
		}
		b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion))
		b.AddResource("install:kubeadm",
			binInstaller("kubeadm", kubernetesVersion),
			plan.DependOn("install:kubectl"),
			plan.DependOn("install:kubelet"),
		)
	case resource.PkgTypeDeb:
		// TODO(michal): Install the newest release version by default instead of hardcoding "-00".
		if flavor != nil {
			b.AddResource("install:kubelet-package", &resource.Deb{Name: "kubelet", Suffix: "=" + kubernetesVersion + "-00"},
				plan.DependOn("configure:kubernetes-repo"))
			b.AddResource(
				"cleanup:kubelet",
				// "|| true" (fixed from "| true") so an absent kubelet process does not fail the step.
				&resource.Run{Script: object.String("pkill kubelet || true")},
				plan.DependOn("install:kubelet-package"))
			b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
		} else {
			b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
		}
		b.AddResource("install:kubeadm", binInstaller("kubeadm", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"), plan.DependOn("install:kubelet"))
		b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
	}
	if lockYUMPkgs {
		b.AddResource(
			"lock-package:kubernetes",
			&resource.Run{
				Script: object.String("yum versionlock add 'kube*'"),
				// If we never installed yum-plugin-versionlock or kubernetes, this should not fail
				UndoScript: object.String("yum versionlock delete 'kube*' || true")},
			plan.DependOn("install:kubectl"),
		)
	}
	b.AddResource(
		"create-dir:kubelet.service.d",
		&resource.Dir{Path: object.String("/etc/systemd/system/kubelet.service.d")},
	)
	b.AddResource(
		"install:kubeadm-conf",
		&resource.File{Content: `# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS`,
			Destination: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"},
		plan.DependOn("create-dir:kubelet.service.d"))
	kubeletDeps := []string{"install:kubeadm-conf"}
	// processCloudProvider appends --cloud-provider to the kubelet command line when configured.
	processCloudProvider := func(cmdline string) string {
		if cloudProvider != "" {
			log.WithField("cloudProvider", cloudProvider).Debug("using cloud provider")
			return fmt.Sprintf("%s --cloud-provider=%s\n", cmdline, cloudProvider)
		}
		return cmdline + "\n"
	}
	// processAdditionalArgs appends the extraArgs flags (sorted for a deterministic
	// file content) and then the cloud provider flag.
	processAdditionalArgs := func(cmdline string) string {
		result := cmdline
		strs := []string{}
		for name, value := range extraArgs {
			strs = append(strs, fmt.Sprintf("--%s='%s'", name, value))
		}
		sort.Strings(strs)
		for _, str := range strs {
			result = fmt.Sprintf("%s %s", result, str)
		}
		return processCloudProvider(result)
	}
	// Write /etc/default/kubelet; without disableSwap, kubelet gets --fail-swap-on=false.
	switch pkgType {
	case resource.PkgTypeRPM, resource.PkgTypeRHEL:
		if disableSwap {
			swapDisable := "configure:kubernetes-swap-disable"
			kubeletDeps = append(kubeletDeps, swapDisable)
			b.AddResource(
				swapDisable,
				buildDisableSwapPlan(),
				plan.DependOn("create-dir:kubelet.service.d"))
			kubeletSysconfig := "configure:kubelet-sysconfig"
			// Append before AddResource, matching the other branches (order is not significant).
			kubeletDeps = append(kubeletDeps, kubeletSysconfig)
			b.AddResource(
				kubeletSysconfig,
				&resource.File{
					Content:     processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
					Destination: "/etc/default/kubelet"},
				plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
		} else {
			kubeletSysconfig := "configure:kubelet-sysconfig"
			kubeletDeps = append(kubeletDeps, kubeletSysconfig)
			b.AddResource(
				kubeletSysconfig,
				&resource.File{
					Content:     processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
					Destination: "/etc/default/kubelet"},
				plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
		}
	case resource.PkgTypeDeb:
		if disableSwap {
			swapDisable := "configure:kubernetes-swap-disable"
			kubeletDeps = append(kubeletDeps, swapDisable)
			b.AddResource(
				swapDisable,
				buildDisableSwapPlan(),
				plan.DependOn("create-dir:kubelet.service.d"))
			kubeletDefault := "configure:kubelet-default"
			kubeletDeps = append(kubeletDeps, kubeletDefault)
			b.AddResource(
				kubeletDefault,
				&resource.File{
					Content:     processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
					Destination: "/etc/default/kubelet"},
				plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
		} else {
			kubeletDefault := "configure:kubelet-default"
			kubeletDeps = append(kubeletDeps, kubeletDefault)
			b.AddResource(
				kubeletDefault,
				&resource.File{
					Content:     processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
					Destination: "/etc/default/kubelet"},
				plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
		}
	}
	b.AddResource(
		"systemd:daemon-reload",
		&resource.Run{Script: object.String("systemctl daemon-reload")},
		plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
	b.AddResource(
		"service-init:kubelet",
		&resource.Service{Name: "kubelet", Status: "active", Enabled: true},
		plan.DependOn("systemd:daemon-reload", kubeletDeps...))
	p, err := b.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &p
}
// buildDisableSwapPlan turns off swap and removes swap entries from /etc/fstab
// so swap will remain disabled on reboot.
// (Doc comment fixed to begin with the actual, unexported identifier.)
func buildDisableSwapPlan() plan.Resource {
	b := plan.NewBuilder()
	// Disable swap for the running session immediately.
	b.AddResource("configure:disable-swap-in-session", &resource.Run{Script: object.String("/sbin/swapoff -a")})
	b.AddResource(
		"configure:disable-swap-going-forward",
		&resource.Run{Script: object.String(
			// The ";" instead of "&&" below is because we want to copy the empty temp file over /etc/fstab if /etc/fstab only contains swap entries
			// and the "egrep" will fail on an empty file
			`tmpfile=$(mktemp /tmp/disable-swap.XXXXXX) && egrep -v '\s*\S*\s*\S*\s*swap.*' /etc/fstab > $tmpfile; mv $tmpfile /etc/fstab`)},
		plan.DependOn("configure:disable-swap-in-session"))
	p, err := b.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &p
}
// BuildKubeadmPrejoinPlan creates a sub-plan to prepare for running
// kubeadm join.
func BuildKubeadmPrejoinPlan(useIPTables bool) plan.Resource {
	builder := plan.NewBuilder()
	// When pod traffic is routed through iptables, bridged traffic must be
	// made visible to iptables before kubeadm join runs.
	if useIPTables {
		builder.AddResource(
			"configure:net.bridge",
			&resource.Run{Script: object.String("sysctl net.bridge.bridge-nf-call-iptables=1")},
		)
	}
	// Wipe any state a previous kubeadm run may have left on this node.
	builder.AddResource(
		"configure:kubeadm-force-reset",
		&resource.Run{Script: object.String("kubeadm reset --force")},
	)
	prejoinPlan, err := builder.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &prejoinPlan
}
// BuildSealedSecretPlan creates a sub-plan to install sealed secrets so we can check secrets into GitHub for GitOps
// NOTE(review): sealedSecretVersion is accepted but never read in this body — confirm whether it can be dropped.
func BuildSealedSecretPlan(sealedSecretVersion, crdManifest, keyManifest, controllerManifest []byte) plan.Resource {
b := plan.NewBuilder()
// Apply the CRD and wait until it reports condition=Established.
b.AddResource("install:sealed-secret-crd",
&resource.KubectlApply{Manifest: crdManifest, Filename: object.String("SealedSecretCRD.yaml"),
WaitCondition: "condition=Established"})
b.AddResource("install:sealed-secrets-key", &resource.KubectlApply{Manifest: keyManifest})
// The controller is only applied after the key so it can pick the key up on startup.
b.AddResource("install:sealed-secrets-controller",
&resource.KubectlApply{Manifest: controllerManifest, Filename: object.String("SealedSecretController.yaml")},
plan.DependOn("install:sealed-secrets-key"))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
| {
b := plan.NewBuilder()
b.AddResource(
"install-cni:apply-manifests",
&resource.KubectlApply{Manifest: manifests[0], Filename: object.String(cni + ".yaml")},
)
if len(manifests) == 2 {
b.AddResource(
"install-cni:apply-manifests-ds",
&resource.KubectlApply{Manifest: manifests[1], Filename: object.String(cni + "-daemon-set" + ".yaml")},
plan.DependOn("install-cni:apply-manifests"))
}
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
} | identifier_body |
install_plans.go | package recipe
import (
"context"
"fmt"
"sort"
log "github.com/sirupsen/logrus"
existinginfrav1 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/flavors/eksd"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/envcfg"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
)
const (
// PlanKey for storing plans as annotations on Nodes
PlanKey string = "wks.weave.works/node-plan"
)
// BuildBasePlan creates a plan for installing the base building blocks for the node
// (package-manager helpers and device-mapper/LVM packages, per package type).
func BuildBasePlan(pkgType resource.PkgType) plan.Resource {
	b := plan.NewBuilder()
	switch pkgType {
	case resource.PkgTypeRPM, resource.PkgTypeRHEL:
		// Package manager features
		b.AddResource("install:yum-utils", &resource.RPM{Name: "yum-utils"})
		b.AddResource("install:yum-versionlock", &resource.RPM{Name: "yum-plugin-versionlock"})
		// Device Mapper
		b.AddResource("install:device-mapper-persistent-data", &resource.RPM{Name: "device-mapper-persistent-data"})
		b.AddResource("install:lvm2", &resource.RPM{Name: "lvm2"})
	case resource.PkgTypeDeb:
		// Package manager features
		b.AddResource("install:gnupg", &resource.Deb{Name: "gnupg"})
		// TODO(michal): Enable locking
		// Device Mapper
		b.AddResource("install:thin-provisioning-tools", &resource.Deb{Name: "thin-provisioning-tools"})
		b.AddResource("install:lvm2", &resource.Deb{Name: "lvm2"})
	}
	p, err := b.Plan()
	// Check the error BEFORE touching p: on failure p may be the zero value.
	// (Previously SetUndoCondition ran before this check.)
	if err != nil {
		log.Fatalf("%v", err)
	}
	// Base packages are never undone automatically.
	p.SetUndoCondition(func(_ plan.Runner, _ plan.State) bool { return false })
	return &p
}
// BuildConfigPlan creates a plan for handling the configuration files
func BuildConfigPlan(files []*resource.File) plan.Resource {
	builder := plan.NewBuilder()
	// Each file becomes its own resource, keyed by its position in the slice.
	for idx := range files {
		builder.AddResource(fmt.Sprintf("install:config-file-%d", idx), files[idx])
	}
	configPlan, err := builder.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &configPlan
}
// BuildConfigMapPlan creates a plan to handle config maps
func BuildConfigMapPlan(manifests map[string][]byte, namespace string) plan.Resource {
	builder := plan.NewBuilder()
	for name, manifest := range manifests {
		remoteName := fmt.Sprintf("config-map-%s", name)
		apply := &resource.KubectlApply{
			Filename:  object.String(remoteName),
			Manifest:  manifest,
			Namespace: object.String(namespace),
		}
		builder.AddResource("install:"+remoteName, apply)
	}
	cmPlan, err := builder.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &cmPlan
}
// BuildCNIPlan creates a sub-plan to install the CNI plugin.
func BuildCNIPlan(cni string, manifests [][]byte) plan.Resource {
	// Fail loudly (matching this file's error style) instead of panicking on
	// manifests[0] when no manifest was supplied.
	if len(manifests) == 0 {
		log.Fatalf("BuildCNIPlan: no manifests provided for CNI %q", cni)
	}
	b := plan.NewBuilder()
	b.AddResource(
		"install-cni:apply-manifests",
		&resource.KubectlApply{Manifest: manifests[0], Filename: object.String(cni + ".yaml")},
	)
	// An optional second manifest (a daemon set) is applied after the first.
	if len(manifests) == 2 {
		b.AddResource(
			"install-cni:apply-manifests-ds",
			&resource.KubectlApply{Manifest: manifests[1], Filename: object.String(cni + "-daemon-set" + ".yaml")},
			plan.DependOn("install-cni:apply-manifests"))
	}
	p, err := b.Plan()
	if err != nil {
		log.Fatalf("%v", err)
	}
	return &p
}
// BuildCRIPlan creates a plan for installing a CRI. Currently, Docker is the only supported CRI
func BuildCRIPlan(ctx context.Context, criSpec *existinginfrav1.ContainerRuntime, cfg *envcfg.EnvSpecificConfig, pkgType resource.PkgType) plan.Resource {
b := plan.NewBuilder()
if criSpec.Kind != "docker" |
IsDockerOnCentOS := false
// Docker runtime
switch pkgType {
case resource.PkgTypeRHEL:
b.AddResource("install:container-selinux",
&resource.Run{
Script: object.String("yum install -y http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm || true"),
UndoScript: object.String("yum remove -y container-selinux || true")})
b.AddResource("install:docker",
&resource.RPM{Name: criSpec.Package, Version: criSpec.Version},
plan.DependOn("install:container-selinux"))
// SELinux will be here along with docker and containerd-selinux packages
IsDockerOnCentOS = true
case resource.PkgTypeRPM:
b.AddResource("install:docker",
&resource.RPM{Name: criSpec.Package, Version: criSpec.Version})
// SELinux will be here along with docker and containerd-selinux packages
IsDockerOnCentOS = true
case resource.PkgTypeDeb:
// TODO(michal): Use the official docker.com repo
b.AddResource("install:docker", &resource.Deb{Name: "docker.io"})
}
if cfg.LockYUMPkgs {
b.AddResource(
"lock-package:docker",
&resource.Run{
Script: object.String("yum versionlock add docker-ce"),
// If we never installed yum-plugin-versionlock or docker, this should not fail
UndoScript: object.String("yum versionlock delete docker-ce || true")},
plan.DependOn("install:docker"))
}
// this is a special case: if SELinux is not there on RH, CentOS Linux family
// installing Docker will also installing SELinux
// then we set SELinux mode to be permissive right after the docker installation step
if IsDockerOnCentOS && cfg.SetSELinuxPermissive {
b.AddResource(
"selinux:permissive",
&resource.Run{
Script: object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
// sometime, SELinux not installed yet so || true to ignore the error
UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
},
plan.DependOn("install:docker"))
}
b.AddResource(
"systemd:daemon-reload",
&resource.Run{Script: object.String("systemctl daemon-reload")},
plan.DependOn("install:docker"),
)
b.AddResource(
"service-init:docker-service",
&resource.Service{Name: "docker", Status: "active", Enabled: true},
plan.DependOn("systemd:daemon-reload"))
p, err := b.Plan()
p.SetUndoCondition(func(r plan.Runner, _ plan.State) bool {
type AwareChanger interface {
WouldChangeState(ctx context.Context, r plan.Runner) (bool, error)
}
chg, err := p.GetResource("install:docker").(AwareChanger).WouldChangeState(ctx, r)
return chg || (err != nil)
})
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BinInstaller creates a function to install binaries based on package type and cluster flavors
// The returned closure maps (binName, version) to a plan resource. The error result is
// currently always nil in every branch.
func BinInstaller(pkgType resource.PkgType, f *eksd.EKSD) (func(string, string) plan.Resource, error) {
if f != nil {
log.Debugf("Using flavor %+v", f)
// Flavor build: download the binary from the flavor's URL and verify its sha256.
// NOTE(review): the version argument is ignored in this branch — the flavor decides the URL.
return func(binName, version string) plan.Resource {
// TODO (Mark) logic for the architecture
binURL, sha256, err := f.KubeBinURL(binName)
if err != nil {
// log.Fatalf terminates the process, so the return nil below is unreachable in practice.
log.Fatalf("%v", err)
return nil
}
binPath := "/usr/bin/" + binName
return &resource.Run{
Script: object.String(fmt.Sprintf("curl -o %s %s && openssl dgst -sha256 %s | grep \"%s\" > /dev/null && chmod 755 %s", binPath, binURL, binPath, sha256, binPath)),
UndoScript: object.String(fmt.Sprintf("pkill --uid 0 %s && rm %s || true", binName, binPath))}
}, nil
}
// Debian: pin the exact package version ("-00" is the repository's package revision).
if pkgType == resource.PkgTypeDeb {
return func(binName, version string) plan.Resource {
return &resource.Deb{Name: binName, Suffix: "=" + version + "-00"}
}, nil
}
// RPM family: install via yum, bypassing any "kubernetes" repo excludes.
return func(binName, version string) plan.Resource {
return &resource.RPM{Name: binName, Version: version, DisableExcludes: "kubernetes"}
}, nil
}
// BuildK8SPlan creates a plan for running kubernetes on a node
func BuildK8SPlan(kubernetesVersion string, kubeletNodeIP string, seLinuxInstalled, setSELinuxPermissive, disableSwap, lockYUMPkgs bool, pkgType resource.PkgType, cloudProvider string, extraArgs map[string]string, binInstaller func(string, string) plan.Resource, flavor *eksd.EKSD) plan.Resource {
b := plan.NewBuilder()
// Kubernetes repos
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
// do nothing
case resource.PkgTypeDeb:
// XXX: Workaround for https://github.com/weaveworks/wksctl/issues/654 : *.gpg is a binary format, and currently wks is unable to handle
// binary files in the configuration configmap. Therefore, I needed to supply the *.gpg contents base64-encoded.
// In a world without that bug, one could just use the "!!binary"" YAML format in the configmap and store the *.gpg there directly.
b.AddResource("configure:kubernetes-repo-key", &resource.Run{
Script: object.String("base64 -d /tmp/cloud-google-com.gpg.b64 > /etc/apt/trusted.gpg.d/cloud-google-com.gpg"),
})
repoLine := "deb https://apt.kubernetes.io/ kubernetes-xenial main"
repoFile := "/etc/apt/sources.list.d/wks-google.list"
sedExpr := fmt.Sprintf(`\!%s!d`, repoLine) // same as '/%s/d' but allows '/' in %s
b.AddResource("configure:kubernetes-repo", &resource.Run{
Script: object.String(fmt.Sprintf("echo %q | tee -a %q", repoLine, repoFile)),
UndoScript: object.String(fmt.Sprintf(`test ! -f %q || sed -i '%s' %q`, repoFile, sedExpr, repoFile)),
}, plan.DependOn("configure:kubernetes-repo-key"))
}
// If SELinux is already installed and we need to set SELinux to permissive mode, do it
if seLinuxInstalled && setSELinuxPermissive {
b.AddResource(
"selinux:permissive",
&resource.Run{
Script: object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
})
}
// Install k8s packages
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
if flavor != nil {
b.AddResource("install:kubelet-package", &resource.RPM{Name: "kubelet", Version: kubernetesVersion, DisableExcludes: "kubernetes"})
b.AddResource(
"cleanup:kubelet",
&resource.Run{Script: object.String("pkill kubelet | true")},
plan.DependOn("install:kubelet-package"))
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
} else {
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion))
}
b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion))
b.AddResource("install:kubeadm",
binInstaller("kubeadm", kubernetesVersion),
plan.DependOn("install:kubectl"),
plan.DependOn("install:kubelet"),
)
case resource.PkgTypeDeb:
// TODO(michal): Install the newest release version by default instead of hardcoding "-00".
if flavor != nil {
b.AddResource("install:kubelet-package", &resource.Deb{Name: "kubelet", Suffix: "=" + kubernetesVersion + "-00"},
plan.DependOn("configure:kubernetes-repo"))
b.AddResource(
"cleanup:kubelet",
&resource.Run{Script: object.String("pkill kubelet | true")},
plan.DependOn("install:kubelet-package"))
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
} else {
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
}
b.AddResource("install:kubeadm", binInstaller("kubeadm", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"), plan.DependOn("install:kubelet"))
b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
}
if lockYUMPkgs {
b.AddResource(
"lock-package:kubernetes",
&resource.Run{
Script: object.String("yum versionlock add 'kube*'"),
// If we never installed yum-plugin-versionlock or kubernetes, this should not fail
UndoScript: object.String("yum versionlock delete 'kube*' || true")},
plan.DependOn("install:kubectl"),
)
}
b.AddResource(
"create-dir:kubelet.service.d",
&resource.Dir{Path: object.String("/etc/systemd/system/kubelet.service.d")},
)
b.AddResource(
"install:kubeadm-conf",
&resource.File{Content: `# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS`,
Destination: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"},
plan.DependOn("create-dir:kubelet.service.d"))
kubeletDeps := []string{"install:kubeadm-conf"}
processCloudProvider := func(cmdline string) string {
if cloudProvider != "" {
log.WithField("cloudProvider", cloudProvider).Debug("using cloud provider")
return fmt.Sprintf("%s --cloud-provider=%s\n", cmdline, cloudProvider)
}
return cmdline + "\n"
}
processAdditionalArgs := func(cmdline string) string {
result := cmdline
strs := []string{}
for name, value := range extraArgs {
strs = append(strs, fmt.Sprintf("--%s='%s'", name, value))
}
sort.Strings(strs)
for _, str := range strs {
result = fmt.Sprintf("%s %s", result, str)
}
return processCloudProvider(result)
}
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
if disableSwap {
swapDisable := "configure:kubernetes-swap-disable"
kubeletDeps = append(kubeletDeps, swapDisable)
b.AddResource(
swapDisable,
buildDisableSwapPlan(),
plan.DependOn("create-dir:kubelet.service.d"))
kubeletSysconfig := "configure:kubelet-sysconfig"
b.AddResource(
kubeletSysconfig,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
kubeletDeps = append(kubeletDeps, kubeletSysconfig)
} else {
kubeletSysconfig := "configure:kubelet-sysconfig"
kubeletDeps = append(kubeletDeps, kubeletSysconfig)
b.AddResource(
kubeletSysconfig,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
}
case resource.PkgTypeDeb:
if disableSwap {
swapDisable := "configure:kubernetes-swap-disable"
kubeletDeps = append(kubeletDeps, swapDisable)
b.AddResource(
swapDisable,
buildDisableSwapPlan(),
plan.DependOn("create-dir:kubelet.service.d"))
kubeletDefault := "configure:kubelet-default"
kubeletDeps = append(kubeletDeps, kubeletDefault)
b.AddResource(
kubeletDefault,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
} else {
kubeletDefault := "configure:kubelet-default"
kubeletDeps = append(kubeletDeps, kubeletDefault)
b.AddResource(
kubeletDefault,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
}
}
b.AddResource(
"systemd:daemon-reload",
&resource.Run{Script: object.String("systemctl daemon-reload")},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
b.AddResource(
"service-init:kubelet",
&resource.Service{Name: "kubelet", Status: "active", Enabled: true},
plan.DependOn("systemd:daemon-reload", kubeletDeps...))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildDisableSwapPlan turns off swap and removes swap entries from /etc/fstab so swap will remain disabled on reboot
func buildDisableSwapPlan() plan.Resource {
b := plan.NewBuilder()
b.AddResource("configure:disable-swap-in-session", &resource.Run{Script: object.String("/sbin/swapoff -a")})
b.AddResource(
"configure:disable-swap-going-forward",
&resource.Run{Script: object.String(
// The ";" instead of "&&" below is because we want to copy the empty temp file over /etc/fstab if /etc/fstab only contains swap entries
// and the "egrep" will fail on an empty file
`tmpfile=$(mktemp /tmp/disable-swap.XXXXXX) && egrep -v '\s*\S*\s*\S*\s*swap.*' /etc/fstab > $tmpfile; mv $tmpfile /etc/fstab`)},
plan.DependOn("configure:disable-swap-in-session"))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildKubeadmPrejoinPlan creates a sub-plan to prepare for running
// kubeadm join.
func BuildKubeadmPrejoinPlan(useIPTables bool) plan.Resource {
b := plan.NewBuilder()
if useIPTables {
b.AddResource(
"configure:net.bridge",
&resource.Run{Script: object.String("sysctl net.bridge.bridge-nf-call-iptables=1")},
)
}
b.AddResource(
"configure:kubeadm-force-reset",
&resource.Run{Script: object.String("kubeadm reset --force")},
)
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildSealedSecretPlan creates a sub-plan to install sealed secrets so we can check secrets into GitHub for GitOps
func BuildSealedSecretPlan(sealedSecretVersion, crdManifest, keyManifest, controllerManifest []byte) plan.Resource {
b := plan.NewBuilder()
b.AddResource("install:sealed-secret-crd",
&resource.KubectlApply{Manifest: crdManifest, Filename: object.String("SealedSecretCRD.yaml"),
WaitCondition: "condition=Established"})
b.AddResource("install:sealed-secrets-key", &resource.KubectlApply{Manifest: keyManifest})
b.AddResource("install:sealed-secrets-controller",
&resource.KubectlApply{Manifest: controllerManifest, Filename: object.String("SealedSecretController.yaml")},
plan.DependOn("install:sealed-secrets-key"))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
| {
log.Fatalf("Unknown CRI - %s", criSpec.Kind)
} | conditional_block |
install_plans.go | package recipe
import (
"context"
"fmt"
"sort"
log "github.com/sirupsen/logrus"
existinginfrav1 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/flavors/eksd"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/envcfg"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
)
const (
// PlanKey for storing plans as annotations on Nodes
PlanKey string = "wks.weave.works/node-plan"
)
// BuildBasePlan creates a plan for installing the base building blocks for the node
func BuildBasePlan(pkgType resource.PkgType) plan.Resource {
b := plan.NewBuilder()
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
// Package manager features
b.AddResource("install:yum-utils", &resource.RPM{Name: "yum-utils"})
b.AddResource("install:yum-versionlock", &resource.RPM{Name: "yum-plugin-versionlock"})
// Device Mapper
b.AddResource("install:device-mapper-persistent-data", &resource.RPM{Name: "device-mapper-persistent-data"})
b.AddResource("install:lvm2", &resource.RPM{Name: "lvm2"})
case resource.PkgTypeDeb:
// Package manager features
b.AddResource("install:gnupg", &resource.Deb{Name: "gnupg"})
// TODO(michal): Enable locking
// Device Mapper
b.AddResource("install:thin-provisioning-tools", &resource.Deb{Name: "thin-provisioning-tools"})
b.AddResource("install:lvm2", &resource.Deb{Name: "lvm2"})
}
p, err := b.Plan()
p.SetUndoCondition(func(_ plan.Runner, _ plan.State) bool { return false })
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildConfigPlan creates a plan for handling the configuration files
func BuildConfigPlan(files []*resource.File) plan.Resource {
b := plan.NewBuilder()
for idx, file := range files {
b.AddResource(fmt.Sprintf("install:config-file-%d", idx), file)
}
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildConfigMapPlan creates a plan to handle config maps
func BuildConfigMapPlan(manifests map[string][]byte, namespace string) plan.Resource {
b := plan.NewBuilder()
for name, manifest := range manifests {
remoteName := fmt.Sprintf("config-map-%s", name)
b.AddResource("install:"+remoteName, &resource.KubectlApply{Filename: object.String(remoteName), Manifest: manifest, Namespace: object.String(namespace)})
}
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildCNIPlan creates a sub-plan to install the CNI plugin.
func BuildCNIPlan(cni string, manifests [][]byte) plan.Resource {
b := plan.NewBuilder()
b.AddResource(
"install-cni:apply-manifests",
&resource.KubectlApply{Manifest: manifests[0], Filename: object.String(cni + ".yaml")},
)
if len(manifests) == 2 {
b.AddResource(
"install-cni:apply-manifests-ds",
&resource.KubectlApply{Manifest: manifests[1], Filename: object.String(cni + "-daemon-set" + ".yaml")},
plan.DependOn("install-cni:apply-manifests"))
}
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildCRIPlan creates a plan for installing a CRI. Currently, Docker is the only supported CRI
func BuildCRIPlan(ctx context.Context, criSpec *existinginfrav1.ContainerRuntime, cfg *envcfg.EnvSpecificConfig, pkgType resource.PkgType) plan.Resource {
b := plan.NewBuilder()
if criSpec.Kind != "docker" {
log.Fatalf("Unknown CRI - %s", criSpec.Kind)
}
IsDockerOnCentOS := false | &resource.Run{
Script: object.String("yum install -y http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm || true"),
UndoScript: object.String("yum remove -y container-selinux || true")})
b.AddResource("install:docker",
&resource.RPM{Name: criSpec.Package, Version: criSpec.Version},
plan.DependOn("install:container-selinux"))
// SELinux will be here along with docker and containerd-selinux packages
IsDockerOnCentOS = true
case resource.PkgTypeRPM:
b.AddResource("install:docker",
&resource.RPM{Name: criSpec.Package, Version: criSpec.Version})
// SELinux will be here along with docker and containerd-selinux packages
IsDockerOnCentOS = true
case resource.PkgTypeDeb:
// TODO(michal): Use the official docker.com repo
b.AddResource("install:docker", &resource.Deb{Name: "docker.io"})
}
if cfg.LockYUMPkgs {
b.AddResource(
"lock-package:docker",
&resource.Run{
Script: object.String("yum versionlock add docker-ce"),
// If we never installed yum-plugin-versionlock or docker, this should not fail
UndoScript: object.String("yum versionlock delete docker-ce || true")},
plan.DependOn("install:docker"))
}
// this is a special case: if SELinux is not there on RH, CentOS Linux family
// installing Docker will also installing SELinux
// then we set SELinux mode to be permissive right after the docker installation step
if IsDockerOnCentOS && cfg.SetSELinuxPermissive {
b.AddResource(
"selinux:permissive",
&resource.Run{
Script: object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
// sometime, SELinux not installed yet so || true to ignore the error
UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
},
plan.DependOn("install:docker"))
}
b.AddResource(
"systemd:daemon-reload",
&resource.Run{Script: object.String("systemctl daemon-reload")},
plan.DependOn("install:docker"),
)
b.AddResource(
"service-init:docker-service",
&resource.Service{Name: "docker", Status: "active", Enabled: true},
plan.DependOn("systemd:daemon-reload"))
p, err := b.Plan()
p.SetUndoCondition(func(r plan.Runner, _ plan.State) bool {
type AwareChanger interface {
WouldChangeState(ctx context.Context, r plan.Runner) (bool, error)
}
chg, err := p.GetResource("install:docker").(AwareChanger).WouldChangeState(ctx, r)
return chg || (err != nil)
})
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BinInstaller creates a function to install binaries based on package type and cluster flavors
func BinInstaller(pkgType resource.PkgType, f *eksd.EKSD) (func(string, string) plan.Resource, error) {
if f != nil {
log.Debugf("Using flavor %+v", f)
return func(binName, version string) plan.Resource {
// TODO (Mark) logic for the architecture
binURL, sha256, err := f.KubeBinURL(binName)
if err != nil {
log.Fatalf("%v", err)
return nil
}
binPath := "/usr/bin/" + binName
return &resource.Run{
Script: object.String(fmt.Sprintf("curl -o %s %s && openssl dgst -sha256 %s | grep \"%s\" > /dev/null && chmod 755 %s", binPath, binURL, binPath, sha256, binPath)),
UndoScript: object.String(fmt.Sprintf("pkill --uid 0 %s && rm %s || true", binName, binPath))}
}, nil
}
if pkgType == resource.PkgTypeDeb {
return func(binName, version string) plan.Resource {
return &resource.Deb{Name: binName, Suffix: "=" + version + "-00"}
}, nil
}
return func(binName, version string) plan.Resource {
return &resource.RPM{Name: binName, Version: version, DisableExcludes: "kubernetes"}
}, nil
}
// BuildK8SPlan creates a plan for running kubernetes on a node
func BuildK8SPlan(kubernetesVersion string, kubeletNodeIP string, seLinuxInstalled, setSELinuxPermissive, disableSwap, lockYUMPkgs bool, pkgType resource.PkgType, cloudProvider string, extraArgs map[string]string, binInstaller func(string, string) plan.Resource, flavor *eksd.EKSD) plan.Resource {
b := plan.NewBuilder()
// Kubernetes repos
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
// do nothing
case resource.PkgTypeDeb:
// XXX: Workaround for https://github.com/weaveworks/wksctl/issues/654 : *.gpg is a binary format, and currently wks is unable to handle
// binary files in the configuration configmap. Therefore, I needed to supply the *.gpg contents base64-encoded.
// In a world without that bug, one could just use the "!!binary"" YAML format in the configmap and store the *.gpg there directly.
b.AddResource("configure:kubernetes-repo-key", &resource.Run{
Script: object.String("base64 -d /tmp/cloud-google-com.gpg.b64 > /etc/apt/trusted.gpg.d/cloud-google-com.gpg"),
})
repoLine := "deb https://apt.kubernetes.io/ kubernetes-xenial main"
repoFile := "/etc/apt/sources.list.d/wks-google.list"
sedExpr := fmt.Sprintf(`\!%s!d`, repoLine) // same as '/%s/d' but allows '/' in %s
b.AddResource("configure:kubernetes-repo", &resource.Run{
Script: object.String(fmt.Sprintf("echo %q | tee -a %q", repoLine, repoFile)),
UndoScript: object.String(fmt.Sprintf(`test ! -f %q || sed -i '%s' %q`, repoFile, sedExpr, repoFile)),
}, plan.DependOn("configure:kubernetes-repo-key"))
}
// If SELinux is already installed and we need to set SELinux to permissive mode, do it
if seLinuxInstalled && setSELinuxPermissive {
b.AddResource(
"selinux:permissive",
&resource.Run{
Script: object.String("setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"),
UndoScript: object.String("setenforce 1 && sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config || true"),
})
}
// Install k8s packages
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
if flavor != nil {
b.AddResource("install:kubelet-package", &resource.RPM{Name: "kubelet", Version: kubernetesVersion, DisableExcludes: "kubernetes"})
b.AddResource(
"cleanup:kubelet",
&resource.Run{Script: object.String("pkill kubelet | true")},
plan.DependOn("install:kubelet-package"))
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
} else {
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion))
}
b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion))
b.AddResource("install:kubeadm",
binInstaller("kubeadm", kubernetesVersion),
plan.DependOn("install:kubectl"),
plan.DependOn("install:kubelet"),
)
case resource.PkgTypeDeb:
// TODO(michal): Install the newest release version by default instead of hardcoding "-00".
if flavor != nil {
b.AddResource("install:kubelet-package", &resource.Deb{Name: "kubelet", Suffix: "=" + kubernetesVersion + "-00"},
plan.DependOn("configure:kubernetes-repo"))
b.AddResource(
"cleanup:kubelet",
&resource.Run{Script: object.String("pkill kubelet | true")},
plan.DependOn("install:kubelet-package"))
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("cleanup:kubelet"))
} else {
b.AddResource("install:kubelet", binInstaller("kubelet", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
}
b.AddResource("install:kubeadm", binInstaller("kubeadm", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"), plan.DependOn("install:kubelet"))
b.AddResource("install:kubectl", binInstaller("kubectl", kubernetesVersion), plan.DependOn("configure:kubernetes-repo"))
}
if lockYUMPkgs {
b.AddResource(
"lock-package:kubernetes",
&resource.Run{
Script: object.String("yum versionlock add 'kube*'"),
// If we never installed yum-plugin-versionlock or kubernetes, this should not fail
UndoScript: object.String("yum versionlock delete 'kube*' || true")},
plan.DependOn("install:kubectl"),
)
}
b.AddResource(
"create-dir:kubelet.service.d",
&resource.Dir{Path: object.String("/etc/systemd/system/kubelet.service.d")},
)
b.AddResource(
"install:kubeadm-conf",
&resource.File{Content: `# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS`,
Destination: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"},
plan.DependOn("create-dir:kubelet.service.d"))
kubeletDeps := []string{"install:kubeadm-conf"}
processCloudProvider := func(cmdline string) string {
if cloudProvider != "" {
log.WithField("cloudProvider", cloudProvider).Debug("using cloud provider")
return fmt.Sprintf("%s --cloud-provider=%s\n", cmdline, cloudProvider)
}
return cmdline + "\n"
}
processAdditionalArgs := func(cmdline string) string {
result := cmdline
strs := []string{}
for name, value := range extraArgs {
strs = append(strs, fmt.Sprintf("--%s='%s'", name, value))
}
sort.Strings(strs)
for _, str := range strs {
result = fmt.Sprintf("%s %s", result, str)
}
return processCloudProvider(result)
}
switch pkgType {
case resource.PkgTypeRPM, resource.PkgTypeRHEL:
if disableSwap {
swapDisable := "configure:kubernetes-swap-disable"
kubeletDeps = append(kubeletDeps, swapDisable)
b.AddResource(
swapDisable,
buildDisableSwapPlan(),
plan.DependOn("create-dir:kubelet.service.d"))
kubeletSysconfig := "configure:kubelet-sysconfig"
b.AddResource(
kubeletSysconfig,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
kubeletDeps = append(kubeletDeps, kubeletSysconfig)
} else {
kubeletSysconfig := "configure:kubelet-sysconfig"
kubeletDeps = append(kubeletDeps, kubeletSysconfig)
b.AddResource(
kubeletSysconfig,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
}
case resource.PkgTypeDeb:
if disableSwap {
swapDisable := "configure:kubernetes-swap-disable"
kubeletDeps = append(kubeletDeps, swapDisable)
b.AddResource(
swapDisable,
buildDisableSwapPlan(),
plan.DependOn("create-dir:kubelet.service.d"))
kubeletDefault := "configure:kubelet-default"
kubeletDeps = append(kubeletDeps, kubeletDefault)
b.AddResource(
kubeletDefault,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
} else {
kubeletDefault := "configure:kubelet-default"
kubeletDeps = append(kubeletDeps, kubeletDefault)
b.AddResource(
kubeletDefault,
&resource.File{
Content: processAdditionalArgs(fmt.Sprintf("KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip=%s", kubeletNodeIP)),
Destination: "/etc/default/kubelet"},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
}
}
b.AddResource(
"systemd:daemon-reload",
&resource.Run{Script: object.String("systemctl daemon-reload")},
plan.DependOn("create-dir:kubelet.service.d", "install:kubelet"))
b.AddResource(
"service-init:kubelet",
&resource.Service{Name: "kubelet", Status: "active", Enabled: true},
plan.DependOn("systemd:daemon-reload", kubeletDeps...))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildDisableSwapPlan turns off swap and removes swap entries from /etc/fstab so swap will remain disabled on reboot
func buildDisableSwapPlan() plan.Resource {
b := plan.NewBuilder()
b.AddResource("configure:disable-swap-in-session", &resource.Run{Script: object.String("/sbin/swapoff -a")})
b.AddResource(
"configure:disable-swap-going-forward",
&resource.Run{Script: object.String(
// The ";" instead of "&&" below is because we want to copy the empty temp file over /etc/fstab if /etc/fstab only contains swap entries
// and the "egrep" will fail on an empty file
`tmpfile=$(mktemp /tmp/disable-swap.XXXXXX) && egrep -v '\s*\S*\s*\S*\s*swap.*' /etc/fstab > $tmpfile; mv $tmpfile /etc/fstab`)},
plan.DependOn("configure:disable-swap-in-session"))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildKubeadmPrejoinPlan creates a sub-plan to prepare for running
// kubeadm join.
func BuildKubeadmPrejoinPlan(useIPTables bool) plan.Resource {
b := plan.NewBuilder()
if useIPTables {
b.AddResource(
"configure:net.bridge",
&resource.Run{Script: object.String("sysctl net.bridge.bridge-nf-call-iptables=1")},
)
}
b.AddResource(
"configure:kubeadm-force-reset",
&resource.Run{Script: object.String("kubeadm reset --force")},
)
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
}
// BuildSealedSecretPlan creates a sub-plan to install sealed secrets so we can check secrets into GitHub for GitOps
func BuildSealedSecretPlan(sealedSecretVersion, crdManifest, keyManifest, controllerManifest []byte) plan.Resource {
b := plan.NewBuilder()
b.AddResource("install:sealed-secret-crd",
&resource.KubectlApply{Manifest: crdManifest, Filename: object.String("SealedSecretCRD.yaml"),
WaitCondition: "condition=Established"})
b.AddResource("install:sealed-secrets-key", &resource.KubectlApply{Manifest: keyManifest})
b.AddResource("install:sealed-secrets-controller",
&resource.KubectlApply{Manifest: controllerManifest, Filename: object.String("SealedSecretController.yaml")},
plan.DependOn("install:sealed-secrets-key"))
p, err := b.Plan()
if err != nil {
log.Fatalf("%v", err)
}
return &p
} |
// Docker runtime
switch pkgType {
case resource.PkgTypeRHEL:
b.AddResource("install:container-selinux", | random_line_split |
outputs.js | // TODO: make this a static class with static members
// this requires babel-eslint or being patient.
// Throw exceptions on invalid property names to catch easy typos.
class ThrowOnInvalidProxy {
constructor(obj) |
}
// Output strings for now require a field for every language, so this is a
// helper function to generate one for literal numbers.
const numberToOutputString = (n) => {
const str = n.toString();
return {
en: str,
de: str,
fr: str,
ja: str,
cn: str,
ko: str,
};
};
// General guidelines:
// * property names should closely match English text
// * use OnPlayer suffix for things with `${player}`
// * use OnTarget suffix for things with `${name}`
// * any other parameters (of which there are none, currently) should use consistent suffixes.
// * the value of each property should be a single object with localized keys
const Outputs = new ThrowOnInvalidProxy({
aoe: {
en: 'aoe',
de: 'AoE',
fr: 'AoE',
ja: 'AoE',
cn: 'AoE',
ko: '전체 공격',
},
bigAoe: {
en: 'big aoe!',
de: 'Große AoE!',
fr: 'Grosse AoE !',
ja: '大ダメージAoE',
cn: '大AoE伤害!',
ko: '강한 전체 공격!',
},
tankBuster: {
en: 'Tank Buster',
de: 'Tank buster',
fr: 'Tank buster',
ja: 'タンクバスター',
cn: '坦克死刑',
ko: '탱버',
},
miniBuster: {
en: 'Mini Buster',
de: 'Kleiner Tankbuster',
fr: 'Mini Buster',
ja: 'ミニバスター',
cn: '小死刑',
ko: '약한 탱버',
},
tankBusterOnPlayer: {
en: 'Tank Buster on ${player}',
de: 'Tank buster auf ${player}',
fr: 'Tank buster sur ${player}',
ja: '${player}にタンクバスター',
cn: '死刑 点 ${player}',
ko: '"${player}" 탱버',
},
tankBusterOnYou: {
en: 'Tank Buster on YOU',
de: 'Tank buster auf DIR',
fr: 'Tank buster sur VOUS',
ja: '自分にタンクバスター',
cn: '死刑点名',
ko: '탱버 대상자',
},
// when there are multiple tankbusters going out
tankBusters: {
en: 'Tank Busters',
de: 'Tank buster',
fr: 'Tank busters',
ja: 'タンクバスター',
cn: '坦克死刑',
ko: '탱버',
},
tankCleave: {
en: 'Tank cleave',
de: 'Tank Cleave',
fr: 'Tank cleave',
ja: '前方範囲攻撃',
cn: '顺劈',
ko: '광역 탱버',
},
avoidTankCleave: {
en: 'Avoid tank cleave',
de: 'Tank Cleave ausweichen',
fr: 'Évitez le tank cleave',
ja: '前方範囲攻撃を避け',
cn: '远离顺劈',
ko: '광역 탱버 피하기',
},
tankCleaveOnYou: {
en: 'Tank cleave on YOU',
de: 'Tank Cleave aud DIR',
fr: 'Tank cleave sur VOUS',
ja: '自分に前方範囲攻撃',
cn: '顺劈点名',
ko: '나에게 광역 탱버',
},
tankSwap: {
en: 'Tank Swap!',
de: 'Tankwechsel!',
fr: 'Tank swap !',
ja: 'タンクスイッチ!',
cn: '换T!',
ko: '탱 교대',
},
spread: {
en: 'Spread',
de: 'Verteilen',
fr: 'Dispersez-vous',
ja: '散開',
cn: '分散',
ko: '산개',
},
stackMarker: {
// for stack marker situations
en: 'Stack',
de: 'Sammeln',
fr: 'Packez-vous',
ja: '頭割り',
cn: '分摊',
ko: '쉐어뎀',
},
getTogether: {
// for getting together without stack marker
en: 'Stack',
de: 'Sammeln',
fr: 'Packez-vous',
ja: '集合',
cn: '集合',
ko: '쉐어뎀',
},
stackOnYou: {
en: 'Stack on YOU',
de: 'Auf DIR sammeln',
fr: 'Package sur VOUS',
ja: '自分に集合',
cn: '集合点名',
ko: '쉐어징 대상자',
},
stackOnPlayer: {
en: 'Stack on ${player}',
de: 'Auf ${player} sammeln',
fr: 'Packez-vous sur ${player}',
ja: '${player}に集合',
cn: '靠近 ${player}集合',
ko: '"${player}" 쉐어징',
},
stackMiddle: {
en: 'Stack in middle',
de: 'In der Mitte sammeln',
fr: 'Packez-vous au milieu',
ja: '中央で集合',
cn: '中间集合',
ko: '중앙에서 모이기',
},
doritoStack: {
en: 'Dorito Stack',
de: 'Mit Marker sammeln',
fr: 'Packez les marquages',
ja: 'マーカー付けた人と集合',
cn: '点名集合',
ko: '징끼리 모이기',
},
spreadThenStack: {
en: 'Spread => Stack',
de: 'Verteilen => Sammeln',
fr: 'Dispersion => Package',
ja: '散開 => 集合',
cn: '分散 => 集合',
ko: '산개 => 집합',
},
stackThenSpread: {
en: 'Stack => Spread',
de: 'Sammeln => Verteilen',
fr: 'Package => Dispersion',
ja: 'スタック => 散開',
cn: '集合 => 分散',
ko: '집합 => 산개',
},
knockback: {
en: 'Knockback',
de: 'Rückstoß',
fr: 'Poussée',
ja: 'ノックバック',
cn: '击退',
ko: '넉백',
},
knockbackOnYou: {
en: 'Knockback on YOU',
de: 'Rückstoß auf DIR',
fr: 'Poussée sur VOUS',
ja: '自分にノックバック',
cn: '击退点名',
ko: '넉백징 대상자',
},
knockbackOnPlayer: {
en: 'Knockback on ${player}',
de: 'Rückstoß auf ${player}',
fr: 'Poussée sur ${player}',
ja: '${player}にノックバック',
cn: '击退点名${player}',
ko: '"${player}" 넉백징',
},
lookTowardsBoss: {
en: 'Look Towards Boss',
de: 'Anschauen Boss',
fr: 'Regardez le boss',
ja: 'ボスを見る',
cn: '面向Boss',
ko: '쳐다보기',
},
lookAway: {
en: 'Look Away',
de: 'Wegschauen',
fr: 'Regardez ailleurs',
ja: 'ボスを見ない',
cn: '背对Boss',
ko: '뒤돌기',
},
lookAwayFromPlayer: {
en: 'Look Away from ${player}',
de: 'Schau weg von ${player}',
fr: 'Ne regardez pas ${player}',
ja: '${player}を見ない',
cn: '背对${player}',
ko: '${player}에게서 뒤돌기',
},
lookAwayFromTarget: {
en: 'Look Away from ${name}',
de: 'Schau weg von ${name}',
fr: 'Ne regardez pas ${name}',
ja: '${name}を見ない',
cn: '背对${name}',
ko: '${name}에게서 뒤돌기',
},
getBehind: {
en: 'Get Behind',
de: 'Hinter ihn',
fr: 'Passez derrière',
ja: '背面へ',
cn: '去背后',
ko: '보스 뒤로',
},
goFrontOrSides: {
en: 'Go Front / Sides',
de: 'Gehe nach Vorne/ zu den Seiten',
fr: 'Allez Devant / Côtés',
ja: '前/横へ',
cn: '去前侧方',
ko: '보스 후방 피하기',
},
goFront: {
en: 'Go Front',
de: 'Geh nach vorn',
fr: 'Allez Devant',
ja: '前へ',
cn: '去前面',
},
// getUnder is used when you have to get into the bosses hitbox
getUnder: {
en: 'Get Under',
de: 'Unter ihn',
fr: 'En dessous',
ja: 'ボスと貼り付く',
cn: '去脚下',
ko: '보스 아래로',
},
// in is more like "get close but maybe even melee range is fine"
in: {
en: 'In',
de: 'Rein',
fr: 'Intérieur',
ja: '中へ',
cn: '靠近',
ko: '안으로',
},
// out means get far away
out: {
en: 'Out',
de: 'Raus',
fr: 'Exterieur',
ja: '外へ',
cn: '远离',
ko: '밖으로',
},
outOfMelee: {
en: 'Out of melee',
de: 'Raus aus Nahkampf',
fr: 'Hors de la mêlée',
ja: '近接最大レンジ',
cn: '近战最远距离回避',
ko: '근접범위 밖으로',
},
inThenOut: {
en: 'In, then out',
de: 'Rein, dann raus',
fr: 'Intérieur, puis extérieur',
ja: '中 => 外',
cn: '先靠近,再远离',
ko: '안으로 => 밖으로',
},
outThenIn: {
en: 'Out, then in',
de: 'Raus, dann rein',
fr: 'Extérieur, puis intérieur',
ja: '外 => 中',
cn: '先远离,再靠近',
ko: '밖으로 => 안으로',
},
backThenFront: {
en: 'Back Then Front',
de: 'Nach Hinten, danach nach Vorne',
fr: 'Derrière puis devant',
ja: '後ろ => 前',
cn: '后 => 前',
ko: '뒤로 => 앞으로',
},
frontThenBack: {
en: 'Front Then Back',
de: 'Nach Vorne, danach nach Hinten',
fr: 'Devant puis derrière',
ja: '前 => 後ろ',
cn: '前 => 后',
ko: '앞으로 => 뒤로',
},
goIntoMiddle: {
en: 'go into middle',
de: 'in die Mitte gehen',
fr: 'Allez au milieu',
ja: '中へ',
cn: '去中间',
ko: '중앙으로',
},
right: {
en: 'Right',
de: 'Rechts',
fr: 'Droite ',
ja: '右へ',
cn: '右',
ko: '오른쪽',
},
left: {
en: 'Left',
de: 'Links',
fr: 'Gauche',
ja: '左へ',
cn: '左',
ko: '왼쪽',
},
getLeftAndWest: {
en: '<= Get Left/West',
de: '<= Nach Links/Westen',
fr: '<= Allez à Gauche/Ouest',
ja: '<= 左/西へ',
cn: '<= 去左/西边',
ko: '<= 왼쪽으로',
},
getRightAndEast: {
en: 'Get Right/East =>',
de: 'Nach Rechts/Osten =>',
fr: 'Allez à Droite/Est =>',
ja: '右/東へ =>',
cn: '去右/东边 =>',
ko: '오른쪽으로 =>',
},
goFrontBack: {
en: 'Go Front/Back',
de: 'Geh nach Vorne/Hinten',
fr: 'Allez Devant/Derrière',
ja: '縦へ',
cn: '去前后',
ko: '앞/뒤로',
},
sides: {
en: 'Sides',
de: 'Seiten',
fr: 'Côtés',
ja: '横へ',
cn: '去侧面',
ko: '양옆으로',
},
middle: {
en: 'Middle',
de: 'Mitte',
fr: 'Milieu',
ja: '中へ',
cn: '中间',
ko: '중앙',
},
// killAdds is used for adds that will always be available
killAdds: {
en: 'Kill adds',
de: 'Adds besiegen',
fr: 'Tuez les adds',
ja: '雑魚を処理',
cn: '击杀小怪',
ko: '쫄 잡기',
},
// killExtraAdd is used for adds that appear if a mechanic was not played correctly
killExtraAdd: {
en: 'Kill Extra Add',
de: 'Add besiegen',
fr: 'Tuez l\'add',
ja: '雑魚を倒す',
cn: '击杀小怪',
ko: '쫄 잡기',
},
awayFromFront: {
en: 'Away From Front',
de: 'Weg von Vorne',
fr: 'Éloignez-vous du devant',
ja: '前方から離れる',
cn: '远离正面',
ko: '보스 전방 피하기',
},
sleepTarget: {
en: 'Sleep ${name}',
de: 'Schlaf auf ${name}',
fr: 'Sommeil sur ${name}',
ja: '${name} にスリプル',
cn: '催眠 ${name}',
ko: '${name} 슬리플',
},
stunTarget: {
en: 'Stun ${name}',
de: 'Betäubung auf ${name}',
fr: 'Étourdissement sur ${name}',
ja: '${name} にスタン',
cn: '眩晕 ${name}',
ko: '${name}기절',
},
interruptTarget: {
en: 'interrupt ${name}',
de: 'unterbreche ${name}',
fr: 'Interrompez ${name}',
ja: '${name} に沈黙',
cn: '打断${name}',
ko: '${name}기술 시전 끊기',
},
preyOnYou: {
en: 'Prey on YOU',
de: 'Marker auf DIR',
fr: 'Marquage sur VOUS',
ja: '自分に捕食',
cn: '掠食点名',
ko: '홍옥징 대상자',
},
preyOnPlayer: {
en: 'Prey on ${player}',
de: 'Marker auf ${player}',
fr: 'Marquage sur ${player}',
ja: '${player}に捕食',
cn: '掠食点名${player}',
ko: '"${player}" 홍옥징',
},
awayFromGroup: {
en: 'Away from Group',
de: 'Weg von der Gruppe',
fr: 'Éloignez-vous du groupe',
ja: '外へ',
cn: '远离人群',
ko: '다른 사람들이랑 떨어지기',
},
awayFromPlayer: {
en: 'Away from ${player}',
de: 'Weg von ${player}',
fr: 'Éloignez-vous de ${player}',
ja: '${player}から離れ',
cn: '远离${player}',
ko: '"${player}"에서 멀어지기',
},
meteorOnYou: {
en: 'Meteor on YOU',
de: 'Meteor auf DIR',
fr: 'Météore sur VOUS',
ja: '自分にメテオ',
cn: '陨石点名',
ko: '나에게 메테오징',
},
stopMoving: {
en: 'Stop Moving!',
de: 'Bewegung stoppen!',
fr: 'Ne bougez pas !',
ja: '移動禁止!',
cn: '停止移动!',
ko: '이동 멈추기!',
},
stopEverything: {
en: 'Stop Everything!',
de: 'Stoppe Alles!',
fr: 'Arrêtez TOUT !',
ja: '行動禁止!',
cn: '停止行动!',
ko: '행동 멈추기!',
},
moveAway: {
// move away to dodge aoes
en: 'Move!',
de: 'Bewegen!',
fr: 'Bougez !',
ja: '避けて!',
cn: '快躲开!',
ko: '이동하기!',
},
moveAround: {
// move around (e.g. jumping) to avoid being frozen
en: 'Move!',
de: 'Bewegen!',
fr: 'Bougez !',
ja: '動く!',
cn: '快动!',
ko: '움직이기!',
},
breakChains: {
en: 'Break chains',
de: 'Kette zerbrechen',
fr: 'Brisez les chaines',
ja: '線を切る',
cn: '切断连线',
ko: '선 끊기',
},
moveChainsTogether: {
en: 'Move chains together',
de: 'Ketten zusammen bewegen',
fr: 'Bougez les chaines ensemble',
ja: '線同士一緒に移動',
cn: '连线一起移动',
ko: '선 붙어서 같이 움직이기',
},
earthshakerOnYou: {
en: 'Earth Shaker on YOU',
de: 'Erdstoß auf DIR',
fr: 'Marque de terre sur VOUS',
ja: '自分にアースシェイカー',
cn: '大地摇动点名',
ko: '어스징 대상자',
},
wakeUp: {
en: 'WAKE UP',
de: 'AUFWACHEN',
fr: 'RÉVEILLES-TOI',
ja: '目を覚めて!',
cn: '醒醒!动一动!!',
ko: '강제 퇴장 7분 전',
},
closeTethersWithPlayer: {
en: 'Close Tethers (${player})',
de: 'Nahe Verbindungen (${player})',
fr: 'Liens proches avec (${player})',
ja: '(${player})に近づく',
cn: '靠近连线 (${player})',
ko: '상대와 가까이 붙기 (${player})',
},
farTethersWithPlayer: {
en: 'Far Tethers (${player})',
de: 'Entfernte Verbindungen (${player})',
fr: 'Liens éloignés avec (${player})',
ja: ' (${player})から離れる',
cn: '远离连线 (${player})',
ko: '상대와 떨어지기 (${player})',
},
unknownTarget: {
en: '???',
de: '???',
fr: '???',
ja: '???',
cn: '???',
ko: '???',
},
north: {
en: 'North',
de: 'Norden',
fr: 'Nord',
ja: '北',
cn: '北',
ko: '북쪽',
},
south: {
en: 'South',
de: 'Süden',
fr: 'Sud',
ja: '南',
cn: '南',
ko: '남쪽',
},
east: {
en: 'East',
de: 'Osten',
fr: 'Est',
ja: '東',
cn: '东',
ko: '동쪽',
},
west: {
en: 'West',
de: 'Westen',
fr: 'Ouest',
ja: '西',
cn: '西',
ko: '서쪽',
},
northwest: {
en: 'Northwest',
de: 'Nordwesten',
fr: 'nord-ouest',
ja: '北西',
cn: '西北',
ko: '북서',
},
northeast: {
en: 'Northeast',
de: 'Nordosten',
fr: 'nord-est',
ja: '北東',
cn: '东北',
ko: '북동',
},
southwest: {
en: 'Southwest',
de: 'Südwesten',
fr: 'sud-ouest',
ja: '南西',
cn: '西南',
ko: '남서',
},
southeast: {
en: 'Southeast',
de: 'Südosten',
fr: 'sud-est',
ja: '南東',
cn: '东南',
ko: '남동',
},
dirN: {
en: 'N',
de: 'N',
fr: 'N',
ja: '北',
cn: '北',
ko: '북쪽',
},
dirS: {
en: 'S',
de: 'S',
fr: 'S',
ja: '南',
cn: '南',
ko: '남쪽',
},
dirE: {
en: 'E',
de: 'O',
fr: 'E',
ja: '東',
cn: '东',
ko: '동쪽',
},
dirW: {
en: 'W',
de: 'W',
fr: 'O',
ja: '西',
cn: '西',
ko: '서쪽',
},
dirNW: {
en: 'NW',
de: 'NW',
fr: 'NO',
ja: '北西',
cn: '西北',
ko: '북서',
},
dirNE: {
en: 'NE',
de: 'NO',
fr: 'NE',
ja: '北東',
cn: '东北',
ko: '북동',
},
dirSW: {
en: 'SW',
de: 'SW',
fr: 'SO',
ja: '南西',
cn: '西南',
ko: '남서',
},
dirSE: {
en: 'SE',
de: 'SO',
fr: 'SE',
ja: '南東',
cn: '东南',
ko: '남동',
},
// Literal numbers.
num0: numberToOutputString(0),
num1: numberToOutputString(1),
num2: numberToOutputString(2),
num3: numberToOutputString(3),
num4: numberToOutputString(4),
num5: numberToOutputString(5),
num6: numberToOutputString(6),
num7: numberToOutputString(7),
num8: numberToOutputString(8),
num9: numberToOutputString(9),
});
export default Outputs;
| {
this.obj = obj;
if ('toJSON' in obj)
throw new Error('Cannot have toJSON property.');
return new Proxy(this, {
set(target, property, value) {
throw new Error('Cannot set readonly object.');
},
get(target, name) {
if (name === 'toJSON')
return JSON.stringify(obj);
if (name in target.obj)
return target.obj[name];
throw new Error(`Unknown property ${name}`);
},
});
} | identifier_body |
outputs.js | // TODO: make this a static class with static members
// this requires babel-eslint or being patient.
// Throw exceptions on invalid property names to catch easy typos.
class ThrowOnInvalidProxy {
constructor(obj) {
this.obj = obj;
if ('toJSON' in obj)
throw new Error('Cannot have toJSON property.');
return new Proxy(this, {
set(target, property, value) {
throw new Error('Cannot set readonly object.');
},
get(target, name) {
if (name === 'toJSON')
return JSON.stringify(obj);
if (name in target.obj)
return target.obj[name];
throw new Error(`Unknown property ${name}`);
},
});
}
}
// Output strings for now require a field for every language, so this is a
// helper function to generate one for literal numbers.
const numberToOutputString = (n) => {
const str = n.toString();
return {
en: str,
de: str,
fr: str,
ja: str,
cn: str,
ko: str,
};
};
// General guidelines:
// * property names should closely match English text
// * use OnPlayer suffix for things with `${player}`
// * use OnTarget suffix for things with `${name}`
// * any other parameters (of which there are none, currently) should use consistent suffixes.
// * the value of each property should be a single object with localized keys
const Outputs = new ThrowOnInvalidProxy({
aoe: {
en: 'aoe',
de: 'AoE',
fr: 'AoE',
ja: 'AoE',
cn: 'AoE',
ko: '전체 공격',
},
bigAoe: {
en: 'big aoe!',
de: 'Große AoE!',
fr: 'Grosse AoE !',
ja: '大ダメージAoE',
cn: '大AoE伤害!',
ko: '강한 전체 공격!',
},
tankBuster: {
en: 'Tank Buster',
de: 'Tank buster',
fr: 'Tank buster',
ja: 'タンクバスター',
cn: '坦克死刑',
ko: '탱버',
},
miniBuster: {
en: 'Mini Buster',
de: 'Kleiner Tankbuster',
fr: 'Mini Buster',
ja: 'ミニバスター',
cn: '小死刑',
ko: '약한 탱버',
},
tankBusterOnPlayer: {
en: 'Tank Buster on ${player}',
de: 'Tank buster auf ${player}',
fr: 'Tank buster sur ${player}',
ja: '${player}にタンクバスター',
cn: '死刑 点 ${player}',
ko: '"${player}" 탱버',
},
tankBusterOnYou: {
en: 'Tank Buster on YOU',
de: 'Tank buster auf DIR',
fr: 'Tank buster sur VOUS',
ja: '自分にタンクバスター',
cn: '死刑点名',
ko: '탱버 대상자',
},
// when there are multiple tankbusters going out
tankBusters: {
en: 'Tank Busters',
de: 'Tank buster',
fr: 'Tank busters',
ja: 'タンクバスター',
cn: '坦克死刑',
ko: '탱버',
},
tankCleave: {
en: 'Tank cleave',
de: 'Tank Cleave',
fr: 'Tank cleave',
ja: '前方範囲攻撃',
cn: '顺劈',
ko: '광역 탱버',
},
avoidTankCleave: {
en: 'Avoid tank cleave',
de: 'Tank Cleave ausweichen',
fr: 'Évitez le tank cleave',
ja: '前方範囲攻撃を避け',
cn: '远离顺劈',
ko: '광역 탱버 피하기',
},
tankCleaveOnYou: {
en: 'Tank cleave on YOU',
de: 'Tank Cleave aud DIR',
fr: 'Tank cleave sur VOUS',
ja: '自分に前方範囲攻撃',
cn: '顺劈点名',
ko: '나에게 광역 탱버',
},
tankSwap: {
en: 'Tank Swap!',
de: 'Tankwechsel!',
fr: 'Tank swap !',
ja: 'タンクスイッチ!',
cn: '换T!',
ko: '탱 교대',
},
spread: {
en: 'Spread',
de: 'Verteilen',
fr: 'Dispersez-vous',
ja: '散開',
cn: '分散',
ko: '산개',
},
stackMarker: {
// for stack marker situations
en: 'Stack',
de: 'Sammeln',
fr: 'Packez-vous',
ja: '頭割り',
cn: '分摊',
ko: '쉐어뎀',
},
getTogether: {
// for getting together without stack marker
en: 'Stack',
de: 'Sammeln',
fr: 'Packez-vous',
ja: '集合',
cn: '集合',
ko: '쉐어뎀',
},
stackOnYou: {
en: 'Stack on YOU',
de: 'Auf DIR sammeln',
fr: 'Package sur VOUS',
ja: '自分に集合',
cn: '集合点名',
ko: '쉐어징 대상자',
},
stackOnPlayer: {
en: 'Stack on ${player}',
de: 'Auf ${player} sammeln',
fr: 'Packez-vous sur ${player}',
ja: '${player}に集合',
cn: '靠近 ${player}集合',
ko: '"${player}" 쉐어징',
},
stackMiddle: {
en: 'Stack in middle',
de: 'In der Mitte sammeln',
fr: 'Packez-vous au milieu',
ja: '中央で集合',
cn: '中间集合',
ko: '중앙에서 모이기',
},
doritoStack: {
en: 'Dorito Stack',
de: 'Mit Marker sammeln',
fr: 'Packez les marquages',
ja: 'マーカー付けた人と集合',
cn: '点名集合',
ko: '징끼리 모이기',
},
spreadThenStack: {
en: 'Spread => Stack',
de: 'Verteilen => Sammeln',
fr: 'Dispersion => Package',
ja: '散開 => 集合',
cn: '分散 => 集合',
ko: '산개 => 집합',
},
stackThenSpread: {
en: 'Stack => Spread',
de: 'Sammeln => Verteilen',
fr: 'Package => Dispersion',
ja: 'スタック => 散開',
cn: '集合 => 分散',
ko: '집합 => 산개',
},
knockback: {
en: 'Knockback',
de: 'Rückstoß',
fr: 'Poussée',
ja: 'ノックバック',
cn: '击退',
ko: '넉백',
},
knockbackOnYou: {
en: 'Knockback on YOU',
de: 'Rückstoß auf DIR',
fr: 'Poussée sur VOUS',
ja: '自分にノックバック',
cn: '击退点名',
ko: '넉백징 대상자',
},
knockbackOnPlayer: {
en: 'Knockback on ${player}',
de: 'Rückstoß auf ${player}',
fr: 'Poussée sur ${player}',
ja: '${player}にノックバック',
cn: '击退点名${player}',
ko: '"${player}" 넉백징',
},
lookTowardsBoss: {
en: 'Look Towards Boss',
de: 'Anschauen Boss',
fr: 'Regardez le boss',
ja: 'ボスを見る',
cn: '面向Boss',
ko: '쳐다보기',
},
lookAway: {
en: 'Look Away',
de: 'Wegschauen',
fr: 'Regardez ailleurs',
ja: 'ボスを見ない',
cn: '背对Boss',
ko: '뒤돌기',
},
lookAwayFromPlayer: {
en: 'Look Away from ${player}',
de: 'Schau weg von ${player}',
fr: 'Ne regardez pas ${player}',
ja: '${player}を見ない',
cn: '背对${player}',
ko: '${player}에게서 뒤돌기',
},
lookAwayFromTarget: {
en: 'Look Away from ${name}',
de: 'Schau weg von ${name}',
fr: 'Ne regardez pas ${name}',
ja: '${name}を見ない',
cn: '背对${name}',
ko: '${name}에게서 뒤돌기',
},
getBehind: {
en: 'Get Behind',
de: 'Hinter ihn',
fr: 'Passez derrière',
ja: '背面へ',
cn: '去背后',
ko: '보스 뒤로',
},
goFrontOrSides: {
en: 'Go Front / Sides',
de: 'Gehe nach Vorne/ zu den Seiten',
fr: 'Allez Devant / Côtés',
ja: '前/横へ',
cn: '去前侧方',
ko: '보스 후방 피하기',
},
goFront: {
en: 'Go Front',
de: 'Geh nach vorn',
fr: 'Allez Devant',
ja: '前へ',
cn: '去前面',
},
// getUnder is used when you have to get into the bosses hitbox
getUnder: {
en: 'Get Under',
de: 'Unter ihn',
fr: 'En dessous',
ja: 'ボスと貼り付く',
cn: '去脚下',
ko: '보스 아래로',
},
// in is more like "get close but maybe even melee range is fine"
in: {
en: 'In',
de: 'Rein',
fr: 'Intérieur',
ja: '中へ',
cn: '靠近',
ko: '안으로',
},
// out means get far away
out: {
en: 'Out',
de: 'Raus',
fr: 'Exterieur',
ja: '外へ',
cn: '远离',
ko: '밖으로',
},
outOfMelee: {
en: 'Out of melee',
de: 'Raus aus Nahkampf',
fr: 'Hors de la mêlée',
ja: '近接最大レンジ',
cn: '近战最远距离回避',
ko: '근접범위 밖으로',
},
inThenOut: {
en: 'In, then out',
de: 'Rein, dann raus',
fr: 'Intérieur, puis extérieur',
ja: '中 => 外',
cn: '先靠近,再远离',
ko: '안으로 => 밖으로',
},
outThenIn: {
en: 'Out, then in',
de: 'Raus, dann rein',
fr: 'Extérieur, puis intérieur',
ja: '外 => 中',
cn: '先远离,再靠近',
ko: '밖으로 => 안으로',
},
backThenFront: {
en: 'Back Then Front',
de: 'Nach Hinten, danach nach Vorne',
fr: 'Derrière puis devant',
ja: '後ろ => 前',
cn: '后 => 前',
ko: '뒤로 => 앞으로',
},
frontThenBack: {
en: 'Front Then Back',
de: 'Nach Vorne, danach nach Hinten',
fr: 'Devant puis derrière',
ja: '前 => 後ろ',
cn: '前 => 后',
ko: '앞으로 => 뒤로',
},
goIntoMiddle: {
en: 'go into middle',
de: 'in die Mitte gehen',
fr: 'Allez au milieu',
ja: '中へ',
cn: '去中间',
ko: '중앙으로',
},
right: {
en: 'Right',
de: 'Rechts',
fr: 'Droite ',
ja: '右へ',
cn: '右',
ko: '오른쪽',
},
left: {
en: 'Left',
de: 'Links',
fr: 'Gauche',
ja: '左へ',
cn: '左',
ko: '왼쪽',
},
getLeftAndWest: {
en: '<= Get Left/West',
de: '<= Nach Links/Westen',
fr: '<= Allez à Gauche/Ouest',
ja: '<= 左/西へ',
cn: '<= 去左/西边',
ko: '<= 왼쪽으로',
},
getRightAndEast: {
en: 'Get Right/East =>',
de: 'Nach Rechts/Osten =>',
fr: 'Allez à Droite/Est =>',
ja: '右/東へ =>',
cn: '去右/东边 =>',
ko: '오른쪽으로 =>',
},
goFrontBack: {
en: 'Go Front/Back',
de: 'Geh nach Vorne/Hinten',
fr: 'Allez Devant/Derrière',
ja: '縦へ',
cn: '去前后',
ko: '앞/뒤로',
},
sides: {
en: 'Sides',
de: 'Seiten',
fr: 'Côtés',
ja: '横へ',
cn: '去侧面',
ko: '양옆으로',
},
middle: {
en: 'Middle',
de: 'Mitte',
fr: 'Milieu',
ja: '中へ',
cn: '中间',
ko: '중앙',
},
// killAdds is used for adds that will always be available
killAdds: {
en: 'Kill adds',
de: 'Adds besiegen',
fr: 'Tuez les adds',
ja: '雑魚を処理',
cn: '击杀小怪',
ko: '쫄 잡기',
},
// killExtraAdd is used for adds that appear if a mechanic was not played correctly
killExtraAdd: {
en: 'Kill Extra Add',
de: 'Add besiegen',
fr: 'Tuez l\'add',
ja: '雑魚を倒す',
cn: '击杀小怪',
ko: '쫄 잡기',
},
awayFromFront: {
en: 'Away From Front',
de: 'Weg von Vorne',
fr: 'Éloignez-vous du devant',
ja: '前方から離れる',
cn: '远离正面',
ko: '보스 전방 피하기',
},
sleepTarget: {
en: 'Sleep ${name}',
de: 'Schlaf auf ${name}',
fr: 'Sommeil sur ${name}',
ja: '${name} にスリプル',
cn: '催眠 ${name}',
ko: '${name} 슬리플',
},
stunTarget: {
en: 'Stun ${name}',
de: 'Betäubung auf ${name}',
fr: 'Étourdissement sur ${name}',
ja: '${name} にスタン',
cn: '眩晕 ${name}',
ko: '${name}기절',
},
interruptTarget: {
en: 'interrupt ${name}',
de: 'unterbreche ${name}',
fr: 'Interrompez ${name}',
ja: '${name} に沈黙',
cn: '打断${name}',
ko: '${name}기술 시전 끊기',
},
preyOnYou: {
en: 'Prey on YOU',
de: 'Marker auf DIR',
fr: 'Marquage sur VOUS',
ja: '自分に捕食',
cn: '掠食点名',
ko: '홍옥징 대상자',
},
preyOnPlayer: {
en: 'Prey on ${player}',
de: 'Marker auf ${player}',
fr: 'Marquage sur ${player}',
ja: '${player}に捕食',
cn: '掠食点名${player}',
ko: '"${player}" 홍옥징',
},
awayFromGroup: {
en: 'Away from Group',
de: 'Weg von der Gruppe',
fr: 'Éloignez-vous du groupe',
ja: '外へ',
cn: '远离人群',
ko: '다른 사람들이랑 떨어지기',
},
awayFromPlayer: {
en: 'Away from ${player}',
de: 'Weg von ${player}',
fr: 'Éloignez-vous de ${player}',
ja: '${player}から離れ',
cn: '远离${player}',
ko: '"${player}"에서 멀어지기',
},
meteorOnYou: {
en: 'Meteor on YOU',
de: 'Meteor auf DIR',
fr: 'Météore sur VOUS',
ja: '自分にメテオ',
cn: '陨石点名',
ko: '나에게 메테오징',
},
stopMoving: {
en: 'Stop Moving!',
de: 'Bewegung stoppen!',
fr: 'Ne bougez pas !',
ja: '移動禁止!',
cn: '停止移动!',
ko: '이동 멈추기!',
},
stopEverything: {
en: 'Stop Everything!',
de: 'Stoppe Alles!',
fr: 'Arrêtez TOUT !',
ja: '行動禁止!',
cn: '停止行动!',
ko: '행동 멈추기!',
},
moveAway: {
// move away to dodge aoes
en: 'Move!',
de: 'Bewegen!',
fr: 'Bougez !',
ja: '避けて!',
cn: '快躲开!',
ko: '이동하기!',
},
moveAround: {
// move around (e.g. jumping) to avoid being frozen
en: 'Move!',
de: 'Bewegen!',
fr: 'Bougez !',
ja: '動く!',
cn: '快动!',
ko: '움직이기!',
},
breakChains: {
en: 'Break chains',
de: 'Kette zerbrechen',
fr: 'Brisez les chaines',
ja: '線を切る',
cn: '切断连线',
ko: '선 끊기',
},
moveChainsTogether: {
en: 'Move chains together',
de: 'Ketten zusammen bewegen',
fr: 'Bougez les chaines ensemble',
ja: '線同士一緒に移動',
cn: '连线一起移动',
ko: '선 붙어서 같이 움직이기',
},
earthshakerOnYou: {
en: 'Earth Shaker on YOU',
de: 'Erdstoß auf DIR',
fr: 'Marque de terre sur VOUS',
ja: '自分にアースシェイカー',
cn: '大地摇动点名',
ko: '어스징 대상자',
},
wakeUp: {
en: 'WAKE UP',
de: 'AUFWACHEN',
fr: 'RÉVEILLES-TOI',
ja: '目を覚めて!',
cn: '醒醒!动一动!!',
ko: '강제 퇴장 7분 전',
},
closeTethersWithPlayer: {
en: 'Close Tethers (${player})',
de: 'Nahe Verbindungen (${player})',
fr: 'Liens proches avec (${player})',
ja: '(${player})に近づく',
cn: '靠近连线 (${player})',
ko: '상대와 가까이 붙기 (${player})',
},
farTethersWithPlayer: {
en: 'Far Tethers (${player})',
de: 'Entfernte Verbindungen (${player})',
fr: 'Liens éloignés avec (${player})',
ja: ' (${player})から離れる',
cn: '远离连线 (${player})',
ko: '상대와 떨어지기 (${player})',
},
unknownTarget: {
en: '???',
de: '???',
fr: '???',
ja: '???',
cn: '???',
ko: '???',
},
north: {
en: 'North',
de: 'Norden',
fr: 'Nord',
ja: '北',
cn: '北',
ko: '북쪽',
},
south: {
en: 'South',
de: 'Süden',
fr: 'Sud',
ja: '南',
cn: '南',
ko: '남쪽',
},
east: {
en: 'East',
de: 'Osten',
fr: 'Est',
ja: '東',
cn: '东',
ko: '동쪽',
},
west: {
en: 'West',
de: 'Westen',
fr: 'Ouest',
ja: '西',
cn: '西', | fr: 'nord-ouest',
ja: '北西',
cn: '西北',
ko: '북서',
},
northeast: {
en: 'Northeast',
de: 'Nordosten',
fr: 'nord-est',
ja: '北東',
cn: '东北',
ko: '북동',
},
southwest: {
en: 'Southwest',
de: 'Südwesten',
fr: 'sud-ouest',
ja: '南西',
cn: '西南',
ko: '남서',
},
southeast: {
en: 'Southeast',
de: 'Südosten',
fr: 'sud-est',
ja: '南東',
cn: '东南',
ko: '남동',
},
dirN: {
en: 'N',
de: 'N',
fr: 'N',
ja: '北',
cn: '北',
ko: '북쪽',
},
dirS: {
en: 'S',
de: 'S',
fr: 'S',
ja: '南',
cn: '南',
ko: '남쪽',
},
dirE: {
en: 'E',
de: 'O',
fr: 'E',
ja: '東',
cn: '东',
ko: '동쪽',
},
dirW: {
en: 'W',
de: 'W',
fr: 'O',
ja: '西',
cn: '西',
ko: '서쪽',
},
dirNW: {
en: 'NW',
de: 'NW',
fr: 'NO',
ja: '北西',
cn: '西北',
ko: '북서',
},
dirNE: {
en: 'NE',
de: 'NO',
fr: 'NE',
ja: '北東',
cn: '东北',
ko: '북동',
},
dirSW: {
en: 'SW',
de: 'SW',
fr: 'SO',
ja: '南西',
cn: '西南',
ko: '남서',
},
dirSE: {
en: 'SE',
de: 'SO',
fr: 'SE',
ja: '南東',
cn: '东南',
ko: '남동',
},
// Literal numbers.
num0: numberToOutputString(0),
num1: numberToOutputString(1),
num2: numberToOutputString(2),
num3: numberToOutputString(3),
num4: numberToOutputString(4),
num5: numberToOutputString(5),
num6: numberToOutputString(6),
num7: numberToOutputString(7),
num8: numberToOutputString(8),
num9: numberToOutputString(9),
});
export default Outputs; | ko: '서쪽',
},
northwest: {
en: 'Northwest',
de: 'Nordwesten', | random_line_split |
outputs.js | // TODO: make this a static class with static members
// this requires babel-eslint or being patient.
// Throw exceptions on invalid property names to catch easy typos.
class | {
constructor(obj) {
this.obj = obj;
if ('toJSON' in obj)
throw new Error('Cannot have toJSON property.');
return new Proxy(this, {
set(target, property, value) {
throw new Error('Cannot set readonly object.');
},
get(target, name) {
if (name === 'toJSON')
return JSON.stringify(obj);
if (name in target.obj)
return target.obj[name];
throw new Error(`Unknown property ${name}`);
},
});
}
}
// Output strings for now require a field for every language, so this is a
// helper function to generate one for literal numbers.
const numberToOutputString = (n) => {
const str = n.toString();
return {
en: str,
de: str,
fr: str,
ja: str,
cn: str,
ko: str,
};
};
// General guidelines:
// * property names should closely match English text
// * use OnPlayer suffix for things with `${player}`
// * use OnTarget suffix for things with `${name}`
// * any other parameters (of which there are none, currently) should use consistent suffixes.
// * the value of each property should be a single object with localized keys
const Outputs = new ThrowOnInvalidProxy({
aoe: {
en: 'aoe',
de: 'AoE',
fr: 'AoE',
ja: 'AoE',
cn: 'AoE',
ko: '전체 공격',
},
bigAoe: {
en: 'big aoe!',
de: 'Große AoE!',
fr: 'Grosse AoE !',
ja: '大ダメージAoE',
cn: '大AoE伤害!',
ko: '강한 전체 공격!',
},
tankBuster: {
en: 'Tank Buster',
de: 'Tank buster',
fr: 'Tank buster',
ja: 'タンクバスター',
cn: '坦克死刑',
ko: '탱버',
},
miniBuster: {
en: 'Mini Buster',
de: 'Kleiner Tankbuster',
fr: 'Mini Buster',
ja: 'ミニバスター',
cn: '小死刑',
ko: '약한 탱버',
},
tankBusterOnPlayer: {
en: 'Tank Buster on ${player}',
de: 'Tank buster auf ${player}',
fr: 'Tank buster sur ${player}',
ja: '${player}にタンクバスター',
cn: '死刑 点 ${player}',
ko: '"${player}" 탱버',
},
tankBusterOnYou: {
en: 'Tank Buster on YOU',
de: 'Tank buster auf DIR',
fr: 'Tank buster sur VOUS',
ja: '自分にタンクバスター',
cn: '死刑点名',
ko: '탱버 대상자',
},
// when there are multiple tankbusters going out
tankBusters: {
en: 'Tank Busters',
de: 'Tank buster',
fr: 'Tank busters',
ja: 'タンクバスター',
cn: '坦克死刑',
ko: '탱버',
},
tankCleave: {
en: 'Tank cleave',
de: 'Tank Cleave',
fr: 'Tank cleave',
ja: '前方範囲攻撃',
cn: '顺劈',
ko: '광역 탱버',
},
avoidTankCleave: {
en: 'Avoid tank cleave',
de: 'Tank Cleave ausweichen',
fr: 'Évitez le tank cleave',
ja: '前方範囲攻撃を避け',
cn: '远离顺劈',
ko: '광역 탱버 피하기',
},
tankCleaveOnYou: {
en: 'Tank cleave on YOU',
de: 'Tank Cleave aud DIR',
fr: 'Tank cleave sur VOUS',
ja: '自分に前方範囲攻撃',
cn: '顺劈点名',
ko: '나에게 광역 탱버',
},
tankSwap: {
en: 'Tank Swap!',
de: 'Tankwechsel!',
fr: 'Tank swap !',
ja: 'タンクスイッチ!',
cn: '换T!',
ko: '탱 교대',
},
spread: {
en: 'Spread',
de: 'Verteilen',
fr: 'Dispersez-vous',
ja: '散開',
cn: '分散',
ko: '산개',
},
stackMarker: {
// for stack marker situations
en: 'Stack',
de: 'Sammeln',
fr: 'Packez-vous',
ja: '頭割り',
cn: '分摊',
ko: '쉐어뎀',
},
getTogether: {
// for getting together without stack marker
en: 'Stack',
de: 'Sammeln',
fr: 'Packez-vous',
ja: '集合',
cn: '集合',
ko: '쉐어뎀',
},
stackOnYou: {
en: 'Stack on YOU',
de: 'Auf DIR sammeln',
fr: 'Package sur VOUS',
ja: '自分に集合',
cn: '集合点名',
ko: '쉐어징 대상자',
},
stackOnPlayer: {
en: 'Stack on ${player}',
de: 'Auf ${player} sammeln',
fr: 'Packez-vous sur ${player}',
ja: '${player}に集合',
cn: '靠近 ${player}集合',
ko: '"${player}" 쉐어징',
},
stackMiddle: {
en: 'Stack in middle',
de: 'In der Mitte sammeln',
fr: 'Packez-vous au milieu',
ja: '中央で集合',
cn: '中间集合',
ko: '중앙에서 모이기',
},
doritoStack: {
en: 'Dorito Stack',
de: 'Mit Marker sammeln',
fr: 'Packez les marquages',
ja: 'マーカー付けた人と集合',
cn: '点名集合',
ko: '징끼리 모이기',
},
spreadThenStack: {
en: 'Spread => Stack',
de: 'Verteilen => Sammeln',
fr: 'Dispersion => Package',
ja: '散開 => 集合',
cn: '分散 => 集合',
ko: '산개 => 집합',
},
stackThenSpread: {
en: 'Stack => Spread',
de: 'Sammeln => Verteilen',
fr: 'Package => Dispersion',
ja: 'スタック => 散開',
cn: '集合 => 分散',
ko: '집합 => 산개',
},
knockback: {
en: 'Knockback',
de: 'Rückstoß',
fr: 'Poussée',
ja: 'ノックバック',
cn: '击退',
ko: '넉백',
},
knockbackOnYou: {
en: 'Knockback on YOU',
de: 'Rückstoß auf DIR',
fr: 'Poussée sur VOUS',
ja: '自分にノックバック',
cn: '击退点名',
ko: '넉백징 대상자',
},
knockbackOnPlayer: {
en: 'Knockback on ${player}',
de: 'Rückstoß auf ${player}',
fr: 'Poussée sur ${player}',
ja: '${player}にノックバック',
cn: '击退点名${player}',
ko: '"${player}" 넉백징',
},
lookTowardsBoss: {
en: 'Look Towards Boss',
de: 'Anschauen Boss',
fr: 'Regardez le boss',
ja: 'ボスを見る',
cn: '面向Boss',
ko: '쳐다보기',
},
lookAway: {
en: 'Look Away',
de: 'Wegschauen',
fr: 'Regardez ailleurs',
ja: 'ボスを見ない',
cn: '背对Boss',
ko: '뒤돌기',
},
lookAwayFromPlayer: {
en: 'Look Away from ${player}',
de: 'Schau weg von ${player}',
fr: 'Ne regardez pas ${player}',
ja: '${player}を見ない',
cn: '背对${player}',
ko: '${player}에게서 뒤돌기',
},
lookAwayFromTarget: {
en: 'Look Away from ${name}',
de: 'Schau weg von ${name}',
fr: 'Ne regardez pas ${name}',
ja: '${name}を見ない',
cn: '背对${name}',
ko: '${name}에게서 뒤돌기',
},
getBehind: {
en: 'Get Behind',
de: 'Hinter ihn',
fr: 'Passez derrière',
ja: '背面へ',
cn: '去背后',
ko: '보스 뒤로',
},
goFrontOrSides: {
en: 'Go Front / Sides',
de: 'Gehe nach Vorne/ zu den Seiten',
fr: 'Allez Devant / Côtés',
ja: '前/横へ',
cn: '去前侧方',
ko: '보스 후방 피하기',
},
goFront: {
en: 'Go Front',
de: 'Geh nach vorn',
fr: 'Allez Devant',
ja: '前へ',
cn: '去前面',
},
// getUnder is used when you have to get into the bosses hitbox
getUnder: {
en: 'Get Under',
de: 'Unter ihn',
fr: 'En dessous',
ja: 'ボスと貼り付く',
cn: '去脚下',
ko: '보스 아래로',
},
// in is more like "get close but maybe even melee range is fine"
in: {
en: 'In',
de: 'Rein',
fr: 'Intérieur',
ja: '中へ',
cn: '靠近',
ko: '안으로',
},
// out means get far away
out: {
en: 'Out',
de: 'Raus',
fr: 'Exterieur',
ja: '外へ',
cn: '远离',
ko: '밖으로',
},
outOfMelee: {
en: 'Out of melee',
de: 'Raus aus Nahkampf',
fr: 'Hors de la mêlée',
ja: '近接最大レンジ',
cn: '近战最远距离回避',
ko: '근접범위 밖으로',
},
inThenOut: {
en: 'In, then out',
de: 'Rein, dann raus',
fr: 'Intérieur, puis extérieur',
ja: '中 => 外',
cn: '先靠近,再远离',
ko: '안으로 => 밖으로',
},
outThenIn: {
en: 'Out, then in',
de: 'Raus, dann rein',
fr: 'Extérieur, puis intérieur',
ja: '外 => 中',
cn: '先远离,再靠近',
ko: '밖으로 => 안으로',
},
backThenFront: {
en: 'Back Then Front',
de: 'Nach Hinten, danach nach Vorne',
fr: 'Derrière puis devant',
ja: '後ろ => 前',
cn: '后 => 前',
ko: '뒤로 => 앞으로',
},
frontThenBack: {
en: 'Front Then Back',
de: 'Nach Vorne, danach nach Hinten',
fr: 'Devant puis derrière',
ja: '前 => 後ろ',
cn: '前 => 后',
ko: '앞으로 => 뒤로',
},
goIntoMiddle: {
en: 'go into middle',
de: 'in die Mitte gehen',
fr: 'Allez au milieu',
ja: '中へ',
cn: '去中间',
ko: '중앙으로',
},
right: {
en: 'Right',
de: 'Rechts',
fr: 'Droite ',
ja: '右へ',
cn: '右',
ko: '오른쪽',
},
left: {
en: 'Left',
de: 'Links',
fr: 'Gauche',
ja: '左へ',
cn: '左',
ko: '왼쪽',
},
getLeftAndWest: {
en: '<= Get Left/West',
de: '<= Nach Links/Westen',
fr: '<= Allez à Gauche/Ouest',
ja: '<= 左/西へ',
cn: '<= 去左/西边',
ko: '<= 왼쪽으로',
},
getRightAndEast: {
en: 'Get Right/East =>',
de: 'Nach Rechts/Osten =>',
fr: 'Allez à Droite/Est =>',
ja: '右/東へ =>',
cn: '去右/东边 =>',
ko: '오른쪽으로 =>',
},
goFrontBack: {
en: 'Go Front/Back',
de: 'Geh nach Vorne/Hinten',
fr: 'Allez Devant/Derrière',
ja: '縦へ',
cn: '去前后',
ko: '앞/뒤로',
},
sides: {
en: 'Sides',
de: 'Seiten',
fr: 'Côtés',
ja: '横へ',
cn: '去侧面',
ko: '양옆으로',
},
middle: {
en: 'Middle',
de: 'Mitte',
fr: 'Milieu',
ja: '中へ',
cn: '中间',
ko: '중앙',
},
// killAdds is used for adds that will always be available
killAdds: {
en: 'Kill adds',
de: 'Adds besiegen',
fr: 'Tuez les adds',
ja: '雑魚を処理',
cn: '击杀小怪',
ko: '쫄 잡기',
},
// killExtraAdd is used for adds that appear if a mechanic was not played correctly
killExtraAdd: {
en: 'Kill Extra Add',
de: 'Add besiegen',
fr: 'Tuez l\'add',
ja: '雑魚を倒す',
cn: '击杀小怪',
ko: '쫄 잡기',
},
awayFromFront: {
en: 'Away From Front',
de: 'Weg von Vorne',
fr: 'Éloignez-vous du devant',
ja: '前方から離れる',
cn: '远离正面',
ko: '보스 전방 피하기',
},
sleepTarget: {
en: 'Sleep ${name}',
de: 'Schlaf auf ${name}',
fr: 'Sommeil sur ${name}',
ja: '${name} にスリプル',
cn: '催眠 ${name}',
ko: '${name} 슬리플',
},
stunTarget: {
en: 'Stun ${name}',
de: 'Betäubung auf ${name}',
fr: 'Étourdissement sur ${name}',
ja: '${name} にスタン',
cn: '眩晕 ${name}',
ko: '${name}기절',
},
interruptTarget: {
en: 'interrupt ${name}',
de: 'unterbreche ${name}',
fr: 'Interrompez ${name}',
ja: '${name} に沈黙',
cn: '打断${name}',
ko: '${name}기술 시전 끊기',
},
preyOnYou: {
en: 'Prey on YOU',
de: 'Marker auf DIR',
fr: 'Marquage sur VOUS',
ja: '自分に捕食',
cn: '掠食点名',
ko: '홍옥징 대상자',
},
preyOnPlayer: {
en: 'Prey on ${player}',
de: 'Marker auf ${player}',
fr: 'Marquage sur ${player}',
ja: '${player}に捕食',
cn: '掠食点名${player}',
ko: '"${player}" 홍옥징',
},
awayFromGroup: {
en: 'Away from Group',
de: 'Weg von der Gruppe',
fr: 'Éloignez-vous du groupe',
ja: '外へ',
cn: '远离人群',
ko: '다른 사람들이랑 떨어지기',
},
awayFromPlayer: {
en: 'Away from ${player}',
de: 'Weg von ${player}',
fr: 'Éloignez-vous de ${player}',
ja: '${player}から離れ',
cn: '远离${player}',
ko: '"${player}"에서 멀어지기',
},
meteorOnYou: {
en: 'Meteor on YOU',
de: 'Meteor auf DIR',
fr: 'Météore sur VOUS',
ja: '自分にメテオ',
cn: '陨石点名',
ko: '나에게 메테오징',
},
stopMoving: {
en: 'Stop Moving!',
de: 'Bewegung stoppen!',
fr: 'Ne bougez pas !',
ja: '移動禁止!',
cn: '停止移动!',
ko: '이동 멈추기!',
},
stopEverything: {
en: 'Stop Everything!',
de: 'Stoppe Alles!',
fr: 'Arrêtez TOUT !',
ja: '行動禁止!',
cn: '停止行动!',
ko: '행동 멈추기!',
},
moveAway: {
// move away to dodge aoes
en: 'Move!',
de: 'Bewegen!',
fr: 'Bougez !',
ja: '避けて!',
cn: '快躲开!',
ko: '이동하기!',
},
moveAround: {
// move around (e.g. jumping) to avoid being frozen
en: 'Move!',
de: 'Bewegen!',
fr: 'Bougez !',
ja: '動く!',
cn: '快动!',
ko: '움직이기!',
},
breakChains: {
en: 'Break chains',
de: 'Kette zerbrechen',
fr: 'Brisez les chaines',
ja: '線を切る',
cn: '切断连线',
ko: '선 끊기',
},
moveChainsTogether: {
en: 'Move chains together',
de: 'Ketten zusammen bewegen',
fr: 'Bougez les chaines ensemble',
ja: '線同士一緒に移動',
cn: '连线一起移动',
ko: '선 붙어서 같이 움직이기',
},
earthshakerOnYou: {
en: 'Earth Shaker on YOU',
de: 'Erdstoß auf DIR',
fr: 'Marque de terre sur VOUS',
ja: '自分にアースシェイカー',
cn: '大地摇动点名',
ko: '어스징 대상자',
},
wakeUp: {
en: 'WAKE UP',
de: 'AUFWACHEN',
fr: 'RÉVEILLES-TOI',
ja: '目を覚めて!',
cn: '醒醒!动一动!!',
ko: '강제 퇴장 7분 전',
},
closeTethersWithPlayer: {
en: 'Close Tethers (${player})',
de: 'Nahe Verbindungen (${player})',
fr: 'Liens proches avec (${player})',
ja: '(${player})に近づく',
cn: '靠近连线 (${player})',
ko: '상대와 가까이 붙기 (${player})',
},
farTethersWithPlayer: {
en: 'Far Tethers (${player})',
de: 'Entfernte Verbindungen (${player})',
fr: 'Liens éloignés avec (${player})',
ja: ' (${player})から離れる',
cn: '远离连线 (${player})',
ko: '상대와 떨어지기 (${player})',
},
unknownTarget: {
en: '???',
de: '???',
fr: '???',
ja: '???',
cn: '???',
ko: '???',
},
north: {
en: 'North',
de: 'Norden',
fr: 'Nord',
ja: '北',
cn: '北',
ko: '북쪽',
},
south: {
en: 'South',
de: 'Süden',
fr: 'Sud',
ja: '南',
cn: '南',
ko: '남쪽',
},
east: {
en: 'East',
de: 'Osten',
fr: 'Est',
ja: '東',
cn: '东',
ko: '동쪽',
},
west: {
en: 'West',
de: 'Westen',
fr: 'Ouest',
ja: '西',
cn: '西',
ko: '서쪽',
},
northwest: {
en: 'Northwest',
de: 'Nordwesten',
fr: 'nord-ouest',
ja: '北西',
cn: '西北',
ko: '북서',
},
northeast: {
en: 'Northeast',
de: 'Nordosten',
fr: 'nord-est',
ja: '北東',
cn: '东北',
ko: '북동',
},
southwest: {
en: 'Southwest',
de: 'Südwesten',
fr: 'sud-ouest',
ja: '南西',
cn: '西南',
ko: '남서',
},
southeast: {
en: 'Southeast',
de: 'Südosten',
fr: 'sud-est',
ja: '南東',
cn: '东南',
ko: '남동',
},
dirN: {
en: 'N',
de: 'N',
fr: 'N',
ja: '北',
cn: '北',
ko: '북쪽',
},
dirS: {
en: 'S',
de: 'S',
fr: 'S',
ja: '南',
cn: '南',
ko: '남쪽',
},
dirE: {
en: 'E',
de: 'O',
fr: 'E',
ja: '東',
cn: '东',
ko: '동쪽',
},
dirW: {
en: 'W',
de: 'W',
fr: 'O',
ja: '西',
cn: '西',
ko: '서쪽',
},
dirNW: {
en: 'NW',
de: 'NW',
fr: 'NO',
ja: '北西',
cn: '西北',
ko: '북서',
},
dirNE: {
en: 'NE',
de: 'NO',
fr: 'NE',
ja: '北東',
cn: '东北',
ko: '북동',
},
dirSW: {
en: 'SW',
de: 'SW',
fr: 'SO',
ja: '南西',
cn: '西南',
ko: '남서',
},
dirSE: {
en: 'SE',
de: 'SO',
fr: 'SE',
ja: '南東',
cn: '东南',
ko: '남동',
},
// Literal numbers.
num0: numberToOutputString(0),
num1: numberToOutputString(1),
num2: numberToOutputString(2),
num3: numberToOutputString(3),
num4: numberToOutputString(4),
num5: numberToOutputString(5),
num6: numberToOutputString(6),
num7: numberToOutputString(7),
num8: numberToOutputString(8),
num9: numberToOutputString(9),
});
export default Outputs;
| ThrowOnInvalidProxy | identifier_name |
lib.rs | //! The Enso parser. Parsing is a multi-stage process:
//!
//! # Lexing.
//! First, the source code is feed to [`lexer::Lexer`], which consumes it and outputs a stream of
//! [`Token`]. Tokens are chunks of the input with a generic description attached, like "operator",
//! or "identifier".
//!
//! # Building macro registry.
//! Macros in Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if ... then ... else ...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... -> ...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition
//! in case these segments were found. For example, let's consider two macros: `if ... then ...`,
//! and `if ... then ... else ...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//!
//! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if ... then ... else ...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
/// Constructor.
pub fn new() -> Self {
let macros = macros::built_in::all();
Self { macros }
}
/// Main entry point.
pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
let tokens = lexer::run(code);
let resolver = macros::resolver::Resolver::new_statement();
let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
let value = result.value;
if let Some(error) = result.internal_error {
return value.with_error(format!("Internal error: {error}"));
}
value
}
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
let mut left_offset = source::span::Offset::default();
if let Tree { variant: box Variant::Annotated(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::AnnotatedBuiltin(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::Documented(documented), .. } = &mut tree {
documented.expression = documented.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
let colon = annotated.operator;
let type_ = annotated.type_;
let variable = annotated.expression;
let mut tree = Tree::type_signature(variable, colon, type_);
tree.span.left_offset += span.left_offset;
return tree;
}
let tree_ = &mut tree;
let opr_app = match tree_ {
Tree { variant: box Variant::OprApp(opr_app), span } => {
left_offset += &span.left_offset;
opr_app
}
_ => return tree,
};
if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
let (leftmost, args) = collect_arguments(lhs.clone());
if let Some(rhs) = rhs {
if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
// If the LHS is a type, this is a (destructuring) assignment.
let lhs = expression_to_pattern(mem::take(lhs));
let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
if args.is_empty() && !is_body_block(rhs) {
// If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
// this is a variable assignment.
let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
if is_qualified_name(&leftmost) {
// If this is not a variable assignment, and the leftmost leaf of the `App` tree is
// a qualified name, this is a function definition.
let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
tree
}
fn is_qualified_name(tree: &syntax::Tree) -> bool {
use syntax::tree::*;
match &*tree.variant {
Variant::Ident(_) => true,
Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
if matches!(&*rhs.variant, Variant::Ident(_)) && opr.properties.is_dot() =>
is_qualified_name(lhs),
_ => false,
}
}
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_type(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_type(body)), close),
box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_type(func), expression_to_type(arg)),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_pattern(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_pattern(body)), close),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
Tree::type_annotated(expression_to_pattern(expression), operator, type_),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
let mut args = vec![];
let tree = unroll_arguments(tree, &mut args);
args.reverse();
(tree, args)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
}
fn unroll_arguments<'s>(
mut tree: syntax::Tree<'s>,
args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
while let Some(arg) = parse_argument_application(&mut tree) {
args.push(arg);
}
tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
use syntax::tree::*;
match &mut expression.variant {
box Variant::App(App { func, arg }) => {
let arg = parse_argument_definition(arg.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(arg)
}
box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
let open = mem::take(open);
let close = mem::take(close);
let equals = equals.clone();
let pattern = Tree::ident(name.clone());
let open2 = default();
let suspension = default();
let close2 = default();
let type_ = default();
let default = Some(ArgumentDefault { equals, expression: arg.clone() });
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open,
open2,
pattern,
suspension,
default,
close2,
type_,
close,
})
}
box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
let pattern = Tree::ident(default_.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open: default(),
open2: default(),
suspension: default(),
pattern,
type_: default(),
close2: default(),
default: default(),
close: default(),
})
}
_ => None,
}
}
/// Interpret the expression as an element of an argument definition sequence.
pub fn | (mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
use syntax::tree::*;
let mut open1 = default();
let mut close1 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open1 = open;
close1 = close;
pattern = body;
}
let mut default_ = default();
if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
let left_offset = pattern.span.left_offset;
default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
pattern = lhs.clone();
pattern.span.left_offset += left_offset;
}
let mut open2 = default();
let mut close2 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open2 = open;
close2 = close;
pattern = body;
}
let mut type__ = default();
if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
pattern.variant
{
expression.span.left_offset += pattern.span.left_offset;
type__ = Some(ArgumentType { operator, type_ });
pattern = expression;
}
let mut suspension = default();
if let box Variant::TemplateFunction(TemplateFunction { mut ast, .. }) = pattern.variant {
ast.span.left_offset += pattern.span.left_offset;
pattern = ast;
}
if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
let mut opr = opr.clone();
opr.left_offset += pattern.span.left_offset;
suspension = Some(opr);
pattern = rhs.clone();
}
let pattern = expression_to_pattern(pattern);
let open = open1;
let close = close1;
let type_ = type__;
ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
matches!(&*expression.variant, syntax::tree::Variant::BodyBlock { .. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
#[bench]
fn bench_parsing_type_defs(bencher: &mut Bencher) {
let reps = 1_000;
let str = "type Option a b c\n".repeat(reps);
let parser = Parser::new();
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_blocks(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 10_000;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let mut indent = 0u32;
for _ in 0..lines {
// Indent:
// 1/8 chance of increasing.
// 1/8 chance of decreasing.
// 3/4 chance of leaving unchanged.
match rng.gen_range(0..8) {
0u32 => indent = indent.saturating_sub(1),
1 => indent += 1,
_ => (),
}
for _ in 0..indent {
str.push(' ');
}
// 1/4 chance of operator-block line syntax.
if rng.gen_range(0..4) == 0u32 {
str.push_str("* ");
}
str.push('x');
// Equal chance of the next line being interpreted as a body block or argument block
// line, if it is indented and doesn't match the operator-block syntax.
// The `=` operator is chosen to exercise the expression-to-statement conversion path.
if rng.gen() {
str.push_str(" =");
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
str.push(operators[i]);
str.push('x');
}
str.push(' ');
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
}
| parse_argument_definition | identifier_name |
lib.rs | //! The Enso parser. Parsing is a multi-stage process:
//!
//! # Lexing.
//! First, the source code is feed to [`lexer::Lexer`], which consumes it and outputs a stream of
//! [`Token`]. Tokens are chunks of the input with a generic description attached, like "operator",
//! or "identifier".
//!
//! # Building macro registry.
//! Macros in Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if ... then ... else ...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... -> ...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition | //! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if ... then ... else ...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
/// Constructor.
pub fn new() -> Self {
let macros = macros::built_in::all();
Self { macros }
}
/// Main entry point.
pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
let tokens = lexer::run(code);
let resolver = macros::resolver::Resolver::new_statement();
let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
let value = result.value;
if let Some(error) = result.internal_error {
return value.with_error(format!("Internal error: {error}"));
}
value
}
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
let mut left_offset = source::span::Offset::default();
if let Tree { variant: box Variant::Annotated(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::AnnotatedBuiltin(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::Documented(documented), .. } = &mut tree {
documented.expression = documented.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
let colon = annotated.operator;
let type_ = annotated.type_;
let variable = annotated.expression;
let mut tree = Tree::type_signature(variable, colon, type_);
tree.span.left_offset += span.left_offset;
return tree;
}
let tree_ = &mut tree;
let opr_app = match tree_ {
Tree { variant: box Variant::OprApp(opr_app), span } => {
left_offset += &span.left_offset;
opr_app
}
_ => return tree,
};
if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
let (leftmost, args) = collect_arguments(lhs.clone());
if let Some(rhs) = rhs {
if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
// If the LHS is a type, this is a (destructuring) assignment.
let lhs = expression_to_pattern(mem::take(lhs));
let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
if args.is_empty() && !is_body_block(rhs) {
// If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
// this is a variable assignment.
let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
if is_qualified_name(&leftmost) {
// If this is not a variable assignment, and the leftmost leaf of the `App` tree is
// a qualified name, this is a function definition.
let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
tree
}
fn is_qualified_name(tree: &syntax::Tree) -> bool {
use syntax::tree::*;
match &*tree.variant {
Variant::Ident(_) => true,
Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
if matches!(&*rhs.variant, Variant::Ident(_)) && opr.properties.is_dot() =>
is_qualified_name(lhs),
_ => false,
}
}
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_type(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_type(body)), close),
box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_type(func), expression_to_type(arg)),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_pattern(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_pattern(body)), close),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
Tree::type_annotated(expression_to_pattern(expression), operator, type_),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
let mut args = vec![];
let tree = unroll_arguments(tree, &mut args);
args.reverse();
(tree, args)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
}
fn unroll_arguments<'s>(
mut tree: syntax::Tree<'s>,
args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
while let Some(arg) = parse_argument_application(&mut tree) {
args.push(arg);
}
tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
use syntax::tree::*;
match &mut expression.variant {
box Variant::App(App { func, arg }) => {
let arg = parse_argument_definition(arg.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(arg)
}
box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
let open = mem::take(open);
let close = mem::take(close);
let equals = equals.clone();
let pattern = Tree::ident(name.clone());
let open2 = default();
let suspension = default();
let close2 = default();
let type_ = default();
let default = Some(ArgumentDefault { equals, expression: arg.clone() });
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open,
open2,
pattern,
suspension,
default,
close2,
type_,
close,
})
}
box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
let pattern = Tree::ident(default_.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open: default(),
open2: default(),
suspension: default(),
pattern,
type_: default(),
close2: default(),
default: default(),
close: default(),
})
}
_ => None,
}
}
/// Interpret the expression as an element of an argument definition sequence.
pub fn parse_argument_definition(mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
use syntax::tree::*;
let mut open1 = default();
let mut close1 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open1 = open;
close1 = close;
pattern = body;
}
let mut default_ = default();
if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
let left_offset = pattern.span.left_offset;
default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
pattern = lhs.clone();
pattern.span.left_offset += left_offset;
}
let mut open2 = default();
let mut close2 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open2 = open;
close2 = close;
pattern = body;
}
let mut type__ = default();
if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
pattern.variant
{
expression.span.left_offset += pattern.span.left_offset;
type__ = Some(ArgumentType { operator, type_ });
pattern = expression;
}
let mut suspension = default();
if let box Variant::TemplateFunction(TemplateFunction { mut ast, .. }) = pattern.variant {
ast.span.left_offset += pattern.span.left_offset;
pattern = ast;
}
if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
let mut opr = opr.clone();
opr.left_offset += pattern.span.left_offset;
suspension = Some(opr);
pattern = rhs.clone();
}
let pattern = expression_to_pattern(pattern);
let open = open1;
let close = close1;
let type_ = type__;
ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
matches!(&*expression.variant, syntax::tree::Variant::BodyBlock { .. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
#[bench]
fn bench_parsing_type_defs(bencher: &mut Bencher) {
let reps = 1_000;
let str = "type Option a b c\n".repeat(reps);
let parser = Parser::new();
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_blocks(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 10_000;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let mut indent = 0u32;
for _ in 0..lines {
// Indent:
// 1/8 chance of increasing.
// 1/8 chance of decreasing.
// 3/4 chance of leaving unchanged.
match rng.gen_range(0..8) {
0u32 => indent = indent.saturating_sub(1),
1 => indent += 1,
_ => (),
}
for _ in 0..indent {
str.push(' ');
}
// 1/4 chance of operator-block line syntax.
if rng.gen_range(0..4) == 0u32 {
str.push_str("* ");
}
str.push('x');
// Equal chance of the next line being interpreted as a body block or argument block
// line, if it is indented and doesn't match the operator-block syntax.
// The `=` operator is chosen to exercise the expression-to-statement conversion path.
if rng.gen() {
str.push_str(" =");
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
str.push(operators[i]);
str.push('x');
}
str.push(' ');
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
} | //! in case these segments were found. For example, let's consider two macros: `if ... then ...`,
//! and `if ... then ... else ...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//! | random_line_split |
lib.rs | //! The Enso parser. Parsing is a multi-stage process:
//!
//! # Lexing.
//! First, the source code is feed to [`lexer::Lexer`], which consumes it and outputs a stream of
//! [`Token`]. Tokens are chunks of the input with a generic description attached, like "operator",
//! or "identifier".
//!
//! # Building macro registry.
//! Macros in Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if ... then ... else ...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... -> ...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition
//! in case these segments were found. For example, let's consider two macros: `if ... then ...`,
//! and `if ... then ... else ...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//!
//! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if ... then ... else ...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
/// Constructor.
pub fn new() -> Self {
let macros = macros::built_in::all();
Self { macros }
}
/// Main entry point.
pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
let tokens = lexer::run(code);
let resolver = macros::resolver::Resolver::new_statement();
let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
let value = result.value;
if let Some(error) = result.internal_error {
return value.with_error(format!("Internal error: {error}"));
}
value
}
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
let mut left_offset = source::span::Offset::default();
if let Tree { variant: box Variant::Annotated(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::AnnotatedBuiltin(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::Documented(documented), .. } = &mut tree {
documented.expression = documented.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
let colon = annotated.operator;
let type_ = annotated.type_;
let variable = annotated.expression;
let mut tree = Tree::type_signature(variable, colon, type_);
tree.span.left_offset += span.left_offset;
return tree;
}
let tree_ = &mut tree;
let opr_app = match tree_ {
Tree { variant: box Variant::OprApp(opr_app), span } => {
left_offset += &span.left_offset;
opr_app
}
_ => return tree,
};
if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
let (leftmost, args) = collect_arguments(lhs.clone());
if let Some(rhs) = rhs {
if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
// If the LHS is a type, this is a (destructuring) assignment.
let lhs = expression_to_pattern(mem::take(lhs));
let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
if args.is_empty() && !is_body_block(rhs) {
// If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
// this is a variable assignment.
let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
if is_qualified_name(&leftmost) {
// If this is not a variable assignment, and the leftmost leaf of the `App` tree is
// a qualified name, this is a function definition.
let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
tree
}
fn is_qualified_name(tree: &syntax::Tree) -> bool {
use syntax::tree::*;
match &*tree.variant {
Variant::Ident(_) => true,
Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
if matches!(&*rhs.variant, Variant::Ident(_)) && opr.properties.is_dot() =>
is_qualified_name(lhs),
_ => false,
}
}
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_type(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_type(body)), close),
box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_type(func), expression_to_type(arg)),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_pattern(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_pattern(body)), close),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
Tree::type_annotated(expression_to_pattern(expression), operator, type_),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
let mut args = vec![];
let tree = unroll_arguments(tree, &mut args);
args.reverse();
(tree, args)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
}
fn unroll_arguments<'s>(
mut tree: syntax::Tree<'s>,
args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
while let Some(arg) = parse_argument_application(&mut tree) {
args.push(arg);
}
tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
use syntax::tree::*;
match &mut expression.variant {
box Variant::App(App { func, arg }) => {
let arg = parse_argument_definition(arg.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(arg)
}
box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
let open = mem::take(open);
let close = mem::take(close);
let equals = equals.clone();
let pattern = Tree::ident(name.clone());
let open2 = default();
let suspension = default();
let close2 = default();
let type_ = default();
let default = Some(ArgumentDefault { equals, expression: arg.clone() });
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open,
open2,
pattern,
suspension,
default,
close2,
type_,
close,
})
}
box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
let pattern = Tree::ident(default_.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open: default(),
open2: default(),
suspension: default(),
pattern,
type_: default(),
close2: default(),
default: default(),
close: default(),
})
}
_ => None,
}
}
/// Interpret the expression as an element of an argument definition sequence.
pub fn parse_argument_definition(mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
use syntax::tree::*;
let mut open1 = default();
let mut close1 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open1 = open;
close1 = close;
pattern = body;
}
let mut default_ = default();
if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
let left_offset = pattern.span.left_offset;
default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
pattern = lhs.clone();
pattern.span.left_offset += left_offset;
}
let mut open2 = default();
let mut close2 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open2 = open;
close2 = close;
pattern = body;
}
let mut type__ = default();
if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
pattern.variant
{
expression.span.left_offset += pattern.span.left_offset;
type__ = Some(ArgumentType { operator, type_ });
pattern = expression;
}
let mut suspension = default();
if let box Variant::TemplateFunction(TemplateFunction { mut ast, .. }) = pattern.variant {
ast.span.left_offset += pattern.span.left_offset;
pattern = ast;
}
if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
let mut opr = opr.clone();
opr.left_offset += pattern.span.left_offset;
suspension = Some(opr);
pattern = rhs.clone();
}
let pattern = expression_to_pattern(pattern);
let open = open1;
let close = close1;
let type_ = type__;
ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
matches!(&*expression.variant, syntax::tree::Variant::BodyBlock { .. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
#[bench]
fn bench_parsing_type_defs(bencher: &mut Bencher) {
let reps = 1_000;
let str = "type Option a b c\n".repeat(reps);
let parser = Parser::new();
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_blocks(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 10_000;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let mut indent = 0u32;
for _ in 0..lines {
// Indent:
// 1/8 chance of increasing.
// 1/8 chance of decreasing.
// 3/4 chance of leaving unchanged.
match rng.gen_range(0..8) {
0u32 => indent = indent.saturating_sub(1),
1 => indent += 1,
_ => (),
}
for _ in 0..indent {
str.push(' ');
}
// 1/4 chance of operator-block line syntax.
if rng.gen_range(0..4) == 0u32 |
str.push('x');
// Equal chance of the next line being interpreted as a body block or argument block
// line, if it is indented and doesn't match the operator-block syntax.
// The `=` operator is chosen to exercise the expression-to-statement conversion path.
if rng.gen() {
str.push_str(" =");
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
str.push(operators[i]);
str.push('x');
}
str.push(' ');
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
}
| {
str.push_str("* ");
} | conditional_block |
lib.rs | //! The Enso parser. Parsing is a multi-stage process:
//!
//! # Lexing.
//! First, the source code is feed to [`lexer::Lexer`], which consumes it and outputs a stream of
//! [`Token`]. Tokens are chunks of the input with a generic description attached, like "operator",
//! or "identifier".
//!
//! # Building macro registry.
//! Macros in Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if ... then ... else ...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... -> ...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition
//! in case these segments were found. For example, let's consider two macros: `if ... then ...`,
//! and `if ... then ... else ...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//!
//! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if ... then ... else ...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
/// Constructor.
pub fn new() -> Self {
let macros = macros::built_in::all();
Self { macros }
}
/// Main entry point.
pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
let tokens = lexer::run(code);
let resolver = macros::resolver::Resolver::new_statement();
let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
let value = result.value;
if let Some(error) = result.internal_error {
return value.with_error(format!("Internal error: {error}"));
}
value
}
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
let mut left_offset = source::span::Offset::default();
if let Tree { variant: box Variant::Annotated(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::AnnotatedBuiltin(annotated), .. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::Documented(documented), .. } = &mut tree {
documented.expression = documented.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
let colon = annotated.operator;
let type_ = annotated.type_;
let variable = annotated.expression;
let mut tree = Tree::type_signature(variable, colon, type_);
tree.span.left_offset += span.left_offset;
return tree;
}
let tree_ = &mut tree;
let opr_app = match tree_ {
Tree { variant: box Variant::OprApp(opr_app), span } => {
left_offset += &span.left_offset;
opr_app
}
_ => return tree,
};
if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
let (leftmost, args) = collect_arguments(lhs.clone());
if let Some(rhs) = rhs {
if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
// If the LHS is a type, this is a (destructuring) assignment.
let lhs = expression_to_pattern(mem::take(lhs));
let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
if args.is_empty() && !is_body_block(rhs) {
// If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
// this is a variable assignment.
let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
if is_qualified_name(&leftmost) {
// If this is not a variable assignment, and the leftmost leaf of the `App` tree is
// a qualified name, this is a function definition.
let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
tree
}
fn is_qualified_name(tree: &syntax::Tree) -> bool {
use syntax::tree::*;
match &*tree.variant {
Variant::Ident(_) => true,
Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
if matches!(&*rhs.variant, Variant::Ident(_)) && opr.properties.is_dot() =>
is_qualified_name(lhs),
_ => false,
}
}
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_type(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_type(body)), close),
box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_type(func), expression_to_type(arg)),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast, .. }) => expression_to_pattern(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_pattern(body)), close),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
Tree::type_annotated(expression_to_pattern(expression), operator, type_),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
let mut args = vec![];
let tree = unroll_arguments(tree, &mut args);
args.reverse();
(tree, args)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> |
fn unroll_arguments<'s>(
mut tree: syntax::Tree<'s>,
args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
while let Some(arg) = parse_argument_application(&mut tree) {
args.push(arg);
}
tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
use syntax::tree::*;
match &mut expression.variant {
box Variant::App(App { func, arg }) => {
let arg = parse_argument_definition(arg.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(arg)
}
box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
let open = mem::take(open);
let close = mem::take(close);
let equals = equals.clone();
let pattern = Tree::ident(name.clone());
let open2 = default();
let suspension = default();
let close2 = default();
let type_ = default();
let default = Some(ArgumentDefault { equals, expression: arg.clone() });
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open,
open2,
pattern,
suspension,
default,
close2,
type_,
close,
})
}
box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
let pattern = Tree::ident(default_.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open: default(),
open2: default(),
suspension: default(),
pattern,
type_: default(),
close2: default(),
default: default(),
close: default(),
})
}
_ => None,
}
}
/// Interpret the expression as an element of an argument definition sequence.
pub fn parse_argument_definition(mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
use syntax::tree::*;
let mut open1 = default();
let mut close1 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open1 = open;
close1 = close;
pattern = body;
}
let mut default_ = default();
if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
let left_offset = pattern.span.left_offset;
default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
pattern = lhs.clone();
pattern.span.left_offset += left_offset;
}
let mut open2 = default();
let mut close2 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open2 = open;
close2 = close;
pattern = body;
}
let mut type__ = default();
if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
pattern.variant
{
expression.span.left_offset += pattern.span.left_offset;
type__ = Some(ArgumentType { operator, type_ });
pattern = expression;
}
let mut suspension = default();
if let box Variant::TemplateFunction(TemplateFunction { mut ast, .. }) = pattern.variant {
ast.span.left_offset += pattern.span.left_offset;
pattern = ast;
}
if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
let mut opr = opr.clone();
opr.left_offset += pattern.span.left_offset;
suspension = Some(opr);
pattern = rhs.clone();
}
let pattern = expression_to_pattern(pattern);
let open = open1;
let close = close1;
let type_ = type__;
ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
matches!(&*expression.variant, syntax::tree::Variant::BodyBlock { .. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
#[bench]
fn bench_parsing_type_defs(bencher: &mut Bencher) {
let reps = 1_000;
let str = "type Option a b c\n".repeat(reps);
let parser = Parser::new();
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_blocks(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 10_000;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let mut indent = 0u32;
for _ in 0..lines {
// Indent:
// 1/8 chance of increasing.
// 1/8 chance of decreasing.
// 3/4 chance of leaving unchanged.
match rng.gen_range(0..8) {
0u32 => indent = indent.saturating_sub(1),
1 => indent += 1,
_ => (),
}
for _ in 0..indent {
str.push(' ');
}
// 1/4 chance of operator-block line syntax.
if rng.gen_range(0..4) == 0u32 {
str.push_str("* ");
}
str.push('x');
// Equal chance of the next line being interpreted as a body block or argument block
// line, if it is indented and doesn't match the operator-block syntax.
// The `=` operator is chosen to exercise the expression-to-statement conversion path.
if rng.gen() {
str.push_str(" =");
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
str.push(operators[i]);
str.push('x');
}
str.push(' ');
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
}
| {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
} | identifier_body |
scan.py | import numpy as np
import torch
import torch.distributed as dist
from torch import nn
from torch.nn.functional import pad
from qd.qd_common import get_mpi_size as get_world_size
from .scan_utils import EncoderFeature, EncoderText, ContrastiveLoss
class SCANEmbedding(object):
"""
Structure that holds SCAN embeddings and provides .to function to
be able to move all necessary tensors between gpu and cpu.
"""
def __init__(self, img_emb, img_length, cap_emb, cap_length):
self.img_emb = img_emb
self.img_length = img_length
self.cap_emb = cap_emb
self.cap_length = cap_length
def to(self, *args, **kwargs):
cast_img_emb = self.img_emb.to(*args, **kwargs)
cast_cap_emb = self.cap_emb.to(*args, **kwargs)
cast_img_length = self.img_length.to(*args, **kwargs)
cast_cap_length = self.cap_length.to(*args, **kwargs)
return SCANEmbedding(cast_img_emb, cast_img_length,
cast_cap_emb, cast_cap_length)
class SCAN(nn.Module):
def __init__(self, cfg, bbox_proposal_model=None):
super(SCAN, self).__init__()
if cfg.MODEL.RPN_ONLY:
raise ValueError("SCAN model can't operate in RPN_ONLY regime, "
"since it requires an object detection head")
if bbox_proposal_model:
self.img_dim = bbox_proposal_model.roi_heads.box.feature_dim
else:
self.img_dim = cfg.MODEL.SCAN.IMG_FEATURES_DIM
self.img_enc = EncoderFeature(
self.img_dim, cfg.MODEL.SCAN.EMBED_SIZE,
precomp_enc_type=cfg.MODEL.SCAN.PRECOMP_ENC_TYPE,
no_featnorm=cfg.MODEL.SCAN.NO_IMG_NORM,
)
self.text_features_as_input = cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT
if self.text_features_as_input:
self.text_dim = cfg.MODEL.SCAN.TEXT_FEATURES_DIM
else:
self.text_dim = cfg.MODEL.SCAN.VOCAB_SIZE
self.txt_enc = EncoderText(
self.text_dim, cfg.MODEL.SCAN.WORD_DIM,
cfg.MODEL.SCAN.EMBED_SIZE, cfg.MODEL.SCAN.NUM_LAYERS,
use_bi_gru=cfg.MODEL.SCAN.BI_GRU,
no_txtnorm=cfg.MODEL.SCAN.NO_TXT_NORM,
features_as_input=cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT,
)
self.criterion = ContrastiveLoss(
opt=cfg.MODEL.SCAN,
margin=cfg.MODEL.SCAN.MARGIN,
max_violation=cfg.MODEL.SCAN.MAX_VIOLATION,
)
self.bbox_proposal_model = bbox_proposal_model
self.freeze_backbone = cfg.MODEL.SCAN.FREEZE_BACKBONE
self.use_precomputed_boxes = cfg.MODEL.SCAN.BBOX_AS_INPUT
self.device = cfg.MODEL.DEVICE
self.average_loss = cfg.MODEL.SCAN.AVERAGE_LOSS
self.random_boxes = cfg.MODEL.SCAN.RANDOM_BOXES
def forward(self, images, targets):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets: Tensor containing padded captions information
(and maybe also precomputed bounding boxes)
Returns:
contrastive_loss in the training regime and tuple of image and
caption embeddings in the test/evaluation regime.
"""
images = [image.to(self.device) for image in images]
if self.use_precomputed_boxes:
targets_transposed = list(zip(*targets))
captions = targets_transposed[0]
boxes = targets_transposed[1]
if self.random_boxes:
for boxlist in boxes:
for idx, box in enumerate(boxlist.bbox):
x = np.random.random() * boxlist.size[0]
y = np.random.random() * boxlist.size[1]
w = np.random.random() * (boxlist.size[0] - x)
h = np.random.random() * (boxlist.size[1] - y)
boxlist.bbox[idx] = torch.tensor([x, y, x + w, y + h])
boxes = [box.to(self.device) for box in boxes]
force_boxes = True
else:
captions = targets
boxes = None
force_boxes = False
captions = [caption.to(self.device) for caption in captions]
if self.text_features_as_input:
captions = [caption.reshape(-1, self.text_dim) for caption in captions]
# ideally this processing should be moved to collate_fn in datalayer,
# but that would involve changing main dataset building code
lengths = np.array([len(cap) for cap in captions])
max_tokens = lengths.max()
if self.text_features_as_input:
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.text_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
captions = torch.stack(tuple(
pad_features(caption, max_tokens) for caption in captions
))
else:
captions = torch.nn.utils.rnn.pad_sequence(captions, batch_first=True)
# sorting data on caption length to use pack_padded_sequence
sorted_indices = np.argsort(-lengths)
lengths = lengths[sorted_indices]
# no matter the input is token or feature,
# captions now is a torch tensor with batch_first=True
captions = captions[sorted_indices]
images = [images[i] for i in sorted_indices]
if self.use_precomputed_boxes:
boxes = [boxes[i] for i in sorted_indices]
cap_emb, cap_lens = self.txt_enc(
captions,
torch.tensor(lengths, dtype=torch.int64, device=self.device),
)
cap_lens = cap_lens.to(self.device)
if self.bbox_proposal_model is not None:
# remembering the current mode to restore it later
detection_training = self.bbox_proposal_model.training
force_boxes_model = self.bbox_proposal_model.force_boxes
force_boxes_box = self.bbox_proposal_model.roi_heads.box.force_boxes
self.bbox_proposal_model.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes
if self.freeze_backbone:
self.bbox_proposal_model.eval()
with torch.no_grad():
predictions = self.bbox_proposal_model(
images, targets=boxes
)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
else:
# TODO: consider making a separate parameter to run model in
# inference mode
# setting the whole model to eval mode to ensure we get test-regime
# proposals. However, since we are going to train backbone and
# box head feature extractor, we want to keep them in the training
# regime (to ensure that e.g. batch norm behaves correctly).
backbone_training = self.bbox_proposal_model.backbone.training
self.bbox_proposal_model.eval()
if self.training:
self.bbox_proposal_model.backbone.train()
self.bbox_proposal_model.roi_heads.box.feature_extractor.train()
predictions = self.bbox_proposal_model(images, targets=boxes)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
# restoring the mode of backbone and box head
if backbone_training is False:
self.bbox_proposal_model.backbone.eval()
self.bbox_proposal_model.roi_heads.box.feature_extractor.eval()
predictions = [pred.get_field('box_features') for pred in predictions]
self.bbox_proposal_model.force_boxes = force_boxes_model
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes_box
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes_box
else:
# if bbox_proposal_model is None, dataset has to yield features
# for rpn proposals instead of images
predictions = [features.reshape(-1, self.img_dim)
for features in images]
num_proposals = torch.tensor([len(pred) for pred in predictions],
dtype=torch.int64, device=self.device)
max_proposals = num_proposals.max()
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.img_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
image_features = torch.stack(tuple(
pad_features(pred, max_proposals) for pred in predictions
))
img_emb = self.img_enc(image_features, num_proposals)
if self.training:
# in distributed setting, we need to aggregate all embeddings
# before computing loss, since SCAN loss depends on all elements
# in the batch.
# note that this code will compute exactly the same loss on each
# GPU. This can potentially be optimized to parallel the computation
# of the loss, but since that's not the bottleneck of the model
# we do not try to do that for now.
world_size = get_world_size()
if world_size > 1:
# need to make sure batch size is the same on all processes,
# since for the last batch in epoch it might be different;
# if it's different, we will cut everything to the smallest | for _ in range(world_size)]
dist.all_gather(batch_size_full, batch_size)
# cutting all data to min batch size across all GPUs
min_bs = min([bs.item() for bs in batch_size_full])
if min_bs < batch_size:
num_proposals = num_proposals[:min_bs]
cap_lens = cap_lens[:min_bs]
img_emb = img_emb[:min_bs]
cap_emb = cap_emb[:min_bs]
# exchanging proposals
cap_lens_full = [torch.zeros_like(cap_lens)
for _ in range(world_size)]
num_proposals_full = [torch.zeros_like(num_proposals)
for _ in range(world_size)]
dist.all_gather(cap_lens_full, cap_lens)
dist.all_gather(num_proposals_full, num_proposals)
cap_lens = torch.cat(cap_lens_full, dim=0)
num_proposals = torch.cat(num_proposals_full, dim=0)
# before exchanging embeddings, need to pad them
# to be of the same size
def pad_features(features, max_length):
length = features.shape[1]
return pad(features, [0, 0, 0, max_length - length, 0, 0],
'constant', 0)
img_emb = pad_features(img_emb, num_proposals.max().item())
cap_emb = pad_features(cap_emb, cap_lens.max().item())
img_emb_full = [torch.zeros_like(img_emb)
for _ in range(world_size)]
cap_emb_full = [torch.zeros_like(cap_emb)
for _ in range(world_size)]
dist.all_gather(img_emb_full, img_emb)
dist.all_gather(cap_emb_full, cap_emb)
# need to do this to restore propagation of the gradients
rank = dist.get_rank()
img_emb_full[rank] = img_emb
cap_emb_full[rank] = cap_emb
img_emb = torch.cat(img_emb_full, dim=0)
cap_emb = torch.cat(cap_emb_full, dim=0)
losses = {
'contrastive_loss': self.criterion(img_emb, num_proposals,
cap_emb, cap_lens),
}
if self.average_loss:
losses['contrastive_loss'] /= img_emb.shape[0]
return losses
# in the evaluation we need to return things in the correct order
# in addition we restructure everything to return all results for
# each image separately
orig_indices = np.argsort(sorted_indices)
return [
SCANEmbedding(img_emb, img_len, cap_emb, cap_len)
for img_emb, img_len, cap_emb, cap_len in zip(
img_emb[orig_indices], num_proposals[orig_indices],
cap_emb[orig_indices], cap_lens[orig_indices],
)
]
def unused_params(self):
pms = {}
if self.bbox_proposal_model is None:
return pms
for key, param in self.bbox_proposal_model.named_parameters():
if self.freeze_backbone:
pms['bbox_proposal_model.{}'.format(key)] = param
if not self.freeze_backbone and 'backbone' not in key \
and 'box.feature_extractor' not in key:
pms['bbox_proposal_model.{}'.format(key)] = param
return pms | # batch size across all processes
batch_size = torch.tensor(img_emb.shape[0], device=self.device)
batch_size_full = [torch.zeros_like(batch_size) | random_line_split |
scan.py | import numpy as np
import torch
import torch.distributed as dist
from torch import nn
from torch.nn.functional import pad
from qd.qd_common import get_mpi_size as get_world_size
from .scan_utils import EncoderFeature, EncoderText, ContrastiveLoss
class SCANEmbedding(object):
"""
Structure that holds SCAN embeddings and provides .to function to
be able to move all necessary tensors between gpu and cpu.
"""
def __init__(self, img_emb, img_length, cap_emb, cap_length):
self.img_emb = img_emb
self.img_length = img_length
self.cap_emb = cap_emb
self.cap_length = cap_length
def to(self, *args, **kwargs):
cast_img_emb = self.img_emb.to(*args, **kwargs)
cast_cap_emb = self.cap_emb.to(*args, **kwargs)
cast_img_length = self.img_length.to(*args, **kwargs)
cast_cap_length = self.cap_length.to(*args, **kwargs)
return SCANEmbedding(cast_img_emb, cast_img_length,
cast_cap_emb, cast_cap_length)
class SCAN(nn.Module):
def __init__(self, cfg, bbox_proposal_model=None):
super(SCAN, self).__init__()
if cfg.MODEL.RPN_ONLY:
raise ValueError("SCAN model can't operate in RPN_ONLY regime, "
"since it requires an object detection head")
if bbox_proposal_model:
self.img_dim = bbox_proposal_model.roi_heads.box.feature_dim
else:
self.img_dim = cfg.MODEL.SCAN.IMG_FEATURES_DIM
self.img_enc = EncoderFeature(
self.img_dim, cfg.MODEL.SCAN.EMBED_SIZE,
precomp_enc_type=cfg.MODEL.SCAN.PRECOMP_ENC_TYPE,
no_featnorm=cfg.MODEL.SCAN.NO_IMG_NORM,
)
self.text_features_as_input = cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT
if self.text_features_as_input:
self.text_dim = cfg.MODEL.SCAN.TEXT_FEATURES_DIM
else:
self.text_dim = cfg.MODEL.SCAN.VOCAB_SIZE
self.txt_enc = EncoderText(
self.text_dim, cfg.MODEL.SCAN.WORD_DIM,
cfg.MODEL.SCAN.EMBED_SIZE, cfg.MODEL.SCAN.NUM_LAYERS,
use_bi_gru=cfg.MODEL.SCAN.BI_GRU,
no_txtnorm=cfg.MODEL.SCAN.NO_TXT_NORM,
features_as_input=cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT,
)
self.criterion = ContrastiveLoss(
opt=cfg.MODEL.SCAN,
margin=cfg.MODEL.SCAN.MARGIN,
max_violation=cfg.MODEL.SCAN.MAX_VIOLATION,
)
self.bbox_proposal_model = bbox_proposal_model
self.freeze_backbone = cfg.MODEL.SCAN.FREEZE_BACKBONE
self.use_precomputed_boxes = cfg.MODEL.SCAN.BBOX_AS_INPUT
self.device = cfg.MODEL.DEVICE
self.average_loss = cfg.MODEL.SCAN.AVERAGE_LOSS
self.random_boxes = cfg.MODEL.SCAN.RANDOM_BOXES
def forward(self, images, targets):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets: Tensor containing padded captions information
(and maybe also precomputed bounding boxes)
Returns:
contrastive_loss in the training regime and tuple of image and
caption embeddings in the test/evaluation regime.
"""
images = [image.to(self.device) for image in images]
if self.use_precomputed_boxes:
targets_transposed = list(zip(*targets))
captions = targets_transposed[0]
boxes = targets_transposed[1]
if self.random_boxes:
for boxlist in boxes:
for idx, box in enumerate(boxlist.bbox):
x = np.random.random() * boxlist.size[0]
y = np.random.random() * boxlist.size[1]
w = np.random.random() * (boxlist.size[0] - x)
h = np.random.random() * (boxlist.size[1] - y)
boxlist.bbox[idx] = torch.tensor([x, y, x + w, y + h])
boxes = [box.to(self.device) for box in boxes]
force_boxes = True
else:
captions = targets
boxes = None
force_boxes = False
captions = [caption.to(self.device) for caption in captions]
if self.text_features_as_input:
captions = [caption.reshape(-1, self.text_dim) for caption in captions]
# ideally this processing should be moved to collate_fn in datalayer,
# but that would involve changing main dataset building code
lengths = np.array([len(cap) for cap in captions])
max_tokens = lengths.max()
if self.text_features_as_input:
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.text_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
captions = torch.stack(tuple(
pad_features(caption, max_tokens) for caption in captions
))
else:
captions = torch.nn.utils.rnn.pad_sequence(captions, batch_first=True)
# sorting data on caption length to use pack_padded_sequence
sorted_indices = np.argsort(-lengths)
lengths = lengths[sorted_indices]
# no matter the input is token or feature,
# captions now is a torch tensor with batch_first=True
captions = captions[sorted_indices]
images = [images[i] for i in sorted_indices]
if self.use_precomputed_boxes:
boxes = [boxes[i] for i in sorted_indices]
cap_emb, cap_lens = self.txt_enc(
captions,
torch.tensor(lengths, dtype=torch.int64, device=self.device),
)
cap_lens = cap_lens.to(self.device)
if self.bbox_proposal_model is not None:
# remembering the current mode to restore it later
detection_training = self.bbox_proposal_model.training
force_boxes_model = self.bbox_proposal_model.force_boxes
force_boxes_box = self.bbox_proposal_model.roi_heads.box.force_boxes
self.bbox_proposal_model.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes
if self.freeze_backbone:
self.bbox_proposal_model.eval()
with torch.no_grad():
predictions = self.bbox_proposal_model(
images, targets=boxes
)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
else:
# TODO: consider making a separate parameter to run model in
# inference mode
# setting the whole model to eval mode to ensure we get test-regime
# proposals. However, since we are going to train backbone and
# box head feature extractor, we want to keep them in the training
# regime (to ensure that e.g. batch norm behaves correctly).
|
predictions = [pred.get_field('box_features') for pred in predictions]
self.bbox_proposal_model.force_boxes = force_boxes_model
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes_box
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes_box
else:
# if bbox_proposal_model is None, dataset has to yield features
# for rpn proposals instead of images
predictions = [features.reshape(-1, self.img_dim)
for features in images]
num_proposals = torch.tensor([len(pred) for pred in predictions],
dtype=torch.int64, device=self.device)
max_proposals = num_proposals.max()
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.img_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
image_features = torch.stack(tuple(
pad_features(pred, max_proposals) for pred in predictions
))
img_emb = self.img_enc(image_features, num_proposals)
if self.training:
# in distributed setting, we need to aggregate all embeddings
# before computing loss, since SCAN loss depends on all elements
# in the batch.
# note that this code will compute exactly the same loss on each
# GPU. This can potentially be optimized to parallel the computation
# of the loss, but since that's not the bottleneck of the model
# we do not try to do that for now.
world_size = get_world_size()
if world_size > 1:
# need to make sure batch size is the same on all processes,
# since for the last batch in epoch it might be different;
# if it's different, we will cut everything to the smallest
# batch size across all processes
batch_size = torch.tensor(img_emb.shape[0], device=self.device)
batch_size_full = [torch.zeros_like(batch_size)
for _ in range(world_size)]
dist.all_gather(batch_size_full, batch_size)
# cutting all data to min batch size across all GPUs
min_bs = min([bs.item() for bs in batch_size_full])
if min_bs < batch_size:
num_proposals = num_proposals[:min_bs]
cap_lens = cap_lens[:min_bs]
img_emb = img_emb[:min_bs]
cap_emb = cap_emb[:min_bs]
# exchanging proposals
cap_lens_full = [torch.zeros_like(cap_lens)
for _ in range(world_size)]
num_proposals_full = [torch.zeros_like(num_proposals)
for _ in range(world_size)]
dist.all_gather(cap_lens_full, cap_lens)
dist.all_gather(num_proposals_full, num_proposals)
cap_lens = torch.cat(cap_lens_full, dim=0)
num_proposals = torch.cat(num_proposals_full, dim=0)
# before exchanging embeddings, need to pad them
# to be of the same size
def pad_features(features, max_length):
length = features.shape[1]
return pad(features, [0, 0, 0, max_length - length, 0, 0],
'constant', 0)
img_emb = pad_features(img_emb, num_proposals.max().item())
cap_emb = pad_features(cap_emb, cap_lens.max().item())
img_emb_full = [torch.zeros_like(img_emb)
for _ in range(world_size)]
cap_emb_full = [torch.zeros_like(cap_emb)
for _ in range(world_size)]
dist.all_gather(img_emb_full, img_emb)
dist.all_gather(cap_emb_full, cap_emb)
# need to do this to restore propagation of the gradients
rank = dist.get_rank()
img_emb_full[rank] = img_emb
cap_emb_full[rank] = cap_emb
img_emb = torch.cat(img_emb_full, dim=0)
cap_emb = torch.cat(cap_emb_full, dim=0)
losses = {
'contrastive_loss': self.criterion(img_emb, num_proposals,
cap_emb, cap_lens),
}
if self.average_loss:
losses['contrastive_loss'] /= img_emb.shape[0]
return losses
# in the evaluation we need to return things in the correct order
# in addition we restructure everything to return all results for
# each image separately
orig_indices = np.argsort(sorted_indices)
return [
SCANEmbedding(img_emb, img_len, cap_emb, cap_len)
for img_emb, img_len, cap_emb, cap_len in zip(
img_emb[orig_indices], num_proposals[orig_indices],
cap_emb[orig_indices], cap_lens[orig_indices],
)
]
def unused_params(self):
pms = {}
if self.bbox_proposal_model is None:
return pms
for key, param in self.bbox_proposal_model.named_parameters():
if self.freeze_backbone:
pms['bbox_proposal_model.{}'.format(key)] = param
if not self.freeze_backbone and 'backbone' not in key \
and 'box.feature_extractor' not in key:
pms['bbox_proposal_model.{}'.format(key)] = param
return pms
| backbone_training = self.bbox_proposal_model.backbone.training
self.bbox_proposal_model.eval()
if self.training:
self.bbox_proposal_model.backbone.train()
self.bbox_proposal_model.roi_heads.box.feature_extractor.train()
predictions = self.bbox_proposal_model(images, targets=boxes)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
# restoring the mode of backbone and box head
if backbone_training is False:
self.bbox_proposal_model.backbone.eval()
self.bbox_proposal_model.roi_heads.box.feature_extractor.eval() | conditional_block |
scan.py | import numpy as np
import torch
import torch.distributed as dist
from torch import nn
from torch.nn.functional import pad
from qd.qd_common import get_mpi_size as get_world_size
from .scan_utils import EncoderFeature, EncoderText, ContrastiveLoss
class SCANEmbedding(object):
"""
Structure that holds SCAN embeddings and provides .to function to
be able to move all necessary tensors between gpu and cpu.
"""
def __init__(self, img_emb, img_length, cap_emb, cap_length):
self.img_emb = img_emb
self.img_length = img_length
self.cap_emb = cap_emb
self.cap_length = cap_length
def to(self, *args, **kwargs):
|
class SCAN(nn.Module):
def __init__(self, cfg, bbox_proposal_model=None):
super(SCAN, self).__init__()
if cfg.MODEL.RPN_ONLY:
raise ValueError("SCAN model can't operate in RPN_ONLY regime, "
"since it requires an object detection head")
if bbox_proposal_model:
self.img_dim = bbox_proposal_model.roi_heads.box.feature_dim
else:
self.img_dim = cfg.MODEL.SCAN.IMG_FEATURES_DIM
self.img_enc = EncoderFeature(
self.img_dim, cfg.MODEL.SCAN.EMBED_SIZE,
precomp_enc_type=cfg.MODEL.SCAN.PRECOMP_ENC_TYPE,
no_featnorm=cfg.MODEL.SCAN.NO_IMG_NORM,
)
self.text_features_as_input = cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT
if self.text_features_as_input:
self.text_dim = cfg.MODEL.SCAN.TEXT_FEATURES_DIM
else:
self.text_dim = cfg.MODEL.SCAN.VOCAB_SIZE
self.txt_enc = EncoderText(
self.text_dim, cfg.MODEL.SCAN.WORD_DIM,
cfg.MODEL.SCAN.EMBED_SIZE, cfg.MODEL.SCAN.NUM_LAYERS,
use_bi_gru=cfg.MODEL.SCAN.BI_GRU,
no_txtnorm=cfg.MODEL.SCAN.NO_TXT_NORM,
features_as_input=cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT,
)
self.criterion = ContrastiveLoss(
opt=cfg.MODEL.SCAN,
margin=cfg.MODEL.SCAN.MARGIN,
max_violation=cfg.MODEL.SCAN.MAX_VIOLATION,
)
self.bbox_proposal_model = bbox_proposal_model
self.freeze_backbone = cfg.MODEL.SCAN.FREEZE_BACKBONE
self.use_precomputed_boxes = cfg.MODEL.SCAN.BBOX_AS_INPUT
self.device = cfg.MODEL.DEVICE
self.average_loss = cfg.MODEL.SCAN.AVERAGE_LOSS
self.random_boxes = cfg.MODEL.SCAN.RANDOM_BOXES
def forward(self, images, targets):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets: Tensor containing padded captions information
(and maybe also precomputed bounding boxes)
Returns:
contrastive_loss in the training regime and tuple of image and
caption embeddings in the test/evaluation regime.
"""
images = [image.to(self.device) for image in images]
if self.use_precomputed_boxes:
targets_transposed = list(zip(*targets))
captions = targets_transposed[0]
boxes = targets_transposed[1]
if self.random_boxes:
for boxlist in boxes:
for idx, box in enumerate(boxlist.bbox):
x = np.random.random() * boxlist.size[0]
y = np.random.random() * boxlist.size[1]
w = np.random.random() * (boxlist.size[0] - x)
h = np.random.random() * (boxlist.size[1] - y)
boxlist.bbox[idx] = torch.tensor([x, y, x + w, y + h])
boxes = [box.to(self.device) for box in boxes]
force_boxes = True
else:
captions = targets
boxes = None
force_boxes = False
captions = [caption.to(self.device) for caption in captions]
if self.text_features_as_input:
captions = [caption.reshape(-1, self.text_dim) for caption in captions]
# ideally this processing should be moved to collate_fn in datalayer,
# but that would involve changing main dataset building code
lengths = np.array([len(cap) for cap in captions])
max_tokens = lengths.max()
if self.text_features_as_input:
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.text_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
captions = torch.stack(tuple(
pad_features(caption, max_tokens) for caption in captions
))
else:
captions = torch.nn.utils.rnn.pad_sequence(captions, batch_first=True)
# sorting data on caption length to use pack_padded_sequence
sorted_indices = np.argsort(-lengths)
lengths = lengths[sorted_indices]
# no matter the input is token or feature,
# captions now is a torch tensor with batch_first=True
captions = captions[sorted_indices]
images = [images[i] for i in sorted_indices]
if self.use_precomputed_boxes:
boxes = [boxes[i] for i in sorted_indices]
cap_emb, cap_lens = self.txt_enc(
captions,
torch.tensor(lengths, dtype=torch.int64, device=self.device),
)
cap_lens = cap_lens.to(self.device)
if self.bbox_proposal_model is not None:
# remembering the current mode to restore it later
detection_training = self.bbox_proposal_model.training
force_boxes_model = self.bbox_proposal_model.force_boxes
force_boxes_box = self.bbox_proposal_model.roi_heads.box.force_boxes
self.bbox_proposal_model.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes
if self.freeze_backbone:
self.bbox_proposal_model.eval()
with torch.no_grad():
predictions = self.bbox_proposal_model(
images, targets=boxes
)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
else:
# TODO: consider making a separate parameter to run model in
# inference mode
# setting the whole model to eval mode to ensure we get test-regime
# proposals. However, since we are going to train backbone and
# box head feature extractor, we want to keep them in the training
# regime (to ensure that e.g. batch norm behaves correctly).
backbone_training = self.bbox_proposal_model.backbone.training
self.bbox_proposal_model.eval()
if self.training:
self.bbox_proposal_model.backbone.train()
self.bbox_proposal_model.roi_heads.box.feature_extractor.train()
predictions = self.bbox_proposal_model(images, targets=boxes)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
# restoring the mode of backbone and box head
if backbone_training is False:
self.bbox_proposal_model.backbone.eval()
self.bbox_proposal_model.roi_heads.box.feature_extractor.eval()
predictions = [pred.get_field('box_features') for pred in predictions]
self.bbox_proposal_model.force_boxes = force_boxes_model
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes_box
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes_box
else:
# if bbox_proposal_model is None, dataset has to yield features
# for rpn proposals instead of images
predictions = [features.reshape(-1, self.img_dim)
for features in images]
num_proposals = torch.tensor([len(pred) for pred in predictions],
dtype=torch.int64, device=self.device)
max_proposals = num_proposals.max()
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.img_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
image_features = torch.stack(tuple(
pad_features(pred, max_proposals) for pred in predictions
))
img_emb = self.img_enc(image_features, num_proposals)
if self.training:
# in distributed setting, we need to aggregate all embeddings
# before computing loss, since SCAN loss depends on all elements
# in the batch.
# note that this code will compute exactly the same loss on each
# GPU. This can potentially be optimized to parallel the computation
# of the loss, but since that's not the bottleneck of the model
# we do not try to do that for now.
world_size = get_world_size()
if world_size > 1:
# need to make sure batch size is the same on all processes,
# since for the last batch in epoch it might be different;
# if it's different, we will cut everything to the smallest
# batch size across all processes
batch_size = torch.tensor(img_emb.shape[0], device=self.device)
batch_size_full = [torch.zeros_like(batch_size)
for _ in range(world_size)]
dist.all_gather(batch_size_full, batch_size)
# cutting all data to min batch size across all GPUs
min_bs = min([bs.item() for bs in batch_size_full])
if min_bs < batch_size:
num_proposals = num_proposals[:min_bs]
cap_lens = cap_lens[:min_bs]
img_emb = img_emb[:min_bs]
cap_emb = cap_emb[:min_bs]
# exchanging proposals
cap_lens_full = [torch.zeros_like(cap_lens)
for _ in range(world_size)]
num_proposals_full = [torch.zeros_like(num_proposals)
for _ in range(world_size)]
dist.all_gather(cap_lens_full, cap_lens)
dist.all_gather(num_proposals_full, num_proposals)
cap_lens = torch.cat(cap_lens_full, dim=0)
num_proposals = torch.cat(num_proposals_full, dim=0)
# before exchanging embeddings, need to pad them
# to be of the same size
def pad_features(features, max_length):
length = features.shape[1]
return pad(features, [0, 0, 0, max_length - length, 0, 0],
'constant', 0)
img_emb = pad_features(img_emb, num_proposals.max().item())
cap_emb = pad_features(cap_emb, cap_lens.max().item())
img_emb_full = [torch.zeros_like(img_emb)
for _ in range(world_size)]
cap_emb_full = [torch.zeros_like(cap_emb)
for _ in range(world_size)]
dist.all_gather(img_emb_full, img_emb)
dist.all_gather(cap_emb_full, cap_emb)
# need to do this to restore propagation of the gradients
rank = dist.get_rank()
img_emb_full[rank] = img_emb
cap_emb_full[rank] = cap_emb
img_emb = torch.cat(img_emb_full, dim=0)
cap_emb = torch.cat(cap_emb_full, dim=0)
losses = {
'contrastive_loss': self.criterion(img_emb, num_proposals,
cap_emb, cap_lens),
}
if self.average_loss:
losses['contrastive_loss'] /= img_emb.shape[0]
return losses
# in the evaluation we need to return things in the correct order
# in addition we restructure everything to return all results for
# each image separately
orig_indices = np.argsort(sorted_indices)
return [
SCANEmbedding(img_emb, img_len, cap_emb, cap_len)
for img_emb, img_len, cap_emb, cap_len in zip(
img_emb[orig_indices], num_proposals[orig_indices],
cap_emb[orig_indices], cap_lens[orig_indices],
)
]
def unused_params(self):
pms = {}
if self.bbox_proposal_model is None:
return pms
for key, param in self.bbox_proposal_model.named_parameters():
if self.freeze_backbone:
pms['bbox_proposal_model.{}'.format(key)] = param
if not self.freeze_backbone and 'backbone' not in key \
and 'box.feature_extractor' not in key:
pms['bbox_proposal_model.{}'.format(key)] = param
return pms
| cast_img_emb = self.img_emb.to(*args, **kwargs)
cast_cap_emb = self.cap_emb.to(*args, **kwargs)
cast_img_length = self.img_length.to(*args, **kwargs)
cast_cap_length = self.cap_length.to(*args, **kwargs)
return SCANEmbedding(cast_img_emb, cast_img_length,
cast_cap_emb, cast_cap_length) | identifier_body |
scan.py | import numpy as np
import torch
import torch.distributed as dist
from torch import nn
from torch.nn.functional import pad
from qd.qd_common import get_mpi_size as get_world_size
from .scan_utils import EncoderFeature, EncoderText, ContrastiveLoss
class SCANEmbedding(object):
"""
Structure that holds SCAN embeddings and provides .to function to
be able to move all necessary tensors between gpu and cpu.
"""
def __init__(self, img_emb, img_length, cap_emb, cap_length):
self.img_emb = img_emb
self.img_length = img_length
self.cap_emb = cap_emb
self.cap_length = cap_length
def to(self, *args, **kwargs):
cast_img_emb = self.img_emb.to(*args, **kwargs)
cast_cap_emb = self.cap_emb.to(*args, **kwargs)
cast_img_length = self.img_length.to(*args, **kwargs)
cast_cap_length = self.cap_length.to(*args, **kwargs)
return SCANEmbedding(cast_img_emb, cast_img_length,
cast_cap_emb, cast_cap_length)
class SCAN(nn.Module):
def __init__(self, cfg, bbox_proposal_model=None):
super(SCAN, self).__init__()
if cfg.MODEL.RPN_ONLY:
raise ValueError("SCAN model can't operate in RPN_ONLY regime, "
"since it requires an object detection head")
if bbox_proposal_model:
self.img_dim = bbox_proposal_model.roi_heads.box.feature_dim
else:
self.img_dim = cfg.MODEL.SCAN.IMG_FEATURES_DIM
self.img_enc = EncoderFeature(
self.img_dim, cfg.MODEL.SCAN.EMBED_SIZE,
precomp_enc_type=cfg.MODEL.SCAN.PRECOMP_ENC_TYPE,
no_featnorm=cfg.MODEL.SCAN.NO_IMG_NORM,
)
self.text_features_as_input = cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT
if self.text_features_as_input:
self.text_dim = cfg.MODEL.SCAN.TEXT_FEATURES_DIM
else:
self.text_dim = cfg.MODEL.SCAN.VOCAB_SIZE
self.txt_enc = EncoderText(
self.text_dim, cfg.MODEL.SCAN.WORD_DIM,
cfg.MODEL.SCAN.EMBED_SIZE, cfg.MODEL.SCAN.NUM_LAYERS,
use_bi_gru=cfg.MODEL.SCAN.BI_GRU,
no_txtnorm=cfg.MODEL.SCAN.NO_TXT_NORM,
features_as_input=cfg.MODEL.SCAN.TEXT_FEATURES_AS_INPUT,
)
self.criterion = ContrastiveLoss(
opt=cfg.MODEL.SCAN,
margin=cfg.MODEL.SCAN.MARGIN,
max_violation=cfg.MODEL.SCAN.MAX_VIOLATION,
)
self.bbox_proposal_model = bbox_proposal_model
self.freeze_backbone = cfg.MODEL.SCAN.FREEZE_BACKBONE
self.use_precomputed_boxes = cfg.MODEL.SCAN.BBOX_AS_INPUT
self.device = cfg.MODEL.DEVICE
self.average_loss = cfg.MODEL.SCAN.AVERAGE_LOSS
self.random_boxes = cfg.MODEL.SCAN.RANDOM_BOXES
def forward(self, images, targets):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets: Tensor containing padded captions information
(and maybe also precomputed bounding boxes)
Returns:
contrastive_loss in the training regime and tuple of image and
caption embeddings in the test/evaluation regime.
"""
images = [image.to(self.device) for image in images]
if self.use_precomputed_boxes:
targets_transposed = list(zip(*targets))
captions = targets_transposed[0]
boxes = targets_transposed[1]
if self.random_boxes:
for boxlist in boxes:
for idx, box in enumerate(boxlist.bbox):
x = np.random.random() * boxlist.size[0]
y = np.random.random() * boxlist.size[1]
w = np.random.random() * (boxlist.size[0] - x)
h = np.random.random() * (boxlist.size[1] - y)
boxlist.bbox[idx] = torch.tensor([x, y, x + w, y + h])
boxes = [box.to(self.device) for box in boxes]
force_boxes = True
else:
captions = targets
boxes = None
force_boxes = False
captions = [caption.to(self.device) for caption in captions]
if self.text_features_as_input:
captions = [caption.reshape(-1, self.text_dim) for caption in captions]
# ideally this processing should be moved to collate_fn in datalayer,
# but that would involve changing main dataset building code
lengths = np.array([len(cap) for cap in captions])
max_tokens = lengths.max()
if self.text_features_as_input:
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.text_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
captions = torch.stack(tuple(
pad_features(caption, max_tokens) for caption in captions
))
else:
captions = torch.nn.utils.rnn.pad_sequence(captions, batch_first=True)
# sorting data on caption length to use pack_padded_sequence
sorted_indices = np.argsort(-lengths)
lengths = lengths[sorted_indices]
# no matter the input is token or feature,
# captions now is a torch tensor with batch_first=True
captions = captions[sorted_indices]
images = [images[i] for i in sorted_indices]
if self.use_precomputed_boxes:
boxes = [boxes[i] for i in sorted_indices]
cap_emb, cap_lens = self.txt_enc(
captions,
torch.tensor(lengths, dtype=torch.int64, device=self.device),
)
cap_lens = cap_lens.to(self.device)
if self.bbox_proposal_model is not None:
# remembering the current mode to restore it later
detection_training = self.bbox_proposal_model.training
force_boxes_model = self.bbox_proposal_model.force_boxes
force_boxes_box = self.bbox_proposal_model.roi_heads.box.force_boxes
self.bbox_proposal_model.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes
if self.freeze_backbone:
self.bbox_proposal_model.eval()
with torch.no_grad():
predictions = self.bbox_proposal_model(
images, targets=boxes
)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
else:
# TODO: consider making a separate parameter to run model in
# inference mode
# setting the whole model to eval mode to ensure we get test-regime
# proposals. However, since we are going to train backbone and
# box head feature extractor, we want to keep them in the training
# regime (to ensure that e.g. batch norm behaves correctly).
backbone_training = self.bbox_proposal_model.backbone.training
self.bbox_proposal_model.eval()
if self.training:
self.bbox_proposal_model.backbone.train()
self.bbox_proposal_model.roi_heads.box.feature_extractor.train()
predictions = self.bbox_proposal_model(images, targets=boxes)
# restoring the mode of detection model
if detection_training is True:
self.bbox_proposal_model.train()
# restoring the mode of backbone and box head
if backbone_training is False:
self.bbox_proposal_model.backbone.eval()
self.bbox_proposal_model.roi_heads.box.feature_extractor.eval()
predictions = [pred.get_field('box_features') for pred in predictions]
self.bbox_proposal_model.force_boxes = force_boxes_model
self.bbox_proposal_model.roi_heads.box.force_boxes = force_boxes_box
self.bbox_proposal_model.roi_heads.box.post_processor.force_boxes = force_boxes_box
else:
# if bbox_proposal_model is None, dataset has to yield features
# for rpn proposals instead of images
predictions = [features.reshape(-1, self.img_dim)
for features in images]
num_proposals = torch.tensor([len(pred) for pred in predictions],
dtype=torch.int64, device=self.device)
max_proposals = num_proposals.max()
def pad_features(features, max_length):
length = features.shape[0]
if length == 0:
return torch.zeros((max_length, self.img_dim),
dtype=torch.float32, device=self.device)
return pad(features, [0, 0, 0, max_length - length], 'constant', 0)
image_features = torch.stack(tuple(
pad_features(pred, max_proposals) for pred in predictions
))
img_emb = self.img_enc(image_features, num_proposals)
if self.training:
# in distributed setting, we need to aggregate all embeddings
# before computing loss, since SCAN loss depends on all elements
# in the batch.
# note that this code will compute exactly the same loss on each
# GPU. This can potentially be optimized to parallel the computation
# of the loss, but since that's not the bottleneck of the model
# we do not try to do that for now.
world_size = get_world_size()
if world_size > 1:
# need to make sure batch size is the same on all processes,
# since for the last batch in epoch it might be different;
# if it's different, we will cut everything to the smallest
# batch size across all processes
batch_size = torch.tensor(img_emb.shape[0], device=self.device)
batch_size_full = [torch.zeros_like(batch_size)
for _ in range(world_size)]
dist.all_gather(batch_size_full, batch_size)
# cutting all data to min batch size across all GPUs
min_bs = min([bs.item() for bs in batch_size_full])
if min_bs < batch_size:
num_proposals = num_proposals[:min_bs]
cap_lens = cap_lens[:min_bs]
img_emb = img_emb[:min_bs]
cap_emb = cap_emb[:min_bs]
# exchanging proposals
cap_lens_full = [torch.zeros_like(cap_lens)
for _ in range(world_size)]
num_proposals_full = [torch.zeros_like(num_proposals)
for _ in range(world_size)]
dist.all_gather(cap_lens_full, cap_lens)
dist.all_gather(num_proposals_full, num_proposals)
cap_lens = torch.cat(cap_lens_full, dim=0)
num_proposals = torch.cat(num_proposals_full, dim=0)
# before exchanging embeddings, need to pad them
# to be of the same size
def pad_features(features, max_length):
length = features.shape[1]
return pad(features, [0, 0, 0, max_length - length, 0, 0],
'constant', 0)
img_emb = pad_features(img_emb, num_proposals.max().item())
cap_emb = pad_features(cap_emb, cap_lens.max().item())
img_emb_full = [torch.zeros_like(img_emb)
for _ in range(world_size)]
cap_emb_full = [torch.zeros_like(cap_emb)
for _ in range(world_size)]
dist.all_gather(img_emb_full, img_emb)
dist.all_gather(cap_emb_full, cap_emb)
# need to do this to restore propagation of the gradients
rank = dist.get_rank()
img_emb_full[rank] = img_emb
cap_emb_full[rank] = cap_emb
img_emb = torch.cat(img_emb_full, dim=0)
cap_emb = torch.cat(cap_emb_full, dim=0)
losses = {
'contrastive_loss': self.criterion(img_emb, num_proposals,
cap_emb, cap_lens),
}
if self.average_loss:
losses['contrastive_loss'] /= img_emb.shape[0]
return losses
# in the evaluation we need to return things in the correct order
# in addition we restructure everything to return all results for
# each image separately
orig_indices = np.argsort(sorted_indices)
return [
SCANEmbedding(img_emb, img_len, cap_emb, cap_len)
for img_emb, img_len, cap_emb, cap_len in zip(
img_emb[orig_indices], num_proposals[orig_indices],
cap_emb[orig_indices], cap_lens[orig_indices],
)
]
def | (self):
pms = {}
if self.bbox_proposal_model is None:
return pms
for key, param in self.bbox_proposal_model.named_parameters():
if self.freeze_backbone:
pms['bbox_proposal_model.{}'.format(key)] = param
if not self.freeze_backbone and 'backbone' not in key \
and 'box.feature_extractor' not in key:
pms['bbox_proposal_model.{}'.format(key)] = param
return pms
| unused_params | identifier_name |
youtube_tops_en_US_.py | # -*- coding: utf-8 -*-
from ..common_spider_es import *
from ..video_spider_es import VideoSpider
from ...feeds_back_utils import *
import datetime
from urllib import unquote
import re
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
import lxml
class YoutubeSpider(VideoSpider):
name = 'youtube_tops_en_US'
download_delay = 3
video_type = 'mp4'
datasource_type = 4
download_maxsize = 104857600 * 5
download_warnsize = 104857600 * 5
default_section = 60 * 60 * 24 * 1
hd = {'pragma': 'no-cache',
'User-Agent': '',
'cache-control': 'no-cache'}
response_url = None
content_list = []
channel_list = []
browse_times = 0
def __init__(self, *a, **kw):
super(YoutubeSpider, self).__init__(*a, **kw)
self.channel_list = get_channel_list('youtube_tops', 'United States of America')
dispatcher.connect(self.spider_idle, signals.spider_idle)
def spider_idle(self):
if self.channel_list:
for rq in self.start_requests():
self.crawler.engine.crawl(rq, self)
def start_requests(self):
|
def parse_page(self, response):
# print response.url
body_instance = response.body_as_unicode()
tree = lxml.html.fromstring(body_instance)
raw = {}
# title_selector = '//*[@id="page-container"]/div[2]/div/div/div[1]/div/div/div/div[@class="ProfileHeaderCard"]/h1[@class="ProfileHeaderCard-name"]/a/text()'
# title_selector = '//*[@id="watch-header"]/div[@id="watch7-headline"]/div[@id="watch-headline-title"]/h1[@class="watch-title-container"]/span/text()'
subtitle_selector = '//*[@id="watch-header"]/div[@id="watch7-user-header"]/div/a/text()'
thumbnails_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/span[@itemprop="thumbnail"]/link/@href'
duration_raw_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="duration"]/@content'
video_id_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="videoId"]/@content'
title_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="name"]/@content'
published_date_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="datePublished"]/@content'
hitcount_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="interactionCount"]/@content'
width_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="width"]/@content'
height_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="height"]/@content'
genre_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="genre"]/@content'
raw['title'] = tree.xpath(title_selector)[0].strip()
raw['subtitle'] = tree.xpath(subtitle_selector)[0]
raw['publisher'] = tree.xpath(subtitle_selector)[0]
raw['source_url'] = response.url
raw['thumbnails'] = [tree.xpath(thumbnails_selector)[0]]
raw['time'] = tree.xpath(published_date_selector)[0]
raw['doc_id'] = tree.xpath(video_id_selector)[0]
raw['video_width'] = tree.xpath(width_selector)[0]
raw['video_height'] = tree.xpath(height_selector)[0]
raw['genre'] = tree.xpath(genre_selector)[0]
raw['hit_counts'] = tree.xpath(hitcount_selector)[0]
# 正则获取播放时间
m_value, s_value = \
re.findall('PT([0-9]+)M([0-9]+)S', tree.xpath(duration_raw_selector)[0])[0]
# second_value = re.findall('<meta itemprop="duration" content="PT[0-9]+M([0-9]+)S">', body_instance)[0]
raw['duration'] = int(m_value) * 60 + int(s_value)
# if raw['duration'] > self.max_duration:
# print('duration > %d' % self.max_duration)
# return
yield Request(
raw['source_url'],
headers=self.hd,
meta=raw,
dont_filter=True,
callback=self.parse_video_from_other
)
def parse_video_from_other(self, response):
target_url = "https://www.findyoutube.net/result"
post_dict = {
"url": response.url,
"submit": "Download"
}
r = requests.post(target_url, data=post_dict)
body_instance = r.content.replace('amp;', '')
tree = lxml.html.fromstring(body_instance)
video_selector = '/html/body/div[2]/div/div[1]/table/tbody/tr[3]/td[3]/button/a/@href'
raw = dict()
raw.update(response.meta)
raw['video'] = tree.xpath(video_selector)[0]
self.logger.warning("parse_video_from_other!!")
for request in self.parse_raw(raw):
yield request
def parse_video(self, response):
def _parse_stream_map(text):
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"s": [],
"type": []
}
videos = text.split(",")
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
videoinfo.get(key, []).append(unquote(value))
return videoinfo
ENCODING = {
# Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
# 3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
# MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],
# WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
ENCODING_KEYS = (
'extension',
'resolution',
'video_codec',
'profile',
'video_bitrate',
'audio_codec',
'audio_bitrate'
)
def _extract_fmt(text):
itag = re.findall('itag=(\d+)', text)
if itag and len(itag) is 1:
itag = int(itag[0])
attr = ENCODING.get(itag, None)
if not attr:
return itag, None
return itag, dict(zip(ENCODING_KEYS, attr))
content = response.body_as_unicode()
try:
player_conf = content[18 + content.find("ytplayer.config = "):]
bracket_count = 0
i = 0
for i, char in enumerate(player_conf):
if char == "{":
bracket_count += 1
elif char == "}":
bracket_count -= 1
if bracket_count == 0:
break
else:
self.logger.warning("Cannot get JSON from HTML")
index = i + 1
data = json.loads(player_conf[:index])
# self.logger.warning(data)
except Exception, e:
self.logger.warning(e)
return
stream_map = _parse_stream_map(data["args"]["url_encoded_fmt_stream_map"])
video_urls = stream_map["url"]
raw = dict()
raw.update(response.meta)
for i, url in enumerate(video_urls):
try:
fmt, fmt_data = _extract_fmt(url)
if fmt_data["extension"] == "mp4" and fmt_data["profile"] == "Baseline":
raw['video'] = url
self.logger.warning(url)
break
except KeyError:
continue
# self.logger.warning(raw)
for request in self.parse_raw(raw):
yield request
def normalize_thumbnails(self, article_info):
return self.normalize_thumbnails_fallvideo(article_info)
def download_video(self, article_info):
account = article_info['account']
video_url = article_info['video']
hd = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
'pragma': 'no-cache',
'cache-control': 'no-cache'
}
ta = ['23.95.180.17:13228', '23.95.180.43:13228', '23.95.180.86:13228', '23.95.180.135:13228',
'23.95.180.199:13228', '198.23.220.14:13228', '198.23.220.76:13228', '198.23.220.131:13228',
'198.23.220.171:13228', '198.23.220.216:13228']
tp = random.choice(ta)
proxies = {
'http': 'http://{}'.format(tp),
'https': 'http://{}'.format(tp)
}
r = requests.get(video_url, headers=hd, proxies=proxies, verify=False).content
# 过滤掉尺寸小于1的视频
if len(r) <= 1024 * 1024 * 1:
self.logger.warning('video size smaller than 1MB' + '(' + str(article_info['source_url'] + ')'))
return
account = article_info['account']
if account not in self.image_cache:
return
self.image_cache[account]['video_data'] = r
self.send_msg(article_info)
def get_title_from_raw(self, raw):
return raw['title']
def get_subtitle_from_raw(self, raw):
return raw['subtitle']
def get_thumbnails_from_raw(self, raw):
return raw['thumbnails']
def get_doc_id_from_raw(self, raw):
return hashlib.md5(raw['doc_id']).hexdigest()
def get_source_url_from_raw(self, raw):
return raw['source_url']
def get_time_from_raw(self, raw):
return str(datetime.datetime.now())[:19]
def get_html_from_raw(self, raw):
return ''
def get_content_from_raw(self, raw):
return []
def get_duration_from_raw(self, raw):
return raw['duration']
def get_video_from_raw(self, raw):
return raw['video']
def get_raw_tags_from_raw(self, raw):
return [u'触宝_视频']
def title_duplicate(self, ttl):
ttl_md5 = hashlib.md5(ttl).hexdigest()
return bool(self.redis.hget('feeds_title', ttl_md5))
def get_chinese_name(self, sb):
tmd5 = hashlib.md5(sb).hexdigest()
ans = ''
for ind in range(29):
tmp = int(tmd5[ind: ind + 4], 16)
if 19968 <= tmp <= 40869:
ans += unichr(tmp)
if len(ans) >= 3:
return ans[-3:]
return u'美女如云'
def title_contain_chinese(self, sb):
ans = re.findall(ur'[\u4e00-\u9fa5]+', sb)
if not ans:
return False
tmax = max(map(lambda x: len(x), ans))
if tmax < 2:
return False
return True
def get_locale_from_raw(self, raw):
return 'en_US'
def get_locales_from_raw(self, raw):
return ['en_US']
| channel_url = self.channel_list.pop(0)
yield Request(
channel_url,
headers=self.hd,
dont_filter=True,
callback=self.parse_page
) | identifier_body |
youtube_tops_en_US_.py | # -*- coding: utf-8 -*-
from ..common_spider_es import *
from ..video_spider_es import VideoSpider
from ...feeds_back_utils import *
import datetime
from urllib import unquote
import re
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
import lxml
class YoutubeSpider(VideoSpider):
name = 'youtube_tops_en_US'
download_delay = 3
video_type = 'mp4'
datasource_type = 4
download_maxsize = 104857600 * 5
download_warnsize = 104857600 * 5
default_section = 60 * 60 * 24 * 1
hd = {'pragma': 'no-cache',
'User-Agent': '',
'cache-control': 'no-cache'}
response_url = None
content_list = []
channel_list = []
browse_times = 0
def __init__(self, *a, **kw):
super(YoutubeSpider, self).__init__(*a, **kw)
self.channel_list = get_channel_list('youtube_tops', 'United States of America')
dispatcher.connect(self.spider_idle, signals.spider_idle)
def spider_idle(self):
if self.channel_list:
for rq in self.start_requests():
self.crawler.engine.crawl(rq, self)
def start_requests(self):
channel_url = self.channel_list.pop(0)
yield Request(
channel_url,
headers=self.hd,
dont_filter=True,
callback=self.parse_page
)
def parse_page(self, response):
# print response.url
body_instance = response.body_as_unicode()
tree = lxml.html.fromstring(body_instance)
raw = {}
# title_selector = '//*[@id="page-container"]/div[2]/div/div/div[1]/div/div/div/div[@class="ProfileHeaderCard"]/h1[@class="ProfileHeaderCard-name"]/a/text()'
# title_selector = '//*[@id="watch-header"]/div[@id="watch7-headline"]/div[@id="watch-headline-title"]/h1[@class="watch-title-container"]/span/text()'
subtitle_selector = '//*[@id="watch-header"]/div[@id="watch7-user-header"]/div/a/text()'
thumbnails_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/span[@itemprop="thumbnail"]/link/@href'
duration_raw_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="duration"]/@content'
video_id_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="videoId"]/@content'
title_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="name"]/@content'
published_date_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="datePublished"]/@content'
hitcount_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="interactionCount"]/@content'
width_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="width"]/@content'
height_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="height"]/@content'
genre_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="genre"]/@content'
raw['title'] = tree.xpath(title_selector)[0].strip()
raw['subtitle'] = tree.xpath(subtitle_selector)[0]
raw['publisher'] = tree.xpath(subtitle_selector)[0]
raw['source_url'] = response.url
raw['thumbnails'] = [tree.xpath(thumbnails_selector)[0]]
raw['time'] = tree.xpath(published_date_selector)[0]
raw['doc_id'] = tree.xpath(video_id_selector)[0]
raw['video_width'] = tree.xpath(width_selector)[0]
raw['video_height'] = tree.xpath(height_selector)[0]
raw['genre'] = tree.xpath(genre_selector)[0]
raw['hit_counts'] = tree.xpath(hitcount_selector)[0]
# 正则获取播放时间
m_value, s_value = \
re.findall('PT([0-9]+)M([0-9]+)S', tree.xpath(duration_raw_selector)[0])[0]
# second_value = re.findall('<meta itemprop="duration" content="PT[0-9]+M([0-9]+)S">', body_instance)[0]
raw['duration'] = int(m_value) * 60 + int(s_value)
# if raw['duration'] > self.max_duration:
# print('duration > %d' % self.max_duration)
# return
yield Request(
raw['source_url'],
headers=self.hd,
meta=raw,
dont_filter=True,
callback=self.parse_video_from_other
)
def parse_video_from_other(self, response):
target_url = "https://www.findyoutube.net/result"
post_dict = {
"url": response.url,
"submit": "Download"
}
r = requests.post(target_url, data=post_dict)
body_instance = r.content.replace('amp;', '')
tree = lxml.html.fromstring(body_instance)
video_selector = '/html/body/div[2]/div/div[1]/table/tbody/tr[3]/td[3]/button/a/@href'
raw = dict()
raw.update(response.meta)
raw['video'] = tree.xpath(video_selector)[0]
self.logger.warning("parse_video_from_other!!")
for request in self.parse_raw(raw):
yield request
def parse_video(self, response):
def _parse_stream_map(text):
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"s": [],
"type": []
}
videos = text.split(",")
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
videoinfo.get(key, []).append(unquote(value))
return videoinfo
ENCODING = {
# Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
# 3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
# MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],
# WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
ENCODING_KEYS = (
'extension',
'resolution',
'video_codec',
'profile',
'video_bitrate',
'audio_codec',
'audio_bitrate'
)
def _extract_fmt(text):
itag = re.findall('itag=(\d+)', text)
if itag and len(itag) is 1:
itag = int(itag[0])
attr = ENCODING.get(itag, None)
if not attr:
return itag, None
return itag, dict(zip(ENCODING_KEYS, attr))
content = response.body_as_unicode()
try:
player_conf = content[18 + content.find("ytplayer.config = "):]
bracket_count = 0
i = 0
for i, char in enumerate(player_conf):
if char == "{":
bracket_count += 1
elif char == "}":
bracket_count -= 1 | else:
self.logger.warning("Cannot get JSON from HTML")
index = i + 1
data = json.loads(player_conf[:index])
# self.logger.warning(data)
except Exception, e:
self.logger.warning(e)
return
stream_map = _parse_stream_map(data["args"]["url_encoded_fmt_stream_map"])
video_urls = stream_map["url"]
raw = dict()
raw.update(response.meta)
for i, url in enumerate(video_urls):
try:
fmt, fmt_data = _extract_fmt(url)
if fmt_data["extension"] == "mp4" and fmt_data["profile"] == "Baseline":
raw['video'] = url
self.logger.warning(url)
break
except KeyError:
continue
# self.logger.warning(raw)
for request in self.parse_raw(raw):
yield request
def normalize_thumbnails(self, article_info):
return self.normalize_thumbnails_fallvideo(article_info)
def download_video(self, article_info):
account = article_info['account']
video_url = article_info['video']
hd = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
'pragma': 'no-cache',
'cache-control': 'no-cache'
}
ta = ['23.95.180.17:13228', '23.95.180.43:13228', '23.95.180.86:13228', '23.95.180.135:13228',
'23.95.180.199:13228', '198.23.220.14:13228', '198.23.220.76:13228', '198.23.220.131:13228',
'198.23.220.171:13228', '198.23.220.216:13228']
tp = random.choice(ta)
proxies = {
'http': 'http://{}'.format(tp),
'https': 'http://{}'.format(tp)
}
r = requests.get(video_url, headers=hd, proxies=proxies, verify=False).content
# 过滤掉尺寸小于1的视频
if len(r) <= 1024 * 1024 * 1:
self.logger.warning('video size smaller than 1MB' + '(' + str(article_info['source_url'] + ')'))
return
account = article_info['account']
if account not in self.image_cache:
return
self.image_cache[account]['video_data'] = r
self.send_msg(article_info)
def get_title_from_raw(self, raw):
return raw['title']
def get_subtitle_from_raw(self, raw):
return raw['subtitle']
def get_thumbnails_from_raw(self, raw):
return raw['thumbnails']
def get_doc_id_from_raw(self, raw):
return hashlib.md5(raw['doc_id']).hexdigest()
def get_source_url_from_raw(self, raw):
return raw['source_url']
def get_time_from_raw(self, raw):
return str(datetime.datetime.now())[:19]
def get_html_from_raw(self, raw):
return ''
def get_content_from_raw(self, raw):
return []
def get_duration_from_raw(self, raw):
return raw['duration']
def get_video_from_raw(self, raw):
return raw['video']
def get_raw_tags_from_raw(self, raw):
return [u'触宝_视频']
def title_duplicate(self, ttl):
ttl_md5 = hashlib.md5(ttl).hexdigest()
return bool(self.redis.hget('feeds_title', ttl_md5))
def get_chinese_name(self, sb):
tmd5 = hashlib.md5(sb).hexdigest()
ans = ''
for ind in range(29):
tmp = int(tmd5[ind: ind + 4], 16)
if 19968 <= tmp <= 40869:
ans += unichr(tmp)
if len(ans) >= 3:
return ans[-3:]
return u'美女如云'
def title_contain_chinese(self, sb):
ans = re.findall(ur'[\u4e00-\u9fa5]+', sb)
if not ans:
return False
tmax = max(map(lambda x: len(x), ans))
if tmax < 2:
return False
return True
def get_locale_from_raw(self, raw):
return 'en_US'
def get_locales_from_raw(self, raw):
return ['en_US'] | if bracket_count == 0:
break | random_line_split |
youtube_tops_en_US_.py | # -*- coding: utf-8 -*-
from ..common_spider_es import *
from ..video_spider_es import VideoSpider
from ...feeds_back_utils import *
import datetime
from urllib import unquote
import re
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
import lxml
class YoutubeSpider(VideoSpider):
name = 'youtube_tops_en_US'
download_delay = 3
video_type = 'mp4'
datasource_type = 4
download_maxsize = 104857600 * 5
download_warnsize = 104857600 * 5
default_section = 60 * 60 * 24 * 1
hd = {'pragma': 'no-cache',
'User-Agent': '',
'cache-control': 'no-cache'}
response_url = None
content_list = []
channel_list = []
browse_times = 0
def __init__(self, *a, **kw):
super(YoutubeSpider, self).__init__(*a, **kw)
self.channel_list = get_channel_list('youtube_tops', 'United States of America')
dispatcher.connect(self.spider_idle, signals.spider_idle)
def spider_idle(self):
if self.channel_list:
for rq in self.start_requests():
self.crawler.engine.crawl(rq, self)
def start_requests(self):
channel_url = self.channel_list.pop(0)
yield Request(
channel_url,
headers=self.hd,
dont_filter=True,
callback=self.parse_page
)
def parse_page(self, response):
# print response.url
body_instance = response.body_as_unicode()
tree = lxml.html.fromstring(body_instance)
raw = {}
# title_selector = '//*[@id="page-container"]/div[2]/div/div/div[1]/div/div/div/div[@class="ProfileHeaderCard"]/h1[@class="ProfileHeaderCard-name"]/a/text()'
# title_selector = '//*[@id="watch-header"]/div[@id="watch7-headline"]/div[@id="watch-headline-title"]/h1[@class="watch-title-container"]/span/text()'
subtitle_selector = '//*[@id="watch-header"]/div[@id="watch7-user-header"]/div/a/text()'
thumbnails_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/span[@itemprop="thumbnail"]/link/@href'
duration_raw_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="duration"]/@content'
video_id_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="videoId"]/@content'
title_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="name"]/@content'
published_date_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="datePublished"]/@content'
hitcount_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="interactionCount"]/@content'
width_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="width"]/@content'
height_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="height"]/@content'
genre_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="genre"]/@content'
raw['title'] = tree.xpath(title_selector)[0].strip()
raw['subtitle'] = tree.xpath(subtitle_selector)[0]
raw['publisher'] = tree.xpath(subtitle_selector)[0]
raw['source_url'] = response.url
raw['thumbnails'] = [tree.xpath(thumbnails_selector)[0]]
raw['time'] = tree.xpath(published_date_selector)[0]
raw['doc_id'] = tree.xpath(video_id_selector)[0]
raw['video_width'] = tree.xpath(width_selector)[0]
raw['video_height'] = tree.xpath(height_selector)[0]
raw['genre'] = tree.xpath(genre_selector)[0]
raw['hit_counts'] = tree.xpath(hitcount_selector)[0]
# 正则获取播放时间
m_value, s_value = \
re.findall('PT([0-9]+)M([0-9]+)S', tree.xpath(duration_raw_selector)[0])[0]
# second_value = re.findall('<meta itemprop="duration" content="PT[0-9]+M([0-9]+)S">', body_instance)[0]
raw['duration'] = int(m_value) * 60 + int(s_value)
# if raw['duration'] > self.max_duration:
# print('duration > %d' % self.max_duration)
# return
yield Request(
raw['source_url'],
headers=self.hd,
meta=raw,
dont_filter=True,
callback=self.parse_video_from_other
)
def parse_video_from_other(self, response):
target_url = "https://www.findyoutube.net/result"
post_dict = {
"url": response.url,
"submit": "Download"
}
r = requests.post(target_url, data=post_dict)
body_instance = r.content.replace('amp;', '')
tree = lxml.html.fromstring(body_instance)
video_selector = '/html/body/div[2]/div/div[1]/table/tbody/tr[3]/td[3]/button/a/@href'
raw = dict()
raw.update(response.meta)
raw['video'] = tree.xpath(video_selector)[0]
self.logger.warning("parse_video_from_other!!")
for request in self.parse_raw(raw):
yield request
def parse_video(self, response):
def _parse_stream_map(text):
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"s": [],
"type": []
}
videos = text.split(",")
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
videoinfo.get(key, []).append(unquote(value))
return videoinfo
ENCODING = {
# Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
# 3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
# MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],
# WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
ENCODING_KEYS = (
'extension',
'resolution',
'video_codec',
'profile',
'video_bitrate',
'audio_codec',
'audio_bitrate'
)
def _extract_fmt(text):
itag = re.findall('itag=(\d+)', text)
if itag and len(itag) is 1:
itag = int(itag[0])
attr = ENCODING.get(itag, None)
if not attr:
return itag, None
return itag, dict(zip(ENCODING_KEYS, attr))
content = response.body_as_unicode()
try:
player_conf = content[18 + content.find("ytplayer.config = "):]
bracket_count = 0
i = 0
for i, char in enumerate(player_conf):
if char == "{":
bracket_count += 1
elif char == "}":
bracket_count -= 1
if bracket_count == 0:
break
else:
self.logger.warning("Cannot get JSON from HTML")
index = i + 1
data = json.loads(player_conf[:index])
# self.logger.warning(data)
except Exception, e:
self.logger.warning(e)
return
stream_map = _parse_stream_map(data["args"]["url_encoded_fmt_stream_map"])
video_urls = stream_map["url"]
raw = dict()
raw.update(response.meta)
for i, url in enumerate(video_urls):
try:
fmt, fmt_data = _extract_fmt(url)
if fmt_data["extension"] == "mp4" and fmt_data["profile"] == "Baseline":
raw['video'] = url
self.logger.warning(url)
break
except KeyError:
continue
# self.logger.warning(raw)
for request in self.parse_raw(raw):
yield request
def normalize_thumbnails(self, article_info):
return self.normalize_thumbnails_fallvideo(article_info)
def download_video(self, article_info):
account = article_info['account']
video_url = article_info['video']
hd = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
'pragma': 'no-cache',
'cache-control': 'no-cache'
}
ta = ['23.95.180.17:13228', '23.95.180.43:13228', '23.95.180.86:13228', '23.95.180.135:13228',
'23.95.180.199:13228', '198.23.220.14:13228', '198.23.220.76:13228', '198.23.220.131:13228',
'198.23.220.171:13228', '198.23.220.216:13228']
tp = random.choice(ta)
proxies = {
'http': 'http://{}'.format(tp),
'https': 'http://{}'.format(tp)
}
r = requests.get(video_url, headers=hd, proxies=proxies, verify=False).content
# 过滤掉尺寸小于1的视频
if len(r) <= 1024 * 1024 * 1:
self.logger.warning('video size smaller than 1MB' + '(' + str(article_info['source_url'] + ')'))
return
account = article_info['account']
if account not in self.image_cache:
return
self.image_cache[account]['video_data'] = r
self.send_msg(article_info)
def get_title_from_raw(self, raw):
return raw['title']
def get_subtitle_from_raw(self, raw):
return raw['subtitle']
def get_thumbnails_from_raw(self, raw):
return raw['thumbnails']
def get_doc_id_from_raw(self, raw):
| md5(raw['doc_id']).hexdigest()
def get_source_url_from_raw(self, raw):
return raw['source_url']
def get_time_from_raw(self, raw):
return str(datetime.datetime.now())[:19]
def get_html_from_raw(self, raw):
return ''
def get_content_from_raw(self, raw):
return []
def get_duration_from_raw(self, raw):
return raw['duration']
def get_video_from_raw(self, raw):
return raw['video']
def get_raw_tags_from_raw(self, raw):
return [u'触宝_视频']
def title_duplicate(self, ttl):
ttl_md5 = hashlib.md5(ttl).hexdigest()
return bool(self.redis.hget('feeds_title', ttl_md5))
def get_chinese_name(self, sb):
tmd5 = hashlib.md5(sb).hexdigest()
ans = ''
for ind in range(29):
tmp = int(tmd5[ind: ind + 4], 16)
if 19968 <= tmp <= 40869:
ans += unichr(tmp)
if len(ans) >= 3:
return ans[-3:]
return u'美女如云'
def title_contain_chinese(self, sb):
ans = re.findall(ur'[\u4e00-\u9fa5]+', sb)
if not ans:
return False
tmax = max(map(lambda x: len(x), ans))
if tmax < 2:
return False
return True
def get_locale_from_raw(self, raw):
return 'en_US'
def get_locales_from_raw(self, raw):
return ['en_US']
| return hashlib. | identifier_name |
youtube_tops_en_US_.py | # -*- coding: utf-8 -*-
from ..common_spider_es import *
from ..video_spider_es import VideoSpider
from ...feeds_back_utils import *
import datetime
from urllib import unquote
import re
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
import lxml
class YoutubeSpider(VideoSpider):
name = 'youtube_tops_en_US'
download_delay = 3
video_type = 'mp4'
datasource_type = 4
download_maxsize = 104857600 * 5
download_warnsize = 104857600 * 5
default_section = 60 * 60 * 24 * 1
hd = {'pragma': 'no-cache',
'User-Agent': '',
'cache-control': 'no-cache'}
response_url = None
content_list = []
channel_list = []
browse_times = 0
def __init__(self, *a, **kw):
super(YoutubeSpider, self).__init__(*a, **kw)
self.channel_list = get_channel_list('youtube_tops', 'United States of America')
dispatcher.connect(self.spider_idle, signals.spider_idle)
def spider_idle(self):
if self.channel_list:
for rq in self.start_requests():
self.crawler.engine.crawl(rq, self)
def start_requests(self):
channel_url = self.channel_list.pop(0)
yield Request(
channel_url,
headers=self.hd,
dont_filter=True,
callback=self.parse_page
)
def parse_page(self, response):
# print response.url
body_instance = response.body_as_unicode()
tree = lxml.html.fromstring(body_instance)
raw = {}
# title_selector = '//*[@id="page-container"]/div[2]/div/div/div[1]/div/div/div/div[@class="ProfileHeaderCard"]/h1[@class="ProfileHeaderCard-name"]/a/text()'
# title_selector = '//*[@id="watch-header"]/div[@id="watch7-headline"]/div[@id="watch-headline-title"]/h1[@class="watch-title-container"]/span/text()'
subtitle_selector = '//*[@id="watch-header"]/div[@id="watch7-user-header"]/div/a/text()'
thumbnails_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/span[@itemprop="thumbnail"]/link/@href'
duration_raw_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="duration"]/@content'
video_id_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="videoId"]/@content'
title_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="name"]/@content'
published_date_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="datePublished"]/@content'
hitcount_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="interactionCount"]/@content'
width_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="width"]/@content'
height_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="height"]/@content'
genre_selector = '//*[@id="watch7-main-container"]/div[@id="watch7-main"]/div[@id="watch7-content"]/meta[@itemprop="genre"]/@content'
raw['title'] = tree.xpath(title_selector)[0].strip()
raw['subtitle'] = tree.xpath(subtitle_selector)[0]
raw['publisher'] = tree.xpath(subtitle_selector)[0]
raw['source_url'] = response.url
raw['thumbnails'] = [tree.xpath(thumbnails_selector)[0]]
raw['time'] = tree.xpath(published_date_selector)[0]
raw['doc_id'] = tree.xpath(video_id_selector)[0]
raw['video_width'] = tree.xpath(width_selector)[0]
raw['video_height'] = tree.xpath(height_selector)[0]
raw['genre'] = tree.xpath(genre_selector)[0]
raw['hit_counts'] = tree.xpath(hitcount_selector)[0]
# 正则获取播放时间
m_value, s_value = \
re.findall('PT([0-9]+)M([0-9]+)S', tree.xpath(duration_raw_selector)[0])[0]
# second_value = re.findall('<meta itemprop="duration" content="PT[0-9]+M([0-9]+)S">', body_instance)[0]
raw['duration'] = int(m_value) * 60 + int(s_value)
# if raw['duration'] > self.max_duration:
# print('duration > %d' % self.max_duration)
# return
yield Request(
raw['source_url'],
headers=self.hd,
meta=raw,
dont_filter=True,
callback=self.parse_video_from_other
)
def parse_video_from_other(self, response):
target_url = "https://www.findyoutube.net/result"
post_dict = {
"url": response.url,
"submit": "Download"
}
r = requests.post(target_url, data=post_dict)
body_instance = r.content.replace('amp;', '')
tree = lxml.html.fromstring(body_instance)
video_selector = '/html/body/div[2]/div/div[1]/table/tbody/tr[3]/td[3]/button/a/@href'
raw = dict()
raw.update(response.meta)
raw['video'] = tree.xpath(video_selector)[0]
self.logger.warning("parse_video_from_other!!")
for request in self.parse_raw(raw):
yield request
| video(self, response):
def _parse_stream_map(text):
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"s": [],
"type": []
}
videos = text.split(",")
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
videoinfo.get(key, []).append(unquote(value))
return videoinfo
ENCODING = {
# Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
# 3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
# MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],
# WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
ENCODING_KEYS = (
'extension',
'resolution',
'video_codec',
'profile',
'video_bitrate',
'audio_codec',
'audio_bitrate'
)
def _extract_fmt(text):
itag = re.findall('itag=(\d+)', text)
if itag and len(itag) is 1:
itag = int(itag[0])
attr = ENCODING.get(itag, None)
if not attr:
return itag, None
return itag, dict(zip(ENCODING_KEYS, attr))
content = response.body_as_unicode()
try:
player_conf = content[18 + content.find("ytplayer.config = "):]
bracket_count = 0
i = 0
for i, char in enumerate(player_conf):
if char == "{":
bracket_count += 1
elif char == "}":
bracket_count -= 1
if bracket_count == 0:
break
else:
self.logger.warning("Cannot get JSON from HTML")
index = i + 1
data = json.loads(player_conf[:index])
# self.logger.warning(data)
except Exception, e:
self.logger.warning(e)
return
stream_map = _parse_stream_map(data["args"]["url_encoded_fmt_stream_map"])
video_urls = stream_map["url"]
raw = dict()
raw.update(response.meta)
for i, url in enumerate(video_urls):
try:
fmt, fmt_data = _extract_fmt(url)
if fmt_data["extension"] == "mp4" and fmt_data["profile"] == "Baseline":
raw['video'] = url
self.logger.warning(url)
break
except KeyError:
continue
# self.logger.warning(raw)
for request in self.parse_raw(raw):
yield request
def normalize_thumbnails(self, article_info):
return self.normalize_thumbnails_fallvideo(article_info)
def download_video(self, article_info):
account = article_info['account']
video_url = article_info['video']
hd = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
'pragma': 'no-cache',
'cache-control': 'no-cache'
}
ta = ['23.95.180.17:13228', '23.95.180.43:13228', '23.95.180.86:13228', '23.95.180.135:13228',
'23.95.180.199:13228', '198.23.220.14:13228', '198.23.220.76:13228', '198.23.220.131:13228',
'198.23.220.171:13228', '198.23.220.216:13228']
tp = random.choice(ta)
proxies = {
'http': 'http://{}'.format(tp),
'https': 'http://{}'.format(tp)
}
r = requests.get(video_url, headers=hd, proxies=proxies, verify=False).content
# 过滤掉尺寸小于1的视频
if len(r) <= 1024 * 1024 * 1:
self.logger.warning('video size smaller than 1MB' + '(' + str(article_info['source_url'] + ')'))
return
account = article_info['account']
if account not in self.image_cache:
return
self.image_cache[account]['video_data'] = r
self.send_msg(article_info)
def get_title_from_raw(self, raw):
return raw['title']
def get_subtitle_from_raw(self, raw):
return raw['subtitle']
def get_thumbnails_from_raw(self, raw):
return raw['thumbnails']
def get_doc_id_from_raw(self, raw):
return hashlib.md5(raw['doc_id']).hexdigest()
def get_source_url_from_raw(self, raw):
return raw['source_url']
def get_time_from_raw(self, raw):
return str(datetime.datetime.now())[:19]
def get_html_from_raw(self, raw):
return ''
def get_content_from_raw(self, raw):
return []
def get_duration_from_raw(self, raw):
return raw['duration']
def get_video_from_raw(self, raw):
return raw['video']
def get_raw_tags_from_raw(self, raw):
return [u'触宝_视频']
def title_duplicate(self, ttl):
ttl_md5 = hashlib.md5(ttl).hexdigest()
return bool(self.redis.hget('feeds_title', ttl_md5))
def get_chinese_name(self, sb):
tmd5 = hashlib.md5(sb).hexdigest()
ans = ''
for ind in range(29):
tmp = int(tmd5[ind: ind + 4], 16)
if 19968 <= tmp <= 40869:
ans += unichr(tmp)
if len(ans) >= 3:
return ans[-3:]
return u'美女如云'
def title_contain_chinese(self, sb):
ans = re.findall(ur'[\u4e00-\u9fa5]+', sb)
if not ans:
return False
tmax = max(map(lambda x: len(x), ans))
if tmax < 2:
return False
return True
def get_locale_from_raw(self, raw):
return 'en_US'
def get_locales_from_raw(self, raw):
return ['en_US']
| def parse_ | conditional_block |
generate_go_schema.go | /*
Copyright (c) 2016 IBM Corporation and other Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Contributors:
Kim Letkeman - Initial Contribution
*/
// IoT Blockchain Demo Smart Contract
// KL 03 Mar 2016 Generate schema and event subschema Go files for contract v3.1
// KL 04-07 Mar 2016 testing of schema, adaptation of output to contract 3.0.2,
// addition of config file generate.yaml
// KL 13 Mar 2016 Changed from yaml (lesser GPL) to JSON for config
// KL 8 June 2016 Supporting complex events and the "oneOf" keyword, better support
// for arrays, cleanup lint issues
package main
import (
"bufio"
"io/ioutil"
"os"
"strings"
"fmt"
"encoding/json"
"time"
"path/filepath"
)
// Config defines contents of "generate.json" colocated in scripts folder with this script
type Config struct {
Schemas struct {
SchemaFilename string `json:"schemaFilename"`
GoSchemaFilename string `json:"goSchemaFilename"`
GoSchemaElements []string `json:"goSchemaElements"`
API []string `json:"API"`
} `json:"schemas"`
Samples struct {
GoSampleFilename string `json:"goSampleFilename"`
GoSampleElements []string `json:"goSampleElements"`
} `json:"samples"`
ObjectModels struct {
ObjectModelElements []string `json:"generateGoObjectsFrom"`
} `json:"objectModels"`
}
// can print very accurate syntax errors as found by the JSON marshaler
// relies on the offset table created when reading the schema JSON file and expunging
// comments and blank lines
func printSyntaxError(js string, off *[5000]int, err interface{}) {
syntax, ok := err.(*json.SyntaxError)
if !ok {
fmt.Println("*********** ERR trying to get syntax error location **************\n", err)
return
}
start, end := strings.LastIndex(js[:syntax.Offset], "\n")+1, len(js)
if idx := strings.Index(js[start:], "\n"); idx >= 0 {
end = start + idx
}
line, pos := strings.Count(js[:start], "\n"), int(syntax.Offset) - start -1
fmt.Printf("Error in line %d: %s \n", off[line]+1, err)
fmt.Printf("%s\n%s^\n\n", js[start:end], strings.Repeat(" ", pos))
}
// retrieves a subschema object via the reference path; handles root node references and
// references starting after definitions; does not handle external file references yet
func getObject (schema map[string]interface{}, objName string) (map[string]interface{}) {
// return a copy of the selected object
// handles full path, or path starting after definitions
if !strings.HasPrefix(objName, "#/definitions/") {
objName = "#/definitions/" + objName
}
s := strings.Split(objName, "/")
// crawl the levels, skipping the # root
for i := 1; i < len(s); i++ {
props, found := (schema["properties"]).(map[string]interface{})
if found {
schema, found = (props[s[i]]).(map[string]interface{})
} else {
schema, found = (schema[s[i]]).(map[string]interface{})
}
if !found {
fmt.Printf("schema[s[i]] called %s looks like: %+v\n", objName, schema[s[i]])
fmt.Printf("** ERR ** getObject illegal selector %s at level %d called %s\n", objName, i, s[i])
return nil
}
}
return schema
}
// replaces all references recursively in the passed-in object (subschema) using the passed-in schema
func replaceReferences (schema map[string]interface{}, obj interface{}) (interface{}) {
oArr, isArr := obj.([]interface{})
oMap, isMap := obj.(map[string]interface{})
switch {
default:
return obj
case isArr:
//fmt.Printf("ARR [%s:%+v]\n", k, v)
for k, v := range oArr {
r, found := v.(map[string]interface{})
if found {
ref, found := r["$ref"]
if found {
// it is a reference so replace it and recursively replace references
oArr[k] = replaceReferences(schema, getObject(schema, ref.(string)))
} else {
oArr[k] = replaceReferences(schema, v)
}
} else {
//fmt.Printf("** WARN ** array member not a map object [%d:%+v]\n", k, v)
}
}
return oArr
case isMap:
//fmt.Printf("MAP [%s:%+v]\n", k, v)
for k, v := range oMap {
if k == "$ref" {
// it is a reference so replace it and recursively replace references
//fmt.Printf("** INFO ** Should be $ref [%s:%+v]\n", k, v)
oMap = replaceReferences(schema, getObject(schema, v.(string))).(map[string]interface{})
} else {
oMap[k] = replaceReferences(schema, v)
}
}
return oMap
}
}
// If a reference exists at any level in the passed-in schema, this will return true
// Recurses through every level of the map
func referencesExist (schema map[string]interface{}) (bool) {
_, exists := schema["$ref"]
if exists {
return true
}
for _, v := range schema {
switch v.(type) {
case map[string]interface{}:
if referencesExist(v.(map[string]interface{})) {
return true
}
}
}
return false
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func generateGoSchemaFile(schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var schemas = make(map[string]interface{})
var outString = "package main\n\nvar schemas = `\n"
var filename = config.Schemas.GoSchemaFilename
var apiFunctions = config.Schemas.API
var elementNames = config.Schemas.GoSchemaElements
var functionKey = "API"
var objectModelKey = "objectModelSchemas"
schemas[functionKey] = interface{}(make(map[string]interface{}))
schemas[objectModelKey] = interface{}(make(map[string]interface{}))
fmt.Printf("Generate Go SCHEMA file %s for: \n %s and: \n %s\n", filename, apiFunctions, elementNames)
// grab the event API functions for input
for i := range apiFunctions {
functionSchemaName := "#/definitions/API/" + apiFunctions[i]
functionName := apiFunctions[i]
obj = getObject(schema, functionSchemaName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", functionSchemaName)
return
}
schemas[functionKey].(map[string]interface{})[functionName] = obj
}
// grab the elements requested (these are useful separately even though
// they obviously appear already as part of the event API functions)
for i := range elementNames {
elementName := elementNames[i]
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** ERR ** %s returned nil from getObject\n", elementName)
return
}
schemas[objectModelKey].(map[string]interface{})[elementName] = obj
}
// marshal for output to file
schemaOut, err := json.MarshalIndent(&schemas, "", " ")
if err != nil {
fmt.Printf("** ERR ** cannot marshal schema file output for writing\n")
return
}
outString += string(schemaOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func sampleType(obj interface{}, elementName string) (interface{}) {
o, found := obj.(map[string]interface{})
if (!found) {
return "SCHEMA ELEMENT " + elementName + " IS NOT MAP"
}
t, found := o["type"].(string)
if !found {
//fmt.Printf("** WARN ** Element %s has no type field\n", elementName)
//fmt.Printf("Element missing type is: %s [%v]\n\n", elementName, o)
if elementName == "oneOf" {
return o
}
return "NO TYPE PROPERTY"
}
switch t {
default :
fmt.Printf("** WARN ** Unknown type in sampleType %s\n", t)
case "number" :
return 123.456
case "integer" :
return 789
case "string" :
if strings.ToLower(elementName) == "timestamp" {
return time.Now().Format(time.RFC3339Nano)
}
example, found := o["example"].(string)
if found && len(example) > 0 {
return example
}
def, found := o["default"].(string)
if found && len(def) > 0 {
return def
}
enum, found := o["enum"].([]interface{})
if found {
if len(enum) > 1 {
return enum[1]
}
if len(enum) > 0 {
return enum[0]
}
}
// description is a good alternate choice for sample data since it
// explains the prospective contents
desc, found := o["description"].(string)
if found && len(desc) > 0 {
return desc
}
// if nothing else ...
return "carpe noctem"
case "null" :
return nil
case "boolean" :
return true
case "array" :
var items, found = o["items"].(map[string]interface{})
if (!found) {
fmt.Printf("** WARN ** Element %s is array with no items property\n", elementName)
return "ARRAY WITH NO ITEMS PROPERTY"
}
return arrayFromSchema(items, elementName)
case "object" : {
props, found := o["properties"]
if !found {
fmt.Printf("** WARN ** %s is type object yet has no properties in SampleType\n", elementName)
return "INVALID OBJECT - MISSING PROPERTIES"
}
objOut := make(map[string]interface{})
for k, v := range props.(map[string]interface{}) {
//fmt.Printf("Visiting key %s with value %s\n", k, v)
if v == nil {
fmt.Printf("** WARN ** Key %s has NIL value in SampleType\n", k)
return "INVALID OBJECT - " + fmt.Sprintf("Key %s has NIL value in SampleType\n", k)
}
aArr, isArr := v.([]interface{})
aMap, isMap := v.(map[string]interface{})
switch {
case isArr:
if "oneOf" == k {
aOut := make([]interface{}, len(aArr))
// outer loop is anonymous objects
for k2, v2 := range aArr {
//fmt.Printf("SAMTYP outer OneOf: %d [%v]\n", k2, v2)
vObj, found := v2.(map[string]interface{})
if found {
// inner loop should find one named object
for k3, v3 := range vObj {
tmp := make(map[string]interface{}, 1)
//fmt.Printf("SAMTYP inner OneOf: %s [%v]\n", k3, v3)
//printObject(k3, v3)
tmp[k3] = sampleType(v3, k3)
aOut[k2] = tmp
}
}
objOut[k] = aOut
}
} else {
objOut[k] = "UNKNOWN ARRAY OBJECT"
}
case isMap:
objOut[k] = sampleType(aMap, k)
}
}
return objOut
}
}
fmt.Printf("** WARN ** UNKNOWN TYPE in SampleType: %s\n", t)
return fmt.Sprintf("UNKNOWN TYPE in SampleType: %s\n", t)
}
func printObject(elementName string, obj interface{}) {
| aArr, isArr := obj.([]interface{})
switch {
case isArr:
fmt.Printf("Element: %s is an ARRAY\n", elementName)
for k, v := range aArr {
fmt.Printf("[%d] : %+v\n\n", k, v)
}
case isMap:
fmt.Printf("Element: %s is a MAP\n", elementName)
for k, v := range aMap {
fmt.Printf("[%s] : %+v\n\n", k, v)
}
default:
fmt.Printf("Element: %s is of UNKNOWN shape\n", elementName)
}
}
// Generate a sample array from a schema
func arrayFromSchema(schema map[string]interface{}, elementName string) (interface{}) {
enum, found := schema["enum"]
if found {
// there is a set of enums, just use it
return enum
}
return []interface{}{sampleType(schema, elementName)}
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func generateGoSampleFile(schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var samples = make(map[string]interface{})
var outString = "package main\n\nvar samples = `\n"
var filename = config.Samples.GoSampleFilename
var elementNames = config.Samples.GoSampleElements
fmt.Printf("Generate Go SAMPLE file %s for: \n %s\n", filename, elementNames)
for i := range elementNames {
elementName := elementNames[i]
if elementName == "schema" {
// sample of the entire schema, can it even work?
obj = schema
} else {
// use the schema subset
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", elementName)
return
}
}
samples[elementName] = sampleType(obj, elementName)
}
samplesOut, err := json.MarshalIndent(&samples, "", " ")
if err != nil {
fmt.Println("** ERR ** cannot marshal sample file output for writing")
return
}
outString += string(samplesOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func generateGoObjectModel(schema map[string]interface{}, config Config) () {
for i := range config.ObjectModels.ObjectModelElements {
fmt.Println("Generating object model for: ",
config.ObjectModels.ObjectModelElements[i])
obj := getObject(schema, config.ObjectModels.ObjectModelElements[i])
fmt.Printf("%s: %s\n\n", config.ObjectModels.ObjectModelElements[i], obj)
}
}
// Reads payloadschema.json api file
// encodes as a string literal in payloadschema.go
func main() {
var configFileName = "generate.json"
var api string
var line = 1
var lineOut = 1
var offsets [5000]int
// read the configuration from the json file
filename, _ := filepath.Abs("./scripts/" + configFileName)
fmt.Printf("JSON CONFIG FILEPATH:\n %s\n", filename)
jsonFile, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Println("error reading json file")
panic(err)
}
var config Config
err = json.Unmarshal(jsonFile, &config)
if err != nil {
fmt.Println("error unmarshaling json config")
panic(err)
}
// read the schema file, stripping comments and blank lines, calculate offsets for error output
file, err := os.Open(config.Schemas.SchemaFilename)
if err != nil {
fmt.Printf("** ERR ** [%s] opening schema file at %s\n", err, config.Schemas.SchemaFilename)
return
}
defer file.Close()
reader := bufio.NewReader(file)
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ts := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(ts, "#") {
fmt.Println("Line: ", line, " is a comment")
} else if ts == "" {
fmt.Println("Line: ", line, " is blank")
} else {
api += ts + "\n"
lineOut++
}
offsets[lineOut] = line
line++
}
// verify the JSON format by unmarshaling it into a map
var schema map[string]interface{}
err = json.Unmarshal([]byte(api), &schema)
if err != nil {
fmt.Println("*********** UNMARSHAL ERR **************\n", err)
printSyntaxError(api, &offsets, err)
return
}
// Looks tricky, but simply creates an output with references resolved
// from the schema, and another object and passes it back. I used to
// call it for each object, but much simpler to call it once for the
// whole schema and simply pick off the objects we want for subschemas
// and samples.
schema = replaceReferences(schema, schema).(map[string]interface{})
// generate the Go files that the contract needs -- for now, complete schema and
// event schema and sample object
generateGoSchemaFile(schema, config)
generateGoSampleFile(schema, config)
// experimental
//generateGoObjectModel(schema, config)
// TODO generate js object model?? Java??
} | aMap, isMap := obj.(map[string]interface{})
| random_line_split |
generate_go_schema.go | /*
Copyright (c) 2016 IBM Corporation and other Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Contributors:
Kim Letkeman - Initial Contribution
*/
// IoT Blockchain Demo Smart Contract
// KL 03 Mar 2016 Generate schema and event subschema Go files for contract v3.1
// KL 04-07 Mar 2016 testing of schema, adaptation of output to contract 3.0.2,
// addition of config file generate.yaml
// KL 13 Mar 2016 Changed from yaml (lesser GPL) to JSON for config
// KL 8 June 2016 Supporting complex events and the "oneOf" keyword, better support
// for arrays, cleanup lint issues
package main
import (
"bufio"
"io/ioutil"
"os"
"strings"
"fmt"
"encoding/json"
"time"
"path/filepath"
)
// Config defines contents of "generate.json" colocated in scripts folder with this script
type Config struct {
Schemas struct {
SchemaFilename string `json:"schemaFilename"`
GoSchemaFilename string `json:"goSchemaFilename"`
GoSchemaElements []string `json:"goSchemaElements"`
API []string `json:"API"`
} `json:"schemas"`
Samples struct {
GoSampleFilename string `json:"goSampleFilename"`
GoSampleElements []string `json:"goSampleElements"`
} `json:"samples"`
ObjectModels struct {
ObjectModelElements []string `json:"generateGoObjectsFrom"`
} `json:"objectModels"`
}
// can print very accurate syntax errors as found by the JSON marshaler
// relies on the offset table created when reading the schema JSON file and expunging
// comments and blank lines
func printSyntaxError(js string, off *[5000]int, err interface{}) {
syntax, ok := err.(*json.SyntaxError)
if !ok {
fmt.Println("*********** ERR trying to get syntax error location **************\n", err)
return
}
start, end := strings.LastIndex(js[:syntax.Offset], "\n")+1, len(js)
if idx := strings.Index(js[start:], "\n"); idx >= 0 {
end = start + idx
}
line, pos := strings.Count(js[:start], "\n"), int(syntax.Offset) - start -1
fmt.Printf("Error in line %d: %s \n", off[line]+1, err)
fmt.Printf("%s\n%s^\n\n", js[start:end], strings.Repeat(" ", pos))
}
// retrieves a subschema object via the reference path; handles root node references and
// references starting after definitions; does not handle external file references yet
func getObject (schema map[string]interface{}, objName string) (map[string]interface{}) {
// return a copy of the selected object
// handles full path, or path starting after definitions
if !strings.HasPrefix(objName, "#/definitions/") {
objName = "#/definitions/" + objName
}
s := strings.Split(objName, "/")
// crawl the levels, skipping the # root
for i := 1; i < len(s); i++ {
props, found := (schema["properties"]).(map[string]interface{})
if found {
schema, found = (props[s[i]]).(map[string]interface{})
} else {
schema, found = (schema[s[i]]).(map[string]interface{})
}
if !found {
fmt.Printf("schema[s[i]] called %s looks like: %+v\n", objName, schema[s[i]])
fmt.Printf("** ERR ** getObject illegal selector %s at level %d called %s\n", objName, i, s[i])
return nil
}
}
return schema
}
// replaces all references recursively in the passed-in object (subschema) using the passed-in schema
func replaceReferences (schema map[string]interface{}, obj interface{}) (interface{}) {
oArr, isArr := obj.([]interface{})
oMap, isMap := obj.(map[string]interface{})
switch {
default:
return obj
case isArr:
//fmt.Printf("ARR [%s:%+v]\n", k, v)
for k, v := range oArr {
r, found := v.(map[string]interface{})
if found {
ref, found := r["$ref"]
if found {
// it is a reference so replace it and recursively replace references
oArr[k] = replaceReferences(schema, getObject(schema, ref.(string)))
} else {
oArr[k] = replaceReferences(schema, v)
}
} else {
//fmt.Printf("** WARN ** array member not a map object [%d:%+v]\n", k, v)
}
}
return oArr
case isMap:
//fmt.Printf("MAP [%s:%+v]\n", k, v)
for k, v := range oMap {
if k == "$ref" {
// it is a reference so replace it and recursively replace references
//fmt.Printf("** INFO ** Should be $ref [%s:%+v]\n", k, v)
oMap = replaceReferences(schema, getObject(schema, v.(string))).(map[string]interface{})
} else {
oMap[k] = replaceReferences(schema, v)
}
}
return oMap
}
}
// If a reference exists at any level in the passed-in schema, this will return true
// Recurses through every level of the map
func referencesExist (schema map[string]interface{}) (bool) {
_, exists := schema["$ref"]
if exists {
return true
}
for _, v := range schema {
switch v.(type) {
case map[string]interface{}:
if referencesExist(v.(map[string]interface{})) {
return true
}
}
}
return false
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func generateGoSchemaFile(schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var schemas = make(map[string]interface{})
var outString = "package main\n\nvar schemas = `\n"
var filename = config.Schemas.GoSchemaFilename
var apiFunctions = config.Schemas.API
var elementNames = config.Schemas.GoSchemaElements
var functionKey = "API"
var objectModelKey = "objectModelSchemas"
schemas[functionKey] = interface{}(make(map[string]interface{}))
schemas[objectModelKey] = interface{}(make(map[string]interface{}))
fmt.Printf("Generate Go SCHEMA file %s for: \n %s and: \n %s\n", filename, apiFunctions, elementNames)
// grab the event API functions for input
for i := range apiFunctions {
functionSchemaName := "#/definitions/API/" + apiFunctions[i]
functionName := apiFunctions[i]
obj = getObject(schema, functionSchemaName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", functionSchemaName)
return
}
schemas[functionKey].(map[string]interface{})[functionName] = obj
}
// grab the elements requested (these are useful separately even though
// they obviously appear already as part of the event API functions)
for i := range elementNames {
elementName := elementNames[i]
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** ERR ** %s returned nil from getObject\n", elementName)
return
}
schemas[objectModelKey].(map[string]interface{})[elementName] = obj
}
// marshal for output to file
schemaOut, err := json.MarshalIndent(&schemas, "", " ")
if err != nil {
fmt.Printf("** ERR ** cannot marshal schema file output for writing\n")
return
}
outString += string(schemaOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func sampleType(obj interface{}, elementName string) (interface{}) {
o, found := obj.(map[string]interface{})
if (!found) {
return "SCHEMA ELEMENT " + elementName + " IS NOT MAP"
}
t, found := o["type"].(string)
if !found {
//fmt.Printf("** WARN ** Element %s has no type field\n", elementName)
//fmt.Printf("Element missing type is: %s [%v]\n\n", elementName, o)
if elementName == "oneOf" {
return o
}
return "NO TYPE PROPERTY"
}
switch t {
default :
fmt.Printf("** WARN ** Unknown type in sampleType %s\n", t)
case "number" :
return 123.456
case "integer" :
return 789
case "string" :
if strings.ToLower(elementName) == "timestamp" {
return time.Now().Format(time.RFC3339Nano)
}
example, found := o["example"].(string)
if found && len(example) > 0 {
return example
}
def, found := o["default"].(string)
if found && len(def) > 0 {
return def
}
enum, found := o["enum"].([]interface{})
if found {
if len(enum) > 1 {
return enum[1]
}
if len(enum) > 0 {
return enum[0]
}
}
// description is a good alternate choice for sample data since it
// explains the prospective contents
desc, found := o["description"].(string)
if found && len(desc) > 0 {
return desc
}
// if nothing else ...
return "carpe noctem"
case "null" :
return nil
case "boolean" :
return true
case "array" :
var items, found = o["items"].(map[string]interface{})
if (!found) {
fmt.Printf("** WARN ** Element %s is array with no items property\n", elementName)
return "ARRAY WITH NO ITEMS PROPERTY"
}
return arrayFromSchema(items, elementName)
case "object" : {
props, found := o["properties"]
if !found {
fmt.Printf("** WARN ** %s is type object yet has no properties in SampleType\n", elementName)
return "INVALID OBJECT - MISSING PROPERTIES"
}
objOut := make(map[string]interface{})
for k, v := range props.(map[string]interface{}) {
//fmt.Printf("Visiting key %s with value %s\n", k, v)
if v == nil {
fmt.Printf("** WARN ** Key %s has NIL value in SampleType\n", k)
return "INVALID OBJECT - " + fmt.Sprintf("Key %s has NIL value in SampleType\n", k)
}
aArr, isArr := v.([]interface{})
aMap, isMap := v.(map[string]interface{})
switch {
case isArr:
if "oneOf" == k {
aOut := make([]interface{}, len(aArr))
// outer loop is anonymous objects
for k2, v2 := range aArr {
//fmt.Printf("SAMTYP outer OneOf: %d [%v]\n", k2, v2)
vObj, found := v2.(map[string]interface{})
if found {
// inner loop should find one named object
for k3, v3 := range vObj {
tmp := make(map[string]interface{}, 1)
//fmt.Printf("SAMTYP inner OneOf: %s [%v]\n", k3, v3)
//printObject(k3, v3)
tmp[k3] = sampleType(v3, k3)
aOut[k2] = tmp
}
}
objOut[k] = aOut
}
} else {
objOut[k] = "UNKNOWN ARRAY OBJECT"
}
case isMap:
objOut[k] = sampleType(aMap, k)
}
}
return objOut
}
}
fmt.Printf("** WARN ** UNKNOWN TYPE in SampleType: %s\n", t)
return fmt.Sprintf("UNKNOWN TYPE in SampleType: %s\n", t)
}
func printObject(elementName string, obj interface{}) {
aMap, isMap := obj.(map[string]interface{})
aArr, isArr := obj.([]interface{})
switch {
case isArr:
fmt.Printf("Element: %s is an ARRAY\n", elementName)
for k, v := range aArr {
fmt.Printf("[%d] : %+v\n\n", k, v)
}
case isMap:
fmt.Printf("Element: %s is a MAP\n", elementName)
for k, v := range aMap {
fmt.Printf("[%s] : %+v\n\n", k, v)
}
default:
fmt.Printf("Element: %s is of UNKNOWN shape\n", elementName)
}
}
// Generate a sample array from a schema
func arrayFromSchema(schema map[string]interface{}, elementName string) (interface{}) {
enum, found := schema["enum"]
if found {
// there is a set of enums, just use it
return enum
}
return []interface{}{sampleType(schema, elementName)}
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func | (schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var samples = make(map[string]interface{})
var outString = "package main\n\nvar samples = `\n"
var filename = config.Samples.GoSampleFilename
var elementNames = config.Samples.GoSampleElements
fmt.Printf("Generate Go SAMPLE file %s for: \n %s\n", filename, elementNames)
for i := range elementNames {
elementName := elementNames[i]
if elementName == "schema" {
// sample of the entire schema, can it even work?
obj = schema
} else {
// use the schema subset
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", elementName)
return
}
}
samples[elementName] = sampleType(obj, elementName)
}
samplesOut, err := json.MarshalIndent(&samples, "", " ")
if err != nil {
fmt.Println("** ERR ** cannot marshal sample file output for writing")
return
}
outString += string(samplesOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func generateGoObjectModel(schema map[string]interface{}, config Config) () {
for i := range config.ObjectModels.ObjectModelElements {
fmt.Println("Generating object model for: ",
config.ObjectModels.ObjectModelElements[i])
obj := getObject(schema, config.ObjectModels.ObjectModelElements[i])
fmt.Printf("%s: %s\n\n", config.ObjectModels.ObjectModelElements[i], obj)
}
}
// Reads payloadschema.json api file
// encodes as a string literal in payloadschema.go
func main() {
var configFileName = "generate.json"
var api string
var line = 1
var lineOut = 1
var offsets [5000]int
// read the configuration from the json file
filename, _ := filepath.Abs("./scripts/" + configFileName)
fmt.Printf("JSON CONFIG FILEPATH:\n %s\n", filename)
jsonFile, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Println("error reading json file")
panic(err)
}
var config Config
err = json.Unmarshal(jsonFile, &config)
if err != nil {
fmt.Println("error unmarshaling json config")
panic(err)
}
// read the schema file, stripping comments and blank lines, calculate offsets for error output
file, err := os.Open(config.Schemas.SchemaFilename)
if err != nil {
fmt.Printf("** ERR ** [%s] opening schema file at %s\n", err, config.Schemas.SchemaFilename)
return
}
defer file.Close()
reader := bufio.NewReader(file)
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ts := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(ts, "#") {
fmt.Println("Line: ", line, " is a comment")
} else if ts == "" {
fmt.Println("Line: ", line, " is blank")
} else {
api += ts + "\n"
lineOut++
}
offsets[lineOut] = line
line++
}
// verify the JSON format by unmarshaling it into a map
var schema map[string]interface{}
err = json.Unmarshal([]byte(api), &schema)
if err != nil {
fmt.Println("*********** UNMARSHAL ERR **************\n", err)
printSyntaxError(api, &offsets, err)
return
}
// Looks tricky, but simply creates an output with references resolved
// from the schema, and another object and passes it back. I used to
// call it for each object, but much simpler to call it once for the
// whole schema and simply pick off the objects we want for subschemas
// and samples.
schema = replaceReferences(schema, schema).(map[string]interface{})
// generate the Go files that the contract needs -- for now, complete schema and
// event schema and sample object
generateGoSchemaFile(schema, config)
generateGoSampleFile(schema, config)
// experimental
//generateGoObjectModel(schema, config)
// TODO generate js object model?? Java??
} | generateGoSampleFile | identifier_name |
generate_go_schema.go | /*
Copyright (c) 2016 IBM Corporation and other Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Contributors:
Kim Letkeman - Initial Contribution
*/
// IoT Blockchain Demo Smart Contract
// KL 03 Mar 2016 Generate schema and event subschema Go files for contract v3.1
// KL 04-07 Mar 2016 testing of schema, adaptation of output to contract 3.0.2,
// addition of config file generate.yaml
// KL 13 Mar 2016 Changed from yaml (lesser GPL) to JSON for config
// KL 8 June 2016 Supporting complex events and the "oneOf" keyword, better support
// for arrays, cleanup lint issues
package main
import (
"bufio"
"io/ioutil"
"os"
"strings"
"fmt"
"encoding/json"
"time"
"path/filepath"
)
// Config defines contents of "generate.json" colocated in scripts folder with this script
type Config struct {
Schemas struct {
SchemaFilename string `json:"schemaFilename"`
GoSchemaFilename string `json:"goSchemaFilename"`
GoSchemaElements []string `json:"goSchemaElements"`
API []string `json:"API"`
} `json:"schemas"`
Samples struct {
GoSampleFilename string `json:"goSampleFilename"`
GoSampleElements []string `json:"goSampleElements"`
} `json:"samples"`
ObjectModels struct {
ObjectModelElements []string `json:"generateGoObjectsFrom"`
} `json:"objectModels"`
}
// can print very accurate syntax errors as found by the JSON marshaler
// relies on the offset table created when reading the schema JSON file and expunging
// comments and blank lines
func printSyntaxError(js string, off *[5000]int, err interface{}) |
// retrieves a subschema object via the reference path; handles root node references and
// references starting after definitions; does not handle external file references yet
func getObject (schema map[string]interface{}, objName string) (map[string]interface{}) {
// return a copy of the selected object
// handles full path, or path starting after definitions
if !strings.HasPrefix(objName, "#/definitions/") {
objName = "#/definitions/" + objName
}
s := strings.Split(objName, "/")
// crawl the levels, skipping the # root
for i := 1; i < len(s); i++ {
props, found := (schema["properties"]).(map[string]interface{})
if found {
schema, found = (props[s[i]]).(map[string]interface{})
} else {
schema, found = (schema[s[i]]).(map[string]interface{})
}
if !found {
fmt.Printf("schema[s[i]] called %s looks like: %+v\n", objName, schema[s[i]])
fmt.Printf("** ERR ** getObject illegal selector %s at level %d called %s\n", objName, i, s[i])
return nil
}
}
return schema
}
// replaces all references recursively in the passed-in object (subschema) using the passed-in schema
func replaceReferences (schema map[string]interface{}, obj interface{}) (interface{}) {
oArr, isArr := obj.([]interface{})
oMap, isMap := obj.(map[string]interface{})
switch {
default:
return obj
case isArr:
//fmt.Printf("ARR [%s:%+v]\n", k, v)
for k, v := range oArr {
r, found := v.(map[string]interface{})
if found {
ref, found := r["$ref"]
if found {
// it is a reference so replace it and recursively replace references
oArr[k] = replaceReferences(schema, getObject(schema, ref.(string)))
} else {
oArr[k] = replaceReferences(schema, v)
}
} else {
//fmt.Printf("** WARN ** array member not a map object [%d:%+v]\n", k, v)
}
}
return oArr
case isMap:
//fmt.Printf("MAP [%s:%+v]\n", k, v)
for k, v := range oMap {
if k == "$ref" {
// it is a reference so replace it and recursively replace references
//fmt.Printf("** INFO ** Should be $ref [%s:%+v]\n", k, v)
oMap = replaceReferences(schema, getObject(schema, v.(string))).(map[string]interface{})
} else {
oMap[k] = replaceReferences(schema, v)
}
}
return oMap
}
}
// If a reference exists at any level in the passed-in schema, this will return true
// Recurses through every level of the map
func referencesExist (schema map[string]interface{}) (bool) {
_, exists := schema["$ref"]
if exists {
return true
}
for _, v := range schema {
switch v.(type) {
case map[string]interface{}:
if referencesExist(v.(map[string]interface{})) {
return true
}
}
}
return false
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func generateGoSchemaFile(schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var schemas = make(map[string]interface{})
var outString = "package main\n\nvar schemas = `\n"
var filename = config.Schemas.GoSchemaFilename
var apiFunctions = config.Schemas.API
var elementNames = config.Schemas.GoSchemaElements
var functionKey = "API"
var objectModelKey = "objectModelSchemas"
schemas[functionKey] = interface{}(make(map[string]interface{}))
schemas[objectModelKey] = interface{}(make(map[string]interface{}))
fmt.Printf("Generate Go SCHEMA file %s for: \n %s and: \n %s\n", filename, apiFunctions, elementNames)
// grab the event API functions for input
for i := range apiFunctions {
functionSchemaName := "#/definitions/API/" + apiFunctions[i]
functionName := apiFunctions[i]
obj = getObject(schema, functionSchemaName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", functionSchemaName)
return
}
schemas[functionKey].(map[string]interface{})[functionName] = obj
}
// grab the elements requested (these are useful separately even though
// they obviously appear already as part of the event API functions)
for i := range elementNames {
elementName := elementNames[i]
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** ERR ** %s returned nil from getObject\n", elementName)
return
}
schemas[objectModelKey].(map[string]interface{})[elementName] = obj
}
// marshal for output to file
schemaOut, err := json.MarshalIndent(&schemas, "", " ")
if err != nil {
fmt.Printf("** ERR ** cannot marshal schema file output for writing\n")
return
}
outString += string(schemaOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func sampleType(obj interface{}, elementName string) (interface{}) {
o, found := obj.(map[string]interface{})
if (!found) {
return "SCHEMA ELEMENT " + elementName + " IS NOT MAP"
}
t, found := o["type"].(string)
if !found {
//fmt.Printf("** WARN ** Element %s has no type field\n", elementName)
//fmt.Printf("Element missing type is: %s [%v]\n\n", elementName, o)
if elementName == "oneOf" {
return o
}
return "NO TYPE PROPERTY"
}
switch t {
default :
fmt.Printf("** WARN ** Unknown type in sampleType %s\n", t)
case "number" :
return 123.456
case "integer" :
return 789
case "string" :
if strings.ToLower(elementName) == "timestamp" {
return time.Now().Format(time.RFC3339Nano)
}
example, found := o["example"].(string)
if found && len(example) > 0 {
return example
}
def, found := o["default"].(string)
if found && len(def) > 0 {
return def
}
enum, found := o["enum"].([]interface{})
if found {
if len(enum) > 1 {
return enum[1]
}
if len(enum) > 0 {
return enum[0]
}
}
// description is a good alternate choice for sample data since it
// explains the prospective contents
desc, found := o["description"].(string)
if found && len(desc) > 0 {
return desc
}
// if nothing else ...
return "carpe noctem"
case "null" :
return nil
case "boolean" :
return true
case "array" :
var items, found = o["items"].(map[string]interface{})
if (!found) {
fmt.Printf("** WARN ** Element %s is array with no items property\n", elementName)
return "ARRAY WITH NO ITEMS PROPERTY"
}
return arrayFromSchema(items, elementName)
case "object" : {
props, found := o["properties"]
if !found {
fmt.Printf("** WARN ** %s is type object yet has no properties in SampleType\n", elementName)
return "INVALID OBJECT - MISSING PROPERTIES"
}
objOut := make(map[string]interface{})
for k, v := range props.(map[string]interface{}) {
//fmt.Printf("Visiting key %s with value %s\n", k, v)
if v == nil {
fmt.Printf("** WARN ** Key %s has NIL value in SampleType\n", k)
return "INVALID OBJECT - " + fmt.Sprintf("Key %s has NIL value in SampleType\n", k)
}
aArr, isArr := v.([]interface{})
aMap, isMap := v.(map[string]interface{})
switch {
case isArr:
if "oneOf" == k {
aOut := make([]interface{}, len(aArr))
// outer loop is anonymous objects
for k2, v2 := range aArr {
//fmt.Printf("SAMTYP outer OneOf: %d [%v]\n", k2, v2)
vObj, found := v2.(map[string]interface{})
if found {
// inner loop should find one named object
for k3, v3 := range vObj {
tmp := make(map[string]interface{}, 1)
//fmt.Printf("SAMTYP inner OneOf: %s [%v]\n", k3, v3)
//printObject(k3, v3)
tmp[k3] = sampleType(v3, k3)
aOut[k2] = tmp
}
}
objOut[k] = aOut
}
} else {
objOut[k] = "UNKNOWN ARRAY OBJECT"
}
case isMap:
objOut[k] = sampleType(aMap, k)
}
}
return objOut
}
}
fmt.Printf("** WARN ** UNKNOWN TYPE in SampleType: %s\n", t)
return fmt.Sprintf("UNKNOWN TYPE in SampleType: %s\n", t)
}
func printObject(elementName string, obj interface{}) {
aMap, isMap := obj.(map[string]interface{})
aArr, isArr := obj.([]interface{})
switch {
case isArr:
fmt.Printf("Element: %s is an ARRAY\n", elementName)
for k, v := range aArr {
fmt.Printf("[%d] : %+v\n\n", k, v)
}
case isMap:
fmt.Printf("Element: %s is a MAP\n", elementName)
for k, v := range aMap {
fmt.Printf("[%s] : %+v\n\n", k, v)
}
default:
fmt.Printf("Element: %s is of UNKNOWN shape\n", elementName)
}
}
// Generate a sample array from a schema
func arrayFromSchema(schema map[string]interface{}, elementName string) (interface{}) {
enum, found := schema["enum"]
if found {
// there is a set of enums, just use it
return enum
}
return []interface{}{sampleType(schema, elementName)}
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func generateGoSampleFile(schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var samples = make(map[string]interface{})
var outString = "package main\n\nvar samples = `\n"
var filename = config.Samples.GoSampleFilename
var elementNames = config.Samples.GoSampleElements
fmt.Printf("Generate Go SAMPLE file %s for: \n %s\n", filename, elementNames)
for i := range elementNames {
elementName := elementNames[i]
if elementName == "schema" {
// sample of the entire schema, can it even work?
obj = schema
} else {
// use the schema subset
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", elementName)
return
}
}
samples[elementName] = sampleType(obj, elementName)
}
samplesOut, err := json.MarshalIndent(&samples, "", " ")
if err != nil {
fmt.Println("** ERR ** cannot marshal sample file output for writing")
return
}
outString += string(samplesOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func generateGoObjectModel(schema map[string]interface{}, config Config) () {
for i := range config.ObjectModels.ObjectModelElements {
fmt.Println("Generating object model for: ",
config.ObjectModels.ObjectModelElements[i])
obj := getObject(schema, config.ObjectModels.ObjectModelElements[i])
fmt.Printf("%s: %s\n\n", config.ObjectModels.ObjectModelElements[i], obj)
}
}
// Reads payloadschema.json api file
// encodes as a string literal in payloadschema.go
func main() {
var configFileName = "generate.json"
var api string
var line = 1
var lineOut = 1
var offsets [5000]int
// read the configuration from the json file
filename, _ := filepath.Abs("./scripts/" + configFileName)
fmt.Printf("JSON CONFIG FILEPATH:\n %s\n", filename)
jsonFile, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Println("error reading json file")
panic(err)
}
var config Config
err = json.Unmarshal(jsonFile, &config)
if err != nil {
fmt.Println("error unmarshaling json config")
panic(err)
}
// read the schema file, stripping comments and blank lines, calculate offsets for error output
file, err := os.Open(config.Schemas.SchemaFilename)
if err != nil {
fmt.Printf("** ERR ** [%s] opening schema file at %s\n", err, config.Schemas.SchemaFilename)
return
}
defer file.Close()
reader := bufio.NewReader(file)
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ts := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(ts, "#") {
fmt.Println("Line: ", line, " is a comment")
} else if ts == "" {
fmt.Println("Line: ", line, " is blank")
} else {
api += ts + "\n"
lineOut++
}
offsets[lineOut] = line
line++
}
// verify the JSON format by unmarshaling it into a map
var schema map[string]interface{}
err = json.Unmarshal([]byte(api), &schema)
if err != nil {
fmt.Println("*********** UNMARSHAL ERR **************\n", err)
printSyntaxError(api, &offsets, err)
return
}
// Looks tricky, but simply creates an output with references resolved
// from the schema, and another object and passes it back. I used to
// call it for each object, but much simpler to call it once for the
// whole schema and simply pick off the objects we want for subschemas
// and samples.
schema = replaceReferences(schema, schema).(map[string]interface{})
// generate the Go files that the contract needs -- for now, complete schema and
// event schema and sample object
generateGoSchemaFile(schema, config)
generateGoSampleFile(schema, config)
// experimental
//generateGoObjectModel(schema, config)
// TODO generate js object model?? Java??
} | {
syntax, ok := err.(*json.SyntaxError)
if !ok {
fmt.Println("*********** ERR trying to get syntax error location **************\n", err)
return
}
start, end := strings.LastIndex(js[:syntax.Offset], "\n")+1, len(js)
if idx := strings.Index(js[start:], "\n"); idx >= 0 {
end = start + idx
}
line, pos := strings.Count(js[:start], "\n"), int(syntax.Offset) - start -1
fmt.Printf("Error in line %d: %s \n", off[line]+1, err)
fmt.Printf("%s\n%s^\n\n", js[start:end], strings.Repeat(" ", pos))
} | identifier_body |
generate_go_schema.go | /*
Copyright (c) 2016 IBM Corporation and other Contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Contributors:
Kim Letkeman - Initial Contribution
*/
// IoT Blockchain Demo Smart Contract
// KL 03 Mar 2016 Generate schema and event subschema Go files for contract v3.1
// KL 04-07 Mar 2016 testing of schema, adaptation of output to contract 3.0.2,
// addition of config file generate.yaml
// KL 13 Mar 2016 Changed from yaml (lesser GPL) to JSON for config
// KL 8 June 2016 Supporting complex events and the "oneOf" keyword, better support
// for arrays, cleanup lint issues
package main
import (
"bufio"
"io/ioutil"
"os"
"strings"
"fmt"
"encoding/json"
"time"
"path/filepath"
)
// Config defines contents of "generate.json" colocated in scripts folder with this script
type Config struct {
Schemas struct {
SchemaFilename string `json:"schemaFilename"`
GoSchemaFilename string `json:"goSchemaFilename"`
GoSchemaElements []string `json:"goSchemaElements"`
API []string `json:"API"`
} `json:"schemas"`
Samples struct {
GoSampleFilename string `json:"goSampleFilename"`
GoSampleElements []string `json:"goSampleElements"`
} `json:"samples"`
ObjectModels struct {
ObjectModelElements []string `json:"generateGoObjectsFrom"`
} `json:"objectModels"`
}
// can print very accurate syntax errors as found by the JSON marshaler
// relies on the offset table created when reading the schema JSON file and expunging
// comments and blank lines
func printSyntaxError(js string, off *[5000]int, err interface{}) {
syntax, ok := err.(*json.SyntaxError)
if !ok {
fmt.Println("*********** ERR trying to get syntax error location **************\n", err)
return
}
start, end := strings.LastIndex(js[:syntax.Offset], "\n")+1, len(js)
if idx := strings.Index(js[start:], "\n"); idx >= 0 {
end = start + idx
}
line, pos := strings.Count(js[:start], "\n"), int(syntax.Offset) - start -1
fmt.Printf("Error in line %d: %s \n", off[line]+1, err)
fmt.Printf("%s\n%s^\n\n", js[start:end], strings.Repeat(" ", pos))
}
// retrieves a subschema object via the reference path; handles root node references and
// references starting after definitions; does not handle external file references yet
func getObject (schema map[string]interface{}, objName string) (map[string]interface{}) {
// return a copy of the selected object
// handles full path, or path starting after definitions
if !strings.HasPrefix(objName, "#/definitions/") {
objName = "#/definitions/" + objName
}
s := strings.Split(objName, "/")
// crawl the levels, skipping the # root
for i := 1; i < len(s); i++ {
props, found := (schema["properties"]).(map[string]interface{})
if found {
schema, found = (props[s[i]]).(map[string]interface{})
} else {
schema, found = (schema[s[i]]).(map[string]interface{})
}
if !found {
fmt.Printf("schema[s[i]] called %s looks like: %+v\n", objName, schema[s[i]])
fmt.Printf("** ERR ** getObject illegal selector %s at level %d called %s\n", objName, i, s[i])
return nil
}
}
return schema
}
// replaces all references recursively in the passed-in object (subschema) using the passed-in schema
func replaceReferences (schema map[string]interface{}, obj interface{}) (interface{}) {
oArr, isArr := obj.([]interface{})
oMap, isMap := obj.(map[string]interface{})
switch {
default:
return obj
case isArr:
//fmt.Printf("ARR [%s:%+v]\n", k, v)
for k, v := range oArr {
r, found := v.(map[string]interface{})
if found {
ref, found := r["$ref"]
if found {
// it is a reference so replace it and recursively replace references
oArr[k] = replaceReferences(schema, getObject(schema, ref.(string)))
} else {
oArr[k] = replaceReferences(schema, v)
}
} else {
//fmt.Printf("** WARN ** array member not a map object [%d:%+v]\n", k, v)
}
}
return oArr
case isMap:
//fmt.Printf("MAP [%s:%+v]\n", k, v)
for k, v := range oMap {
if k == "$ref" {
// it is a reference so replace it and recursively replace references
//fmt.Printf("** INFO ** Should be $ref [%s:%+v]\n", k, v)
oMap = replaceReferences(schema, getObject(schema, v.(string))).(map[string]interface{})
} else {
oMap[k] = replaceReferences(schema, v)
}
}
return oMap
}
}
// If a reference exists at any level in the passed-in schema, this will return true
// Recurses through every level of the map
func referencesExist (schema map[string]interface{}) (bool) {
_, exists := schema["$ref"]
if exists {
return true
}
for _, v := range schema {
switch v.(type) {
case map[string]interface{}:
if referencesExist(v.(map[string]interface{})) {
return true
}
}
}
return false
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func generateGoSchemaFile(schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var schemas = make(map[string]interface{})
var outString = "package main\n\nvar schemas = `\n"
var filename = config.Schemas.GoSchemaFilename
var apiFunctions = config.Schemas.API
var elementNames = config.Schemas.GoSchemaElements
var functionKey = "API"
var objectModelKey = "objectModelSchemas"
schemas[functionKey] = interface{}(make(map[string]interface{}))
schemas[objectModelKey] = interface{}(make(map[string]interface{}))
fmt.Printf("Generate Go SCHEMA file %s for: \n %s and: \n %s\n", filename, apiFunctions, elementNames)
// grab the event API functions for input
for i := range apiFunctions {
functionSchemaName := "#/definitions/API/" + apiFunctions[i]
functionName := apiFunctions[i]
obj = getObject(schema, functionSchemaName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", functionSchemaName)
return
}
schemas[functionKey].(map[string]interface{})[functionName] = obj
}
// grab the elements requested (these are useful separately even though
// they obviously appear already as part of the event API functions)
for i := range elementNames {
elementName := elementNames[i]
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** ERR ** %s returned nil from getObject\n", elementName)
return
}
schemas[objectModelKey].(map[string]interface{})[elementName] = obj
}
// marshal for output to file
schemaOut, err := json.MarshalIndent(&schemas, "", " ")
if err != nil {
fmt.Printf("** ERR ** cannot marshal schema file output for writing\n")
return
}
outString += string(schemaOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func sampleType(obj interface{}, elementName string) (interface{}) {
o, found := obj.(map[string]interface{})
if (!found) {
return "SCHEMA ELEMENT " + elementName + " IS NOT MAP"
}
t, found := o["type"].(string)
if !found {
//fmt.Printf("** WARN ** Element %s has no type field\n", elementName)
//fmt.Printf("Element missing type is: %s [%v]\n\n", elementName, o)
if elementName == "oneOf" {
return o
}
return "NO TYPE PROPERTY"
}
switch t {
default :
fmt.Printf("** WARN ** Unknown type in sampleType %s\n", t)
case "number" :
return 123.456
case "integer" :
return 789
case "string" :
if strings.ToLower(elementName) == "timestamp" {
return time.Now().Format(time.RFC3339Nano)
}
example, found := o["example"].(string)
if found && len(example) > 0 {
return example
}
def, found := o["default"].(string)
if found && len(def) > 0 {
return def
}
enum, found := o["enum"].([]interface{})
if found {
if len(enum) > 1 {
return enum[1]
}
if len(enum) > 0 {
return enum[0]
}
}
// description is a good alternate choice for sample data since it
// explains the prospective contents
desc, found := o["description"].(string)
if found && len(desc) > 0 |
// if nothing else ...
return "carpe noctem"
case "null" :
return nil
case "boolean" :
return true
case "array" :
var items, found = o["items"].(map[string]interface{})
if (!found) {
fmt.Printf("** WARN ** Element %s is array with no items property\n", elementName)
return "ARRAY WITH NO ITEMS PROPERTY"
}
return arrayFromSchema(items, elementName)
case "object" : {
props, found := o["properties"]
if !found {
fmt.Printf("** WARN ** %s is type object yet has no properties in SampleType\n", elementName)
return "INVALID OBJECT - MISSING PROPERTIES"
}
objOut := make(map[string]interface{})
for k, v := range props.(map[string]interface{}) {
//fmt.Printf("Visiting key %s with value %s\n", k, v)
if v == nil {
fmt.Printf("** WARN ** Key %s has NIL value in SampleType\n", k)
return "INVALID OBJECT - " + fmt.Sprintf("Key %s has NIL value in SampleType\n", k)
}
aArr, isArr := v.([]interface{})
aMap, isMap := v.(map[string]interface{})
switch {
case isArr:
if "oneOf" == k {
aOut := make([]interface{}, len(aArr))
// outer loop is anonymous objects
for k2, v2 := range aArr {
//fmt.Printf("SAMTYP outer OneOf: %d [%v]\n", k2, v2)
vObj, found := v2.(map[string]interface{})
if found {
// inner loop should find one named object
for k3, v3 := range vObj {
tmp := make(map[string]interface{}, 1)
//fmt.Printf("SAMTYP inner OneOf: %s [%v]\n", k3, v3)
//printObject(k3, v3)
tmp[k3] = sampleType(v3, k3)
aOut[k2] = tmp
}
}
objOut[k] = aOut
}
} else {
objOut[k] = "UNKNOWN ARRAY OBJECT"
}
case isMap:
objOut[k] = sampleType(aMap, k)
}
}
return objOut
}
}
fmt.Printf("** WARN ** UNKNOWN TYPE in SampleType: %s\n", t)
return fmt.Sprintf("UNKNOWN TYPE in SampleType: %s\n", t)
}
func printObject(elementName string, obj interface{}) {
aMap, isMap := obj.(map[string]interface{})
aArr, isArr := obj.([]interface{})
switch {
case isArr:
fmt.Printf("Element: %s is an ARRAY\n", elementName)
for k, v := range aArr {
fmt.Printf("[%d] : %+v\n\n", k, v)
}
case isMap:
fmt.Printf("Element: %s is a MAP\n", elementName)
for k, v := range aMap {
fmt.Printf("[%s] : %+v\n\n", k, v)
}
default:
fmt.Printf("Element: %s is of UNKNOWN shape\n", elementName)
}
}
// Generate a sample array from a schema
func arrayFromSchema(schema map[string]interface{}, elementName string) (interface{}) {
enum, found := schema["enum"]
if found {
// there is a set of enums, just use it
return enum
}
return []interface{}{sampleType(schema, elementName)}
}
// Generates a file <munged elementName>.go to contain a string literal for the pretty version
// of the schema with all references resolved. In the same file, creates a sample JSON that
// can be used to show a complete structure of the object.
func generateGoSampleFile(schema map[string]interface{}, config Config) {
var obj map[string]interface{}
var samples = make(map[string]interface{})
var outString = "package main\n\nvar samples = `\n"
var filename = config.Samples.GoSampleFilename
var elementNames = config.Samples.GoSampleElements
fmt.Printf("Generate Go SAMPLE file %s for: \n %s\n", filename, elementNames)
for i := range elementNames {
elementName := elementNames[i]
if elementName == "schema" {
// sample of the entire schema, can it even work?
obj = schema
} else {
// use the schema subset
obj = getObject(schema, elementName)
if obj == nil {
fmt.Printf("** WARN ** %s returned nil from getObject\n", elementName)
return
}
}
samples[elementName] = sampleType(obj, elementName)
}
samplesOut, err := json.MarshalIndent(&samples, "", " ")
if err != nil {
fmt.Println("** ERR ** cannot marshal sample file output for writing")
return
}
outString += string(samplesOut) + "`"
ioutil.WriteFile(filename, []byte(outString), 0644)
}
func generateGoObjectModel(schema map[string]interface{}, config Config) () {
for i := range config.ObjectModels.ObjectModelElements {
fmt.Println("Generating object model for: ",
config.ObjectModels.ObjectModelElements[i])
obj := getObject(schema, config.ObjectModels.ObjectModelElements[i])
fmt.Printf("%s: %s\n\n", config.ObjectModels.ObjectModelElements[i], obj)
}
}
// Reads payloadschema.json api file
// encodes as a string literal in payloadschema.go
func main() {
var configFileName = "generate.json"
var api string
var line = 1
var lineOut = 1
var offsets [5000]int
// read the configuration from the json file
filename, _ := filepath.Abs("./scripts/" + configFileName)
fmt.Printf("JSON CONFIG FILEPATH:\n %s\n", filename)
jsonFile, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Println("error reading json file")
panic(err)
}
var config Config
err = json.Unmarshal(jsonFile, &config)
if err != nil {
fmt.Println("error unmarshaling json config")
panic(err)
}
// read the schema file, stripping comments and blank lines, calculate offsets for error output
file, err := os.Open(config.Schemas.SchemaFilename)
if err != nil {
fmt.Printf("** ERR ** [%s] opening schema file at %s\n", err, config.Schemas.SchemaFilename)
return
}
defer file.Close()
reader := bufio.NewReader(file)
scanner := bufio.NewScanner(reader)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
ts := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(ts, "#") {
fmt.Println("Line: ", line, " is a comment")
} else if ts == "" {
fmt.Println("Line: ", line, " is blank")
} else {
api += ts + "\n"
lineOut++
}
offsets[lineOut] = line
line++
}
// verify the JSON format by unmarshaling it into a map
var schema map[string]interface{}
err = json.Unmarshal([]byte(api), &schema)
if err != nil {
fmt.Println("*********** UNMARSHAL ERR **************\n", err)
printSyntaxError(api, &offsets, err)
return
}
// Looks tricky, but simply creates an output with references resolved
// from the schema, and another object and passes it back. I used to
// call it for each object, but much simpler to call it once for the
// whole schema and simply pick off the objects we want for subschemas
// and samples.
schema = replaceReferences(schema, schema).(map[string]interface{})
// generate the Go files that the contract needs -- for now, complete schema and
// event schema and sample object
generateGoSchemaFile(schema, config)
generateGoSampleFile(schema, config)
// experimental
//generateGoObjectModel(schema, config)
// TODO generate js object model?? Java??
} | {
return desc
} | conditional_block |
assistant.py | global file_name
global dictionnary
dictionnary = False
def ui_treatment(cmds):
"""
pre: Prend une liste de commande
post: Execute la fonction demandée dans la liste
"""
if cmds[0] == "file":
selectfile(cmds[1:])
elif cmds[0] == "info":
retrieve_info()
elif cmds[0] == "dictionnary" and len(cmds) == 1:
dictionnary()
elif cmds[0] == "search" and len(cmds) == 2:
search(cmds[1])
elif cmds[0] == "search" and len(cmds)>2:
print("You can't search more than one word at a time!")
elif cmds[0] == "pika" and len(cmds) == 1:
pikachu()
elif cmds[0] == "sum" and len(cmds) >1:
sumation(cmds)
elif cmds[0] == "sum" and len (cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "avg" and len(cmds) > 1:
avg(cmds[1:])
elif cmds[0] == "avg" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "help" and len(cmds) == 1:
help()
elif cmds[0] == "product" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "product" and len(cmds) > 1:
product(cmds[1:])
elif cmds[0] == "morsecode" and len(cmds) >1:
morsecode(cmds[1:])
else:
print("I don't know that command... Yet? Teach me machine learning so I can learn it!")
def dictionnary():
"""
Sets the chosen file as a dictionnary, allowing word search
"""
try:
file = open(file_name,"r")
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
dictionnary = True
def selectfile (f_name):
"""
Defines the file f_name as the file to use for commands "info", "dictionnary" and "search"
"""
global file_name
file_name = ""
for i in f_name:
file_name = file_name + i
try:
file = open(file_name,"r")
dictionnary = False
print("Now using {0} as base file".format(file_name))
file.close()
except:
print("Are you kidding me? That file doesn't even exist, could you please try again?")
return
def retrieve_info():
"""
Displays line and character information about the file
"""
try:
a = line_count()
b = char_count()
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
print("There are {0} lines in your file, for a total of {1} characters".format(a,b))
def line_count(): #Counts lines
file = open(file_name,"r")
a = file.readlines()
file.close()
return len(a)
def char_count(): #Counts characters
counter = 0
file = open(file_name,"r")
for line in file:
for x in line.strip():
counter +=1
file.close()
return counter
def pikachu(): #Does it really need an explanation?
print("█▀▀▄░░░░░░░░░░░▄▀▀█")
print("░█░░░▀▄░▄▄▄▄▄░▄▀░░░█")
print('░░▀▄░░░▀░░░░░▀░░░▄▀')
print('░░░░▌░▄▄░░░▄▄░▐▀▀')
print('░░░▐░░█▄░░░▄█░░▌▄▄▀▀▀▀█ ')#Pikachu
print('░░░▌▄▄▀▀░▄░▀▀▄▄▐░░░░░░█')
print('▄▀▀▐▀▀░▄▄▄▄▄░▀▀▌▄▄▄░░░█')
print('█░░░▀▄░█░░░█░▄▀░░░░█▀▀▀')
print('░▀▄░░▀░░▀▀▀░░▀░░░▄█▀')
print('░░░█░░░░░░░░░░░▄▀▄░▀▄')
print('░░░█░░░░░░░░░▄▀█░░█░░█')
print('░░░█░░░░░░░░░░░█▄█░░▄▀')
print('░░░█░░░░░░░░░░░████▀')
print('░░░▀▄▄▀▀▄▄▀▀▄▄▄█▀')
return
def sumation(cmds):
"""
Sums all the specified numbers in the command
"""
numbers = []
sum = 0
try:
for i in cmds[1:]:
numbers.append(float(i))
for l in numbers:
sum = sum + l
print(sum)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
def avg(nbs):
"""
Calculates the average of the specified numbers in the command
"""
try:
sum = 0
for nb in nbs:
sum += float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the average is.... : {0} ".format(sum/len(nbs)))
def product(nbs):
"""
Calculates the product of the specified numbers in the command
"""
try:
prod = 0
for nb in nbs:
prod *= float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the product is.... : {0} ".format(prod))
def help():
"""
Diplays help about the program
"""
print("""##########Help Page##########""")
print(">>> file <filename.extension>:")
print("Selects that file as the one to use for the following commands: dictionnary, info, search")
print(">>> info:")
print("Displays the number of lines and characters in the file specified by the 'file' function")
print(">>> dictionnary:")
print("Switches to dictionnary mode")
print(">>> search <word>")
print("If dictionnary mode is on and a file is selected, searches the closest word in that file.")
print(">>>sum <number1 number2 number3 ...>:")
print("Sums the given numbers")
print(">>>avg <number1 number2 number3 ...>:")
print("Averages the given numbers")
print(">>>product <number1 number2 number3 ...>:")
print("Multiplies the given numbers")
print(">>>morsecode <Sentence>:")
print("Translates the given sentence into morsecode. No accents!")
print(">>>help:")
print("Displays this very list")
print(">>>exit, q, quit, exit():")
print("Exits the assistant")
print(">>>pika:")
print("Just see for yourself")
def search(word):
"""
Prints the closest word in the file
"""
try:
words = list_every_word(file_name)
if len(words) > 20000:
print("This might take a while.")
except IOError:
print("This file doesn't exist... Are you sure you defined a valid filename? Use 'file <your filename>'")
except:
print("An undefined error occured")
if dictionnary == False:
print("You forgot to switch to dictionnary mode. Just use 'dictionnary'")
return
else:
try:
ld = smallest_ld(word,words)
print("The closest word found in the file is: {0}".format(ld[0][1]))
return ld[0][1]
except:
print("An unexpected error occured, be sure to have valid input in your file")
return
def list_every_word(file_name): #considers file_name is valid
"""
return a sorted list of all the words contained in a file
"""
file = open(file_name,"r")
words = []
lines = file.readlines()
for line in lines:
line = line.strip()
line = line.split(" ")
for word in line:
words.append(word)
return words
def smallest_ld(word, l):
ld = 100
nearest = []
for i in l:
n_ld = iterative_levenshtein(word, i)
if ld > n_ld:
ld = n_ld
nearest = [(ld, i)]
elif ld == n_ld:
nearest.append((ld,i))
return nearest
def iterative_levenshtein(s, t):
"""
iterative_levenshtein(s, t) -> ldist
ldist is the Levenshtein distance between the strings
s and t.
For all i and j, dist[i,j] will contain the Levenshtein
distance between the first i characters of s and the
first j characters of t
"""
rows = len(s)+1
cols = len(t)+1
dist = [[0 for x in range(cols)] for x in range(rows)]
# source prefixes can be transformed into empty strings
# by deletions:
for i in range(1, rows):
dist[i][0] = i
# target prefixes can be created from an empty source string
# by inserting the characters
for i in range(1, cols):
dist[0][i] = i
for col in range(1, cols):
for row in range(1, rows):
if s[row-1] == t[col-1]:
cost = 0
else:
cost = 1
dist[row][col] = min(dist[row-1][col] + 1, # deletion
dist[row][col-1] + 1, # insertion
dist[row-1][col-1] + cost) # substitution
#for r in range(rows):
#print(dist[r])
return dist[row][col]
def morsecode(stripped_sentence):
morse_code = [("A",'.-'),("B",'-...'),("C", | -..'),("E",'.'),("F",'..-.'),("g",'--.'),("h",'....'),("i",'..'),("j",'.--.'),("k","-.-"),("l",'.-..')]
morse_code += [("m",'--'),("n",'-.'),("o",'---'),("p",'.--.'),("q",'--.-'),("r",'.-.'),("s",'...'),("t",'-'),("u",'..-'),("v",'...-'),("w",'.--'),("x",'-..-'),("y",'-.--'),("z",'--..')]
morse_code += [("0",'-----'),("1",'.----'),("2",'..---'),("3",'...--'),("4",'....-'),("5",'.....'),("6",'-....'),("7",'--...'),("8",'---..'),("9",'----.')]
morse_code += [(".",'.-.-.-'),(",",'--..--'),("?",'..--..'),(":",'---...'),("/",'-..-.'),("-",'-....-'),("=",'-...-'),("""'""",'.----.'),("()",'-.--.-'),("_",'..--.-')]
morse_code += [("!",'-.-.--'),("&",'.-...'),('''"''','.-..-.')]
sentence = ""
translation = ""
for word in stripped_sentence:
for letter in word:
translated_letter = conversion(letter,morse_code)
if translated_letter == None:
return
translation = translation + translated_letter + "/"
translation = translation + "/"
print(translation)
return
def conversion(letter,alphabet):
for i in range(len(alphabet)):
if alphabet[i][0].upper() == letter.upper():
return alphabet[i][1]
print("That wasn't a valid sentence, please replace character {0}".format(letter))
return None
while True:
cmd = input("Enter a command, my master: ") #the name of the command
cmds = cmd.split(" ")
if cmds[0] in ["q","quit","exit","exit()"]:
print("cya")
break
else:
ui_treatment(cmds) | '-.-.'),("D",' | conditional_block |
assistant.py | global file_name
global dictionnary
dictionnary = False
def ui_treatment(cmds):
"""
pre: Prend une liste de commande
post: Execute la fonction demandée dans la liste
"""
if cmds[0] == "file":
selectfile(cmds[1:])
elif cmds[0] == "info":
retrieve_info()
elif cmds[0] == "dictionnary" and len(cmds) == 1:
dictionnary()
elif cmds[0] == "search" and len(cmds) == 2:
search(cmds[1])
elif cmds[0] == "search" and len(cmds)>2:
print("You can't search more than one word at a time!")
elif cmds[0] == "pika" and len(cmds) == 1:
pikachu()
elif cmds[0] == "sum" and len(cmds) >1:
sumation(cmds)
elif cmds[0] == "sum" and len (cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "avg" and len(cmds) > 1:
avg(cmds[1:])
elif cmds[0] == "avg" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "help" and len(cmds) == 1:
help()
elif cmds[0] == "product" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "product" and len(cmds) > 1:
product(cmds[1:])
elif cmds[0] == "morsecode" and len(cmds) >1:
morsecode(cmds[1:])
else:
print("I don't know that command... Yet? Teach me machine learning so I can learn it!")
def dictionnary():
"""
Sets the chosen file as a dictionnary, allowing word search
"""
try:
file = open(file_name,"r")
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
dictionnary = True
def selectfile (f_name):
"""
Defines the file f_name as the file to use for commands "info", "dictionnary" and "search"
"""
global file_name
file_name = ""
for i in f_name:
file_name = file_name + i
try:
file = open(file_name,"r")
dictionnary = False
print("Now using {0} as base file".format(file_name))
file.close()
except:
print("Are you kidding me? That file doesn't even exist, could you please try again?")
return
def retrieve_info():
"""
Displays line and character information about the file
"""
try:
a = line_count()
b = char_count()
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
print("There are {0} lines in your file, for a total of {1} characters".format(a,b))
def line_count(): #Counts lines
file = open(file_name,"r")
a = file.readlines()
file.close()
return len(a)
def char_count(): #Counts characters
counter = 0
file = open(file_name,"r")
for line in file:
for x in line.strip():
counter +=1
file.close()
return counter
def pikachu(): #Does it really need an explanation?
print("█▀▀▄░░░░░░░░░░░▄▀▀█")
print("░█░░░▀▄░▄▄▄▄▄░▄▀░░░█")
print('░░▀▄░░░▀░░░░░▀░░░▄▀')
print('░░░░▌░▄▄░░░▄▄░▐▀▀')
print('░░░▐░░█▄░░░▄█░░▌▄▄▀▀▀▀█ ')#Pikachu
print('░░░▌▄▄▀▀░▄░▀▀▄▄▐░░░░░░█')
print('▄▀▀▐▀▀░▄▄▄▄▄░▀▀▌▄▄▄░░░█')
print('█░░░▀▄░█░░░█░▄▀░░░░█▀▀▀')
print('░▀▄░░▀░░▀▀▀░░▀░░░▄█▀')
print('░░░█░░░░░░░░░░░▄▀▄░▀▄')
print('░░░█░░░░░░░░░▄▀█░░█░░█')
print('░░░█░░░░░░░░░░░█▄█░░▄▀')
print('░░░█░░░░░░░░░░░████▀')
print('░░░▀▄▄▀▀▄▄▀▀▄▄▄█▀')
return
def sumation(cmds):
"""
Sums all the specified numbers in the command
"""
numbers = []
sum = 0
try:
for i in cmds[1:]:
numbers.append(float(i))
for l in numbers:
sum = sum + l
print(sum)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
def avg(nbs):
"""
Calculates the average of the specified numbers in the command
"""
try:
sum = 0
for nb in nbs:
sum += float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the average is.... : {0} ".format(sum/len(nbs)))
def product(nbs):
"""
Calculates the product of the specified numbers in the command
"""
try:
prod = 0
for nb in nbs:
prod *= float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the product is.... : {0} ".format(prod))
def help():
"""
Diplays help about the program
"""
print("""##########Help Page##########""")
print(">>> file <filename.extension>:")
print("Selects that file as the one to use for the following commands: dictionnary, info, search")
print(">>> info:")
print("Displays the number of lines and characters in the file specified by the 'file' function")
print(">>> dictionnary:")
print("Switches to dictionnary mode")
print(">>> search <word>")
print("If dictionnary mode is on and a file is selected, searches the closest word in that file.")
print(">>>sum <number1 number2 number3 ...>:")
print("Sums the given numbers")
print(">>>avg <number1 number2 number3 ...>:")
print("Averages the given numbers")
print(">>>product <number1 number2 number3 ...>:")
print("Multiplies the given numbers")
print(">>>morsecode <Sentence>:")
print("Translates the given sentence into morsecode. No accents!")
print(">>>help:")
print("Displays this very list")
print(">>>exit, q, quit, exit():")
print("Exits the assistant")
print(">>>pika:")
print("Just see for yourself")
def search(word):
"""
Prints the closest word in the file
"""
try:
words = list_every_word(file_name)
if len(words) > 20000:
print("This might take a while.")
except IOError:
print("This file doesn't exist... Are you sure you defined a valid filename? Use 'file <your filename>'")
except:
print("An undefined error occured")
if dictionnary == False:
print("You forgot to switch to dictionnary mode. Just use 'dictionnary'")
return
else:
try:
ld = smallest_ld(word,words)
print("The closest word found in the file is: {0}".format(ld[0][1]))
return ld[0][1]
except:
print("An unexpected error occured, be sure to have valid input in your file")
return
def list_every_word(file_name): #considers file_name is valid
"""
return a sorted list of all the words contained in a file
"""
file = open(file_name,"r")
words = []
lines = file.readlines()
for line in lines:
line = line.strip()
line = line.split(" ")
for word in line:
words.append(word)
return words
def smallest_ld(word, l):
ld = 100
nearest = []
for i in l:
n_ld = iterative_levenshtein(word, i)
if ld > n_ld:
ld = n_ld
nearest = [(ld, i)]
elif ld == n_ld:
nearest.append((ld,i))
return nearest
def iterative_levenshtein(s, t):
"""
iterative_levenshtein(s, t) -> ldist
ldist is the Levenshtein distance between the strings
s and t.
For all i and j, dist[i,j] will contain the Levenshtein
distance between the first i characters of s and the
first j characters of t
"""
rows = len(s)+1
cols = len(t)+1
dist = [[0 for x in range(cols)] for x in range(rows)]
# source prefixes can be transformed into empty strings
# by deletions:
for i in range(1, rows):
dist[i][0] = i
# target prefixes can be created from an empty source string
# by inserting the characters
for i in range(1, cols):
dist[0][i] = i
for col in range(1, cols):
for row in range(1, rows):
if s[row-1] == t[col-1]:
cost = 0
else:
cost = 1
dist[row][col] = min(dist[row-1][col] + 1, # deletion
dist[row][col-1] + 1, # insertion
dist[row-1][col-1] + cost) # substitution
#for r in range(rows):
#print(dist[r])
return dist[row][col]
def morsecode(stripped_sentence):
morse_code = [("A",'.-'),("B",'-...'),("C",'-.-.'),("D",'-..'),("E",'.'),("F",'..-.'),("g",'--.'),("h",'....'),("i",'..'),("j",'.--.'),("k","-.-"),("l",'.-..')]
morse_code += [("m",'--'),("n",'-.'),("o",'---'),("p",'.--.'),("q",'--.-'),("r",'.-.'),("s",'...'),("t",'-'),("u",'..-'),("v",'...-'),("w",'.--'),("x",'-..-'),("y",'-.--'),("z",'--..')]
morse_code += [("0",'-----'),("1",'.----'),("2",'..---'),("3",'...--'),("4",'....-'),("5",'.....'),("6",'-....'),("7",'--...'),("8",'---..'),("9",'----.')]
morse_code += [(".",'.-.-.-'),(",",'--..--'),("?",'..--..'),(":" | ,'---...'),("/",'-..-.'),("-",'-....-'),("=",'-...-'),("""'""",'.----.'),("()",'-.--.-'),("_",'..--.-')]
morse_code += [("!",'-.-.--'),("&",'.-...'),('''"''','.-..-.')]
sentence = ""
translation = ""
for word in stripped_sentence:
for letter in word:
translated_letter = conversion(letter,morse_code)
if translated_letter == None:
return
translation = translation + translated_letter + "/"
translation = translation + "/"
print(translation)
return
def conversion(letter,alphabet):
for i in range(len(alphabet)):
if alphabet[i][0].upper() == letter.upper():
return alphabet[i][1]
print("That wasn't a valid sentence, please replace character {0}".format(letter))
return None
while True:
cmd = input("Enter a command, my master: ") #the name of the command
cmds = cmd.split(" ")
if cmds[0] in ["q","quit","exit","exit()"]:
print("cya")
break
else:
ui_treatment(cmds) | identifier_body | |
assistant.py | global file_name
global dictionnary
dictionnary = False
def ui_treatment(cmds):
"""
pre: Prend une liste de commande
post: Execute la fonction demandée dans la liste
"""
if cmds[0] == "file":
selectfile(cmds[1:])
elif cmds[0] == "info":
retrieve_info()
elif cmds[0] == "dictionnary" and len(cmds) == 1:
dictionnary()
elif cmds[0] == "search" and len(cmds) == 2:
search(cmds[1])
elif cmds[0] == "search" and len(cmds)>2:
print("You can't search more than one word at a time!")
elif cmds[0] == "pika" and len(cmds) == 1:
pikachu()
elif cmds[0] == "sum" and len(cmds) >1:
sumation(cmds)
elif cmds[0] == "sum" and len (cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "avg" and len(cmds) > 1:
avg(cmds[1:])
elif cmds[0] == "avg" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "help" and len(cmds) == 1:
help()
elif cmds[0] == "product" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "product" and len(cmds) > 1:
product(cmds[1:])
elif cmds[0] == "morsecode" and len(cmds) >1:
morsecode(cmds[1:])
else:
print("I don't know that command... Yet? Teach me machine learning so I can learn it!")
def dictionnary():
"""
Sets the chosen file as a dictionnary, allowing word search
"""
try:
file = open(file_name,"r")
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
dictionnary = True
def selectfile (f_name):
"""
Defines the file f_name as the file to use for commands "info", "dictionnary" and "search"
"""
global file_name
file_name = ""
for i in f_name:
file_name = file_name + i
try:
file = open(file_name,"r")
dictionnary = False
print("Now using {0} as base file".format(file_name))
file.close()
except:
print("Are you kidding me? That file doesn't even exist, could you please try again?")
return
def retrieve_info():
"""
Displays line and character information about the file
"""
try:
a = line_count()
b = char_count()
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
print("There are {0} lines in your file, for a total of {1} characters".format(a,b))
def line_count(): #Counts lines
file = open(file_name,"r")
a = file.readlines()
file.close()
return len(a)
def char_count(): #Counts characters
counter = 0
file = open(file_name,"r")
for line in file:
for x in line.strip():
counter +=1
file.close()
return counter
def pikachu(): #Does it really need an explanation?
print("█▀▀▄░░░░░░░░░░░▄▀▀█")
print("░█░░░▀▄░▄▄▄▄▄░▄▀░░░█")
print('░░▀▄░░░▀░░░░░▀░░░▄▀')
print('░░░░▌░▄▄░░░▄▄░▐▀▀')
print('░░░▐░░█▄░░░▄█░░▌▄▄▀▀▀▀█ ')#Pikachu
print('░░░▌▄▄▀▀░▄░▀▀▄▄▐░░░░░░█')
print('▄▀▀▐▀▀░▄▄▄▄▄░▀▀▌▄▄▄░░░█')
print('█░░░▀▄░█░░░█░▄▀░░░░█▀▀▀')
print('░▀▄░░▀░░▀▀▀░░▀░░░▄█▀')
print('░░░█░░░░░░░░░░░▄▀▄░▀▄')
print('░░░█░░░░░░░░░▄▀█░░█░░█')
print('░░░█░░░░░░░░░░░█▄█░░▄▀')
print('░░░█░░░░░░░░░░░████▀')
print('░░░▀▄▄▀▀▄▄▀▀▄▄▄█▀')
return
def sumation(cmds):
"""
Sums all the specified numbers in the command
"""
numbers = []
sum = 0
try:
for i in cmds[1:]:
numbers.append(float(i))
for l in numbers:
sum = sum + l
print(sum)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
def avg(nbs):
"""
Calculates the average of the specified numbers in the command
"""
try:
sum = 0
for nb in nbs:
sum += float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the average is.... : {0} ".format(sum/len(nbs)))
def product(nbs):
"""
Calculates the product of the specified numbers in the command
"""
try:
prod = 0
for nb in nbs:
prod *= float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the product is.... : {0} ".format(prod))
def help():
"""
Diplays help about the program
"""
print("""##########Help Page##########""")
print(">>> file <filename.extension>:")
print("Selects that file as the one to use for the following commands: dictionnary, info, search")
print(">>> info:")
print("Displays the number of lines and characters in the file specified by the 'file' function")
print(">>> dictionnary:")
print("Switches to dictionnary mode")
print(">>> search <word>")
print("If dictionnary mode is on and a file is selected, searches the closest word in that file.")
print(">>>sum <number1 number2 number3 ...>:")
print("Sums the given numbers")
print(">>>avg <number1 number2 number3 ...>:")
print("Averages the given numbers")
print(">>>product <number1 number2 number3 ...>:")
print("Multiplies the given numbers")
print(">>>morsecode <Sentence>:")
print("Translates the given sentence into morsecode. No accents!")
print(">>>help:")
print("Displays this very list")
print(">>>exit, q, quit, exit():")
print("Exits the assistant")
print(">>>pika:")
print("Just see for yourself")
def search(word):
"""
Prints the closest word in the file
"""
try:
words = list_every_word(file_name)
if len(words) > 20000:
print("This might take a while.")
except IOError:
print("This file doesn't exist... Are you sure you defined a valid filename? Use 'file <your filename>'")
except:
print("An undefined error occured")
if dictionnary == False:
print("You forgot to switch to dictionnary mode. Just use 'dictionnary'")
return
else:
try:
ld = smallest_ld(word,words)
print("The closest word found in the file is: {0}".format(ld[0][1]))
return ld[0][1]
except:
print("An unexpected error occured, be sure to have valid input in your file")
return |
def list_every_word(file_name): #considers file_name is valid
"""
return a sorted list of all the words contained in a file
"""
file = open(file_name,"r")
words = []
lines = file.readlines()
for line in lines:
line = line.strip()
line = line.split(" ")
for word in line:
words.append(word)
return words
def smallest_ld(word, l):
ld = 100
nearest = []
for i in l:
n_ld = iterative_levenshtein(word, i)
if ld > n_ld:
ld = n_ld
nearest = [(ld, i)]
elif ld == n_ld:
nearest.append((ld,i))
return nearest
def iterative_levenshtein(s, t):
"""
iterative_levenshtein(s, t) -> ldist
ldist is the Levenshtein distance between the strings
s and t.
For all i and j, dist[i,j] will contain the Levenshtein
distance between the first i characters of s and the
first j characters of t
"""
rows = len(s)+1
cols = len(t)+1
dist = [[0 for x in range(cols)] for x in range(rows)]
# source prefixes can be transformed into empty strings
# by deletions:
for i in range(1, rows):
dist[i][0] = i
# target prefixes can be created from an empty source string
# by inserting the characters
for i in range(1, cols):
dist[0][i] = i
for col in range(1, cols):
for row in range(1, rows):
if s[row-1] == t[col-1]:
cost = 0
else:
cost = 1
dist[row][col] = min(dist[row-1][col] + 1, # deletion
dist[row][col-1] + 1, # insertion
dist[row-1][col-1] + cost) # substitution
#for r in range(rows):
#print(dist[r])
return dist[row][col]
def morsecode(stripped_sentence):
morse_code = [("A",'.-'),("B",'-...'),("C",'-.-.'),("D",'-..'),("E",'.'),("F",'..-.'),("g",'--.'),("h",'....'),("i",'..'),("j",'.--.'),("k","-.-"),("l",'.-..')]
morse_code += [("m",'--'),("n",'-.'),("o",'---'),("p",'.--.'),("q",'--.-'),("r",'.-.'),("s",'...'),("t",'-'),("u",'..-'),("v",'...-'),("w",'.--'),("x",'-..-'),("y",'-.--'),("z",'--..')]
morse_code += [("0",'-----'),("1",'.----'),("2",'..---'),("3",'...--'),("4",'....-'),("5",'.....'),("6",'-....'),("7",'--...'),("8",'---..'),("9",'----.')]
morse_code += [(".",'.-.-.-'),(",",'--..--'),("?",'..--..'),(":",'---...'),("/",'-..-.'),("-",'-....-'),("=",'-...-'),("""'""",'.----.'),("()",'-.--.-'),("_",'..--.-')]
morse_code += [("!",'-.-.--'),("&",'.-...'),('''"''','.-..-.')]
sentence = ""
translation = ""
for word in stripped_sentence:
for letter in word:
translated_letter = conversion(letter,morse_code)
if translated_letter == None:
return
translation = translation + translated_letter + "/"
translation = translation + "/"
print(translation)
return
def conversion(letter,alphabet):
for i in range(len(alphabet)):
if alphabet[i][0].upper() == letter.upper():
return alphabet[i][1]
print("That wasn't a valid sentence, please replace character {0}".format(letter))
return None
while True:
cmd = input("Enter a command, my master: ") #the name of the command
cmds = cmd.split(" ")
if cmds[0] in ["q","quit","exit","exit()"]:
print("cya")
break
else:
ui_treatment(cmds) | random_line_split | |
assistant.py | global file_name
global dictionnary
dictionnary = False
def ui_treatment(cmds):
"""
pre: Prend une liste de commande
post: Execute la fonction demandée dans la liste
"""
if cmds[0] == "file":
selectfile(cmds[1:])
elif cmds[0] == "info":
retrieve_info()
elif cmds[0] == "dictionnary" and len(cmds) == 1:
dictionnary()
elif cmds[0] == "search" and len(cmds) == 2:
search(cmds[1])
elif cmds[0] == "search" and len(cmds)>2:
print("You can't search more than one word at a time!")
elif cmds[0] == "pika" and len(cmds) == 1:
pikachu()
elif cmds[0] == "sum" and len(cmds) >1:
sumation(cmds)
elif cmds[0] == "sum" and len (cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "avg" and len(cmds) > 1:
avg(cmds[1:])
elif cmds[0] == "avg" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "help" and len(cmds) == 1:
help()
elif cmds[0] == "product" and len(cmds) == 1:
print("""Argument needed! Expected syntax: Numbers separated by spaces""")
elif cmds[0] == "product" and len(cmds) > 1:
product(cmds[1:])
elif cmds[0] == "morsecode" and len(cmds) >1:
morsecode(cmds[1:])
else:
print("I don't know that command... Yet? Teach me machine learning so I can learn it!")
def dictionnary():
"""
Sets the chosen file as a dictionnary, allowing word search
"""
try:
file = open(file_name,"r")
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
dictionnary = True
def selectfile (f_name):
"""
Defines the file f_name as the file to use for commands "info", "dictionnary" and "search"
"""
global file_name
file_name = ""
for i in f_name:
file_name = file_name + i
try:
file = open(file_name,"r")
dictionnary = False
print("Now using {0} as base file".format(file_name))
file.close()
except:
print("Are you kidding me? That file doesn't even exist, could you please try again?")
return
def retrieve_info():
"""
Displays line and character information about the file
"""
try:
a = line_count()
b = char_count()
except:
print("That didn't work, are you sure you defined a valid filename? Try using 'file <your filename>'")
return
print("There are {0} lines in your file, for a total of {1} characters".format(a,b))
def line_count(): #Counts lines
file = open(file_name,"r")
a = file.readlines()
file.close()
return len(a)
def c | ): #Counts characters
counter = 0
file = open(file_name,"r")
for line in file:
for x in line.strip():
counter +=1
file.close()
return counter
def pikachu(): #Does it really need an explanation?
print("█▀▀▄░░░░░░░░░░░▄▀▀█")
print("░█░░░▀▄░▄▄▄▄▄░▄▀░░░█")
print('░░▀▄░░░▀░░░░░▀░░░▄▀')
print('░░░░▌░▄▄░░░▄▄░▐▀▀')
print('░░░▐░░█▄░░░▄█░░▌▄▄▀▀▀▀█ ')#Pikachu
print('░░░▌▄▄▀▀░▄░▀▀▄▄▐░░░░░░█')
print('▄▀▀▐▀▀░▄▄▄▄▄░▀▀▌▄▄▄░░░█')
print('█░░░▀▄░█░░░█░▄▀░░░░█▀▀▀')
print('░▀▄░░▀░░▀▀▀░░▀░░░▄█▀')
print('░░░█░░░░░░░░░░░▄▀▄░▀▄')
print('░░░█░░░░░░░░░▄▀█░░█░░█')
print('░░░█░░░░░░░░░░░█▄█░░▄▀')
print('░░░█░░░░░░░░░░░████▀')
print('░░░▀▄▄▀▀▄▄▀▀▄▄▄█▀')
return
def sumation(cmds):
"""
Sums all the specified numbers in the command
"""
numbers = []
sum = 0
try:
for i in cmds[1:]:
numbers.append(float(i))
for l in numbers:
sum = sum + l
print(sum)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
def avg(nbs):
"""
Calculates the average of the specified numbers in the command
"""
try:
sum = 0
for nb in nbs:
sum += float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the average is.... : {0} ".format(sum/len(nbs)))
def product(nbs):
"""
Calculates the product of the specified numbers in the command
"""
try:
prod = 0
for nb in nbs:
prod *= float(nb)
except TypeError:
print("Hmmm, I guess you haven't only entered valid numbers")
return
print("And the product is.... : {0} ".format(prod))
def help():
"""
Diplays help about the program
"""
print("""##########Help Page##########""")
print(">>> file <filename.extension>:")
print("Selects that file as the one to use for the following commands: dictionnary, info, search")
print(">>> info:")
print("Displays the number of lines and characters in the file specified by the 'file' function")
print(">>> dictionnary:")
print("Switches to dictionnary mode")
print(">>> search <word>")
print("If dictionnary mode is on and a file is selected, searches the closest word in that file.")
print(">>>sum <number1 number2 number3 ...>:")
print("Sums the given numbers")
print(">>>avg <number1 number2 number3 ...>:")
print("Averages the given numbers")
print(">>>product <number1 number2 number3 ...>:")
print("Multiplies the given numbers")
print(">>>morsecode <Sentence>:")
print("Translates the given sentence into morsecode. No accents!")
print(">>>help:")
print("Displays this very list")
print(">>>exit, q, quit, exit():")
print("Exits the assistant")
print(">>>pika:")
print("Just see for yourself")
def search(word):
"""
Prints the closest word in the file
"""
try:
words = list_every_word(file_name)
if len(words) > 20000:
print("This might take a while.")
except IOError:
print("This file doesn't exist... Are you sure you defined a valid filename? Use 'file <your filename>'")
except:
print("An undefined error occured")
if dictionnary == False:
print("You forgot to switch to dictionnary mode. Just use 'dictionnary'")
return
else:
try:
ld = smallest_ld(word,words)
print("The closest word found in the file is: {0}".format(ld[0][1]))
return ld[0][1]
except:
print("An unexpected error occured, be sure to have valid input in your file")
return
def list_every_word(file_name): #considers file_name is valid
"""
return a sorted list of all the words contained in a file
"""
file = open(file_name,"r")
words = []
lines = file.readlines()
for line in lines:
line = line.strip()
line = line.split(" ")
for word in line:
words.append(word)
return words
def smallest_ld(word, l):
ld = 100
nearest = []
for i in l:
n_ld = iterative_levenshtein(word, i)
if ld > n_ld:
ld = n_ld
nearest = [(ld, i)]
elif ld == n_ld:
nearest.append((ld,i))
return nearest
def iterative_levenshtein(s, t):
"""
iterative_levenshtein(s, t) -> ldist
ldist is the Levenshtein distance between the strings
s and t.
For all i and j, dist[i,j] will contain the Levenshtein
distance between the first i characters of s and the
first j characters of t
"""
rows = len(s)+1
cols = len(t)+1
dist = [[0 for x in range(cols)] for x in range(rows)]
# source prefixes can be transformed into empty strings
# by deletions:
for i in range(1, rows):
dist[i][0] = i
# target prefixes can be created from an empty source string
# by inserting the characters
for i in range(1, cols):
dist[0][i] = i
for col in range(1, cols):
for row in range(1, rows):
if s[row-1] == t[col-1]:
cost = 0
else:
cost = 1
dist[row][col] = min(dist[row-1][col] + 1, # deletion
dist[row][col-1] + 1, # insertion
dist[row-1][col-1] + cost) # substitution
#for r in range(rows):
#print(dist[r])
return dist[row][col]
def morsecode(stripped_sentence):
morse_code = [("A",'.-'),("B",'-...'),("C",'-.-.'),("D",'-..'),("E",'.'),("F",'..-.'),("g",'--.'),("h",'....'),("i",'..'),("j",'.--.'),("k","-.-"),("l",'.-..')]
morse_code += [("m",'--'),("n",'-.'),("o",'---'),("p",'.--.'),("q",'--.-'),("r",'.-.'),("s",'...'),("t",'-'),("u",'..-'),("v",'...-'),("w",'.--'),("x",'-..-'),("y",'-.--'),("z",'--..')]
morse_code += [("0",'-----'),("1",'.----'),("2",'..---'),("3",'...--'),("4",'....-'),("5",'.....'),("6",'-....'),("7",'--...'),("8",'---..'),("9",'----.')]
morse_code += [(".",'.-.-.-'),(",",'--..--'),("?",'..--..'),(":",'---...'),("/",'-..-.'),("-",'-....-'),("=",'-...-'),("""'""",'.----.'),("()",'-.--.-'),("_",'..--.-')]
morse_code += [("!",'-.-.--'),("&",'.-...'),('''"''','.-..-.')]
sentence = ""
translation = ""
for word in stripped_sentence:
for letter in word:
translated_letter = conversion(letter,morse_code)
if translated_letter == None:
return
translation = translation + translated_letter + "/"
translation = translation + "/"
print(translation)
return
def conversion(letter,alphabet):
for i in range(len(alphabet)):
if alphabet[i][0].upper() == letter.upper():
return alphabet[i][1]
print("That wasn't a valid sentence, please replace character {0}".format(letter))
return None
while True:
cmd = input("Enter a command, my master: ") #the name of the command
cmds = cmd.split(" ")
if cmds[0] in ["q","quit","exit","exit()"]:
print("cya")
break
else:
ui_treatment(cmds) | har_count( | identifier_name |
main.py | import os
import subprocess
import discord
import asyncio
import traceback
import sys
import ast
from discord.ext import commands
# Import Cogs
from cogs.misc import Miscellaneous
from cogs.serversettings import ServerSettings
from cogs.mod import Moderator
from cogs.automod import AutoMod
from cogs.google import Google
# Minigame/Fun Cogs
from cogs.fun import Fun
#from cogs.hangman import Hangman
#from cogs.rps import RockPaperScissors
from otherscipts.helpers import update_presence
from otherscipts.data import Data
TOKEN = os.getenv('SPARTA_TOKEN')
intents = discord.Intents.default()
intents.members = True
def get_prefix(client, message):
if str(message.guild.id) not in Data.server_data:
Data.server_data[str(message.guild.id)] = Data.create_new_data()
data = Data.server_data[str(message.guild.id)]
return data["prefix"]
PREFIX = get_prefix
bot = commands.Bot(
command_prefix=PREFIX,
description="I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.",
intents=intents,
help_command=None,
case_insensitive=True
)
THEME_COLOR = discord.Colour.blue()
# Add Cogs
bot.add_cog(Miscellaneous(bot, THEME_COLOR)) | bot.add_cog(ServerSettings(bot, THEME_COLOR))
bot.add_cog(Moderator(bot, THEME_COLOR))
bot.add_cog(AutoMod(bot, THEME_COLOR))
bot.add_cog(Fun(bot, THEME_COLOR))
bot.add_cog(Google(bot, THEME_COLOR))
#bot.add_cog(Hangman(bot, THEME_COLOR))
#bot.add_cog(RockPaperScissors(bot, THEME_COLOR))
previous_msg_sender_id = None
@bot.event
async def on_ready():
bot.loop.create_task(Data.auto_update_data())
bot.loop.create_task(update_presence(bot, PREFIX))
print("Bot is ready...")
@bot.event
async def on_guild_join(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Joined - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_guild_remove(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Left - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_member_join(member):
guild: discord.Guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has joined {guild} server...")
join_role = guild.get_role(data["join_role"])
if join_role is not None:
await member.add_roles(join_role)
# Welcome Message
if data["welcome_msg"] is None:
server_wlcm_msg = f"Welcome, {member.mention}, to the Official **{guild.name}** Server"
else:
server_wlcm_msg = data["welcome_msg"]
server_wlcm_msg = server_wlcm_msg.replace(
"[mention]", f"{member.mention}")
# Welcome Channel
wel_channel = None
if data["welcome_channel"] is None:
for channel in channels:
if str(channel).find("welcome") != -1:
wel_channel = channel
break
else:
wel_channel = guild.get_channel(int(data["welcome_channel"]))
try:
await wel_channel.send(server_wlcm_msg)
except AttributeError:
print("DEBUG: No welcome channel has been set or found.")
#Remove welcome channel
@bot.command(name="remove_welcome", aliases=['rwel', 'remwel'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome(ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["welcome_channel"] = channel
await ctx.send("This server's welcome channel has been removed")
@bot.event
async def on_member_remove(member):
guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has left the {guild.name}...")
# Leave Message
if data["leave_msg"] is None:
server_leave_msg = f"Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server"
else:
server_leave_msg = data["leave_msg"]
server_leave_msg = server_leave_msg.replace("[member]", f"{member}")
# Leave Channel
lv_channel = None
if data["leave_channel"] is None:
for channel in channels:
if str(channel).find("bye") != -1 or str(channel).find("leave") != -1:
lv_channel = channel
break
else:
lv_channel = guild.get_channel(int(data["leave_channel"]))
try:
await lv_channel.send(server_leave_msg)
except AttributeError:
print("DEBUG: No leave channel has been set or found.")
#Remove leave
@bot.command(name="remove_leave", aliases=['rleave', 'remleave'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome( ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["leave_channel"] = channel
await ctx.send("This server's leave channel has been Removed")
@bot.event
async def on_command_error(ctx, error):
try:
error = error.original
except Exception:
pass
if type(error) is discord.ext.commands.errors.CommandNotFound:
return
elif type(error) is discord.ext.commands.errors.BadArgument:
pass
elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:
pass
elif type(error) is discord.ext.commands.errors.NoPrivateMessage:
pass
elif type(error) is discord.ext.commands.errors.MissingPermissions:
pass
elif type(error) is discord.ext.commands.errors.NotOwner:
pass
elif type(error) is discord.ext.commands.errors.CommandOnCooldown:
pass
elif type(error) is discord.ext.commands.errors.ChannelNotFound:
pass
elif type(error) is discord.ext.commands.errors.BadUnionArgument:
pass
elif type(error) is discord.ext.commands.errors.BotMissingPermissions:
pass
elif type(error) is discord.errors.Forbidden:
error = "I don't have permission to do that!"
else:
print(f"Error {type(error)}: {error}")
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
embed = discord.Embed(
title='Error!',
description='An unexpected error ocurred.\
Please report this to the dev.',
)
embed.add_field(
name='Error Message:',
value=f"{type(error)}:\n{error}",
inline=False
)
await ctx.send(f"{error}")
# LABEL: Programming Commands
def insert_returns(body):
# insert return stmt if the last expression is a expression statement
if isinstance(body[-1], ast.Expr):
body[-1] = ast.Return(body[-1].value)
ast.fix_missing_locations(body[-1])
# for if statements, we insert returns into the body and the orelse
if isinstance(body[-1], ast.If):
insert_returns(body[-1].body)
insert_returns(body[-1].orelse)
# for with blocks, again we insert returns into the body
if isinstance(body[-1], ast.With):
insert_returns(body[-1].body)
@bot.command(name='eval')
async def eval_fn(ctx, *, cmd):
"""Evaluates input.
Input is interpreted as newline seperated statements.
If the last statement is an expression, that is the return value.
Usable globals:
- `bot`: the bot instance
- `discord`: the discord module
- `commands`: the discord.ext.commands module
- `ctx`: the invokation context
- `__import__`: the builtin `__import__` function
Such that `>eval 1 + 1` gives `2` as the result.
The following invokation will cause the bot to send the text '9'
to the channel of invokation and return '3' as the result of evaluating
>eval ```
a = 1 + 2
b = a * 2
await ctx.send(a + b)
a
```
"""
if ctx.message.author.id not in [400857098121904149, 733532987794128897]:
await ctx.send("You are not authorized to run this command")
return
fn_name = "_eval_expr"
cmd = cmd.strip("` ")
# add a layer of indentation
cmd = "\n".join(f" {i}" for i in cmd.splitlines())
# wrap in async def body
body = f"async def {fn_name}():\n{cmd}"
parsed = ast.parse(body)
body = parsed.body[0].body
insert_returns(body)
env = {
'bot': ctx.bot,
'discord': discord,
'commands': commands,
'ctx': ctx,
'__import__': __import__
}
exec(compile(parsed, filename="<ast>", mode="exec"), env)
result = (await eval(f"{fn_name}()", env))
await ctx.send(result)
# LABEL: Debugging Commands
@bot.command(name="data")
async def data(ctx):
is_owner = await bot.is_owner(ctx.author)
if is_owner or ctx.author.id == 733532987794128897: # for real sparta
data_file = discord.File("data.json")
await ctx.send(file=data_file)
@bot.event
async def on_message(message: discord.Message):
global previous_msg_sender_id
if message.author.bot:
return
author: discord.Member = message.author
channel: discord.TextChannel = message.channel
guild: discord.Guild = message.guild
# print(str(author), ": ", message.content)
await bot.process_commands(message)
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
if message.content.replace('!', '') == bot.user.mention:
pre = data["prefix"]
await channel.send(f"The prefix in this server is `{pre}`")
for afk_user_entry in data["afks"]:
afk_user_id = int(afk_user_entry["user"])
afk_reason = afk_user_entry["reason"]
afk_user = guild.get_member(afk_user_id)
if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:
Data.server_data[str(guild.id)]["afks"].remove(afk_user_entry)
await channel.send(f"**{afk_user}** is no longer AFK.")
elif afk_user in message.mentions:
await channel.send(f"**{afk_user}** is currently AFK because **{afk_reason}**.")
if data["pay_respects"] and message.content.strip().lower() == "f":
await channel.send(f"**{author.display_name}** has paid their respects...")
if data["active"] and str(author.id) not in data["users"]:
if not str(channel.id) in data["channels"]:
perms = author.permissions_in(channel)
if not perms.administrator:
if "http://" in message.content or "https://" in message.content:
if len(data["urls"]) > 0:
for url in data["urls"]:
if not url in message.content:
await channel.purge(limit=1)
msg1 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(2)
await msg1.delete()
else:
await channel.purge(limit=1)
msg2 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(3)
await msg2.delete()
elif len(message.attachments) > 0:
await channel.purge(limit=1)
msg3 = await channel.send(f"{author.mention}, you are not allowed to send attachments in this channel.")
await asyncio.sleep(3)
await msg3.delete()
previous_msg_sender_id = author.id
bot.run(TOKEN) | random_line_split | |
main.py | import os
import subprocess
import discord
import asyncio
import traceback
import sys
import ast
from discord.ext import commands
# Import Cogs
from cogs.misc import Miscellaneous
from cogs.serversettings import ServerSettings
from cogs.mod import Moderator
from cogs.automod import AutoMod
from cogs.google import Google
# Minigame/Fun Cogs
from cogs.fun import Fun
#from cogs.hangman import Hangman
#from cogs.rps import RockPaperScissors
from otherscipts.helpers import update_presence
from otherscipts.data import Data
TOKEN = os.getenv('SPARTA_TOKEN')
intents = discord.Intents.default()
intents.members = True
def get_prefix(client, message):
if str(message.guild.id) not in Data.server_data:
Data.server_data[str(message.guild.id)] = Data.create_new_data()
data = Data.server_data[str(message.guild.id)]
return data["prefix"]
PREFIX = get_prefix
bot = commands.Bot(
command_prefix=PREFIX,
description="I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.",
intents=intents,
help_command=None,
case_insensitive=True
)
THEME_COLOR = discord.Colour.blue()
# Add Cogs
bot.add_cog(Miscellaneous(bot, THEME_COLOR))
bot.add_cog(ServerSettings(bot, THEME_COLOR))
bot.add_cog(Moderator(bot, THEME_COLOR))
bot.add_cog(AutoMod(bot, THEME_COLOR))
bot.add_cog(Fun(bot, THEME_COLOR))
bot.add_cog(Google(bot, THEME_COLOR))
#bot.add_cog(Hangman(bot, THEME_COLOR))
#bot.add_cog(RockPaperScissors(bot, THEME_COLOR))
previous_msg_sender_id = None
@bot.event
async def on_ready():
bot.loop.create_task(Data.auto_update_data())
bot.loop.create_task(update_presence(bot, PREFIX))
print("Bot is ready...")
@bot.event
async def on_guild_join(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Joined - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_guild_remove(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Left - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_member_join(member):
guild: discord.Guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has joined {guild} server...")
join_role = guild.get_role(data["join_role"])
if join_role is not None:
|
# Welcome Message
if data["welcome_msg"] is None:
server_wlcm_msg = f"Welcome, {member.mention}, to the Official **{guild.name}** Server"
else:
server_wlcm_msg = data["welcome_msg"]
server_wlcm_msg = server_wlcm_msg.replace(
"[mention]", f"{member.mention}")
# Welcome Channel
wel_channel = None
if data["welcome_channel"] is None:
for channel in channels:
if str(channel).find("welcome") != -1:
wel_channel = channel
break
else:
wel_channel = guild.get_channel(int(data["welcome_channel"]))
try:
await wel_channel.send(server_wlcm_msg)
except AttributeError:
print("DEBUG: No welcome channel has been set or found.")
#Remove welcome channel
@bot.command(name="remove_welcome", aliases=['rwel', 'remwel'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome(ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["welcome_channel"] = channel
await ctx.send("This server's welcome channel has been removed")
@bot.event
async def on_member_remove(member):
guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has left the {guild.name}...")
# Leave Message
if data["leave_msg"] is None:
server_leave_msg = f"Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server"
else:
server_leave_msg = data["leave_msg"]
server_leave_msg = server_leave_msg.replace("[member]", f"{member}")
# Leave Channel
lv_channel = None
if data["leave_channel"] is None:
for channel in channels:
if str(channel).find("bye") != -1 or str(channel).find("leave") != -1:
lv_channel = channel
break
else:
lv_channel = guild.get_channel(int(data["leave_channel"]))
try:
await lv_channel.send(server_leave_msg)
except AttributeError:
print("DEBUG: No leave channel has been set or found.")
#Remove leave
@bot.command(name="remove_leave", aliases=['rleave', 'remleave'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome( ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["leave_channel"] = channel
await ctx.send("This server's leave channel has been Removed")
@bot.event
async def on_command_error(ctx, error):
try:
error = error.original
except Exception:
pass
if type(error) is discord.ext.commands.errors.CommandNotFound:
return
elif type(error) is discord.ext.commands.errors.BadArgument:
pass
elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:
pass
elif type(error) is discord.ext.commands.errors.NoPrivateMessage:
pass
elif type(error) is discord.ext.commands.errors.MissingPermissions:
pass
elif type(error) is discord.ext.commands.errors.NotOwner:
pass
elif type(error) is discord.ext.commands.errors.CommandOnCooldown:
pass
elif type(error) is discord.ext.commands.errors.ChannelNotFound:
pass
elif type(error) is discord.ext.commands.errors.BadUnionArgument:
pass
elif type(error) is discord.ext.commands.errors.BotMissingPermissions:
pass
elif type(error) is discord.errors.Forbidden:
error = "I don't have permission to do that!"
else:
print(f"Error {type(error)}: {error}")
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
embed = discord.Embed(
title='Error!',
description='An unexpected error ocurred.\
Please report this to the dev.',
)
embed.add_field(
name='Error Message:',
value=f"{type(error)}:\n{error}",
inline=False
)
await ctx.send(f"{error}")
# LABEL: Programming Commands
def insert_returns(body):
# insert return stmt if the last expression is a expression statement
if isinstance(body[-1], ast.Expr):
body[-1] = ast.Return(body[-1].value)
ast.fix_missing_locations(body[-1])
# for if statements, we insert returns into the body and the orelse
if isinstance(body[-1], ast.If):
insert_returns(body[-1].body)
insert_returns(body[-1].orelse)
# for with blocks, again we insert returns into the body
if isinstance(body[-1], ast.With):
insert_returns(body[-1].body)
@bot.command(name='eval')
async def eval_fn(ctx, *, cmd):
"""Evaluates input.
Input is interpreted as newline seperated statements.
If the last statement is an expression, that is the return value.
Usable globals:
- `bot`: the bot instance
- `discord`: the discord module
- `commands`: the discord.ext.commands module
- `ctx`: the invokation context
- `__import__`: the builtin `__import__` function
Such that `>eval 1 + 1` gives `2` as the result.
The following invokation will cause the bot to send the text '9'
to the channel of invokation and return '3' as the result of evaluating
>eval ```
a = 1 + 2
b = a * 2
await ctx.send(a + b)
a
```
"""
if ctx.message.author.id not in [400857098121904149, 733532987794128897]:
await ctx.send("You are not authorized to run this command")
return
fn_name = "_eval_expr"
cmd = cmd.strip("` ")
# add a layer of indentation
cmd = "\n".join(f" {i}" for i in cmd.splitlines())
# wrap in async def body
body = f"async def {fn_name}():\n{cmd}"
parsed = ast.parse(body)
body = parsed.body[0].body
insert_returns(body)
env = {
'bot': ctx.bot,
'discord': discord,
'commands': commands,
'ctx': ctx,
'__import__': __import__
}
exec(compile(parsed, filename="<ast>", mode="exec"), env)
result = (await eval(f"{fn_name}()", env))
await ctx.send(result)
# LABEL: Debugging Commands
@bot.command(name="data")
async def data(ctx):
is_owner = await bot.is_owner(ctx.author)
if is_owner or ctx.author.id == 733532987794128897: # for real sparta
data_file = discord.File("data.json")
await ctx.send(file=data_file)
@bot.event
async def on_message(message: discord.Message):
global previous_msg_sender_id
if message.author.bot:
return
author: discord.Member = message.author
channel: discord.TextChannel = message.channel
guild: discord.Guild = message.guild
# print(str(author), ": ", message.content)
await bot.process_commands(message)
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
if message.content.replace('!', '') == bot.user.mention:
pre = data["prefix"]
await channel.send(f"The prefix in this server is `{pre}`")
for afk_user_entry in data["afks"]:
afk_user_id = int(afk_user_entry["user"])
afk_reason = afk_user_entry["reason"]
afk_user = guild.get_member(afk_user_id)
if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:
Data.server_data[str(guild.id)]["afks"].remove(afk_user_entry)
await channel.send(f"**{afk_user}** is no longer AFK.")
elif afk_user in message.mentions:
await channel.send(f"**{afk_user}** is currently AFK because **{afk_reason}**.")
if data["pay_respects"] and message.content.strip().lower() == "f":
await channel.send(f"**{author.display_name}** has paid their respects...")
if data["active"] and str(author.id) not in data["users"]:
if not str(channel.id) in data["channels"]:
perms = author.permissions_in(channel)
if not perms.administrator:
if "http://" in message.content or "https://" in message.content:
if len(data["urls"]) > 0:
for url in data["urls"]:
if not url in message.content:
await channel.purge(limit=1)
msg1 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(2)
await msg1.delete()
else:
await channel.purge(limit=1)
msg2 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(3)
await msg2.delete()
elif len(message.attachments) > 0:
await channel.purge(limit=1)
msg3 = await channel.send(f"{author.mention}, you are not allowed to send attachments in this channel.")
await asyncio.sleep(3)
await msg3.delete()
previous_msg_sender_id = author.id
bot.run(TOKEN)
| await member.add_roles(join_role) | conditional_block |
main.py | import os
import subprocess
import discord
import asyncio
import traceback
import sys
import ast
from discord.ext import commands
# Import Cogs
from cogs.misc import Miscellaneous
from cogs.serversettings import ServerSettings
from cogs.mod import Moderator
from cogs.automod import AutoMod
from cogs.google import Google
# Minigame/Fun Cogs
from cogs.fun import Fun
#from cogs.hangman import Hangman
#from cogs.rps import RockPaperScissors
from otherscipts.helpers import update_presence
from otherscipts.data import Data
TOKEN = os.getenv('SPARTA_TOKEN')
intents = discord.Intents.default()
intents.members = True
def get_prefix(client, message):
if str(message.guild.id) not in Data.server_data:
Data.server_data[str(message.guild.id)] = Data.create_new_data()
data = Data.server_data[str(message.guild.id)]
return data["prefix"]
PREFIX = get_prefix
bot = commands.Bot(
command_prefix=PREFIX,
description="I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.",
intents=intents,
help_command=None,
case_insensitive=True
)
THEME_COLOR = discord.Colour.blue()
# Add Cogs
bot.add_cog(Miscellaneous(bot, THEME_COLOR))
bot.add_cog(ServerSettings(bot, THEME_COLOR))
bot.add_cog(Moderator(bot, THEME_COLOR))
bot.add_cog(AutoMod(bot, THEME_COLOR))
bot.add_cog(Fun(bot, THEME_COLOR))
bot.add_cog(Google(bot, THEME_COLOR))
#bot.add_cog(Hangman(bot, THEME_COLOR))
#bot.add_cog(RockPaperScissors(bot, THEME_COLOR))
previous_msg_sender_id = None
@bot.event
async def on_ready():
bot.loop.create_task(Data.auto_update_data())
bot.loop.create_task(update_presence(bot, PREFIX))
print("Bot is ready...")
@bot.event
async def on_guild_join(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Joined - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_guild_remove(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Left - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_member_join(member):
guild: discord.Guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has joined {guild} server...")
join_role = guild.get_role(data["join_role"])
if join_role is not None:
await member.add_roles(join_role)
# Welcome Message
if data["welcome_msg"] is None:
server_wlcm_msg = f"Welcome, {member.mention}, to the Official **{guild.name}** Server"
else:
server_wlcm_msg = data["welcome_msg"]
server_wlcm_msg = server_wlcm_msg.replace(
"[mention]", f"{member.mention}")
# Welcome Channel
wel_channel = None
if data["welcome_channel"] is None:
for channel in channels:
if str(channel).find("welcome") != -1:
wel_channel = channel
break
else:
wel_channel = guild.get_channel(int(data["welcome_channel"]))
try:
await wel_channel.send(server_wlcm_msg)
except AttributeError:
print("DEBUG: No welcome channel has been set or found.")
#Remove welcome channel
@bot.command(name="remove_welcome", aliases=['rwel', 'remwel'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome(ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["welcome_channel"] = channel
await ctx.send("This server's welcome channel has been removed")
@bot.event
async def on_member_remove(member):
guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has left the {guild.name}...")
# Leave Message
if data["leave_msg"] is None:
server_leave_msg = f"Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server"
else:
server_leave_msg = data["leave_msg"]
server_leave_msg = server_leave_msg.replace("[member]", f"{member}")
# Leave Channel
lv_channel = None
if data["leave_channel"] is None:
for channel in channels:
if str(channel).find("bye") != -1 or str(channel).find("leave") != -1:
lv_channel = channel
break
else:
lv_channel = guild.get_channel(int(data["leave_channel"]))
try:
await lv_channel.send(server_leave_msg)
except AttributeError:
print("DEBUG: No leave channel has been set or found.")
#Remove leave
@bot.command(name="remove_leave", aliases=['rleave', 'remleave'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome( ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["leave_channel"] = channel
await ctx.send("This server's leave channel has been Removed")
@bot.event
async def on_command_error(ctx, error):
|
# LABEL: Programming Commands
def insert_returns(body):
# insert return stmt if the last expression is a expression statement
if isinstance(body[-1], ast.Expr):
body[-1] = ast.Return(body[-1].value)
ast.fix_missing_locations(body[-1])
# for if statements, we insert returns into the body and the orelse
if isinstance(body[-1], ast.If):
insert_returns(body[-1].body)
insert_returns(body[-1].orelse)
# for with blocks, again we insert returns into the body
if isinstance(body[-1], ast.With):
insert_returns(body[-1].body)
@bot.command(name='eval')
async def eval_fn(ctx, *, cmd):
"""Evaluates input.
Input is interpreted as newline seperated statements.
If the last statement is an expression, that is the return value.
Usable globals:
- `bot`: the bot instance
- `discord`: the discord module
- `commands`: the discord.ext.commands module
- `ctx`: the invokation context
- `__import__`: the builtin `__import__` function
Such that `>eval 1 + 1` gives `2` as the result.
The following invokation will cause the bot to send the text '9'
to the channel of invokation and return '3' as the result of evaluating
>eval ```
a = 1 + 2
b = a * 2
await ctx.send(a + b)
a
```
"""
if ctx.message.author.id not in [400857098121904149, 733532987794128897]:
await ctx.send("You are not authorized to run this command")
return
fn_name = "_eval_expr"
cmd = cmd.strip("` ")
# add a layer of indentation
cmd = "\n".join(f" {i}" for i in cmd.splitlines())
# wrap in async def body
body = f"async def {fn_name}():\n{cmd}"
parsed = ast.parse(body)
body = parsed.body[0].body
insert_returns(body)
env = {
'bot': ctx.bot,
'discord': discord,
'commands': commands,
'ctx': ctx,
'__import__': __import__
}
exec(compile(parsed, filename="<ast>", mode="exec"), env)
result = (await eval(f"{fn_name}()", env))
await ctx.send(result)
# LABEL: Debugging Commands
@bot.command(name="data")
async def data(ctx):
is_owner = await bot.is_owner(ctx.author)
if is_owner or ctx.author.id == 733532987794128897: # for real sparta
data_file = discord.File("data.json")
await ctx.send(file=data_file)
@bot.event
async def on_message(message: discord.Message):
global previous_msg_sender_id
if message.author.bot:
return
author: discord.Member = message.author
channel: discord.TextChannel = message.channel
guild: discord.Guild = message.guild
# print(str(author), ": ", message.content)
await bot.process_commands(message)
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
if message.content.replace('!', '') == bot.user.mention:
pre = data["prefix"]
await channel.send(f"The prefix in this server is `{pre}`")
for afk_user_entry in data["afks"]:
afk_user_id = int(afk_user_entry["user"])
afk_reason = afk_user_entry["reason"]
afk_user = guild.get_member(afk_user_id)
if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:
Data.server_data[str(guild.id)]["afks"].remove(afk_user_entry)
await channel.send(f"**{afk_user}** is no longer AFK.")
elif afk_user in message.mentions:
await channel.send(f"**{afk_user}** is currently AFK because **{afk_reason}**.")
if data["pay_respects"] and message.content.strip().lower() == "f":
await channel.send(f"**{author.display_name}** has paid their respects...")
if data["active"] and str(author.id) not in data["users"]:
if not str(channel.id) in data["channels"]:
perms = author.permissions_in(channel)
if not perms.administrator:
if "http://" in message.content or "https://" in message.content:
if len(data["urls"]) > 0:
for url in data["urls"]:
if not url in message.content:
await channel.purge(limit=1)
msg1 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(2)
await msg1.delete()
else:
await channel.purge(limit=1)
msg2 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
await asyncio.sleep(3)
await msg2.delete()
elif len(message.attachments) > 0:
await channel.purge(limit=1)
msg3 = await channel.send(f"{author.mention}, you are not allowed to send attachments in this channel.")
await asyncio.sleep(3)
await msg3.delete()
previous_msg_sender_id = author.id
bot.run(TOKEN)
| try:
error = error.original
except Exception:
pass
if type(error) is discord.ext.commands.errors.CommandNotFound:
return
elif type(error) is discord.ext.commands.errors.BadArgument:
pass
elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:
pass
elif type(error) is discord.ext.commands.errors.NoPrivateMessage:
pass
elif type(error) is discord.ext.commands.errors.MissingPermissions:
pass
elif type(error) is discord.ext.commands.errors.NotOwner:
pass
elif type(error) is discord.ext.commands.errors.CommandOnCooldown:
pass
elif type(error) is discord.ext.commands.errors.ChannelNotFound:
pass
elif type(error) is discord.ext.commands.errors.BadUnionArgument:
pass
elif type(error) is discord.ext.commands.errors.BotMissingPermissions:
pass
elif type(error) is discord.errors.Forbidden:
error = "I don't have permission to do that!"
else:
print(f"Error {type(error)}: {error}")
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
embed = discord.Embed(
title='Error!',
description='An unexpected error ocurred.\
Please report this to the dev.',
)
embed.add_field(
name='Error Message:',
value=f"{type(error)}:\n{error}",
inline=False
)
await ctx.send(f"{error}") | identifier_body |
main.py | import os
import subprocess
import discord
import asyncio
import traceback
import sys
import ast
from discord.ext import commands
# Import Cogs
from cogs.misc import Miscellaneous
from cogs.serversettings import ServerSettings
from cogs.mod import Moderator
from cogs.automod import AutoMod
from cogs.google import Google
# Minigame/Fun Cogs
from cogs.fun import Fun
#from cogs.hangman import Hangman
#from cogs.rps import RockPaperScissors
from otherscipts.helpers import update_presence
from otherscipts.data import Data
TOKEN = os.getenv('SPARTA_TOKEN')
intents = discord.Intents.default()
intents.members = True
def get_prefix(client, message):
if str(message.guild.id) not in Data.server_data:
Data.server_data[str(message.guild.id)] = Data.create_new_data()
data = Data.server_data[str(message.guild.id)]
return data["prefix"]
PREFIX = get_prefix
bot = commands.Bot(
command_prefix=PREFIX,
description="I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.",
intents=intents,
help_command=None,
case_insensitive=True
)
THEME_COLOR = discord.Colour.blue()
# Add Cogs
bot.add_cog(Miscellaneous(bot, THEME_COLOR))
bot.add_cog(ServerSettings(bot, THEME_COLOR))
bot.add_cog(Moderator(bot, THEME_COLOR))
bot.add_cog(AutoMod(bot, THEME_COLOR))
bot.add_cog(Fun(bot, THEME_COLOR))
bot.add_cog(Google(bot, THEME_COLOR))
#bot.add_cog(Hangman(bot, THEME_COLOR))
#bot.add_cog(RockPaperScissors(bot, THEME_COLOR))
previous_msg_sender_id = None
@bot.event
async def on_ready():
bot.loop.create_task(Data.auto_update_data())
bot.loop.create_task(update_presence(bot, PREFIX))
print("Bot is ready...")
@bot.event
async def on_guild_join(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Joined - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_guild_remove(guild):
log_channel = bot.get_channel(773580297954394162)
await log_channel.send(f"Left - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def | (member):
guild: discord.Guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has joined {guild} server...")
join_role = guild.get_role(data["join_role"])
if join_role is not None:
await member.add_roles(join_role)
# Welcome Message
if data["welcome_msg"] is None:
server_wlcm_msg = f"Welcome, {member.mention}, to the Official **{guild.name}** Server"
else:
server_wlcm_msg = data["welcome_msg"]
server_wlcm_msg = server_wlcm_msg.replace(
"[mention]", f"{member.mention}")
# Welcome Channel
wel_channel = None
if data["welcome_channel"] is None:
for channel in channels:
if str(channel).find("welcome") != -1:
wel_channel = channel
break
else:
wel_channel = guild.get_channel(int(data["welcome_channel"]))
try:
await wel_channel.send(server_wlcm_msg)
except AttributeError:
print("DEBUG: No welcome channel has been set or found.")
#Remove welcome channel
@bot.command(name="remove_welcome", aliases=['rwel', 'remwel'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome(ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["welcome_channel"] = channel
await ctx.send("This server's welcome channel has been removed")
@bot.event
async def on_member_remove(member):
guild = member.guild
channels = guild.channels
if str(guild.id) not in Data.server_data:
Data.server_data[str(guild.id)] = Data.create_new_data()
data = Data.server_data[str(guild.id)]
print(f"{member} has left the {guild.name}...")
# Leave Message
if data["leave_msg"] is None:
server_leave_msg = f"Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server"
else:
server_leave_msg = data["leave_msg"]
server_leave_msg = server_leave_msg.replace("[member]", f"{member}")
# Leave Channel
lv_channel = None
if data["leave_channel"] is None:
for channel in channels:
if str(channel).find("bye") != -1 or str(channel).find("leave") != -1:
lv_channel = channel
break
else:
lv_channel = guild.get_channel(int(data["leave_channel"]))
try:
await lv_channel.send(server_leave_msg)
except AttributeError:
print("DEBUG: No leave channel has been set or found.")
#Remove leave
@bot.command(name="remove_leave", aliases=['rleave', 'remleave'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome( ctx, *, channel):
if str(ctx.guild.id) not in Data.server_data:
Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
Data.server_data[str(ctx.guild.id)]["leave_channel"] = channel
await ctx.send("This server's leave channel has been Removed")
@bot.event
async def on_command_error(ctx, error):
try:
error = error.original
except Exception:
pass
if type(error) is discord.ext.commands.errors.CommandNotFound:
return
elif type(error) is discord.ext.commands.errors.BadArgument:
pass
elif type(error) is discord.ext.commands.errors.MissingRequiredArgument:
pass
elif type(error) is discord.ext.commands.errors.NoPrivateMessage:
pass
elif type(error) is discord.ext.commands.errors.MissingPermissions:
pass
elif type(error) is discord.ext.commands.errors.NotOwner:
pass
elif type(error) is discord.ext.commands.errors.CommandOnCooldown:
pass
elif type(error) is discord.ext.commands.errors.ChannelNotFound:
pass
elif type(error) is discord.ext.commands.errors.BadUnionArgument:
pass
elif type(error) is discord.ext.commands.errors.BotMissingPermissions:
pass
elif type(error) is discord.errors.Forbidden:
error = "I don't have permission to do that!"
else:
print(f"Error {type(error)}: {error}")
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
embed = discord.Embed(
title='Error!',
description='An unexpected error ocurred.\
Please report this to the dev.',
)
embed.add_field(
name='Error Message:',
value=f"{type(error)}:\n{error}",
inline=False
)
await ctx.send(f"{error}")
# LABEL: Programming Commands
def insert_returns(body):
    """Rewrite the final statement of *body* so its value is returned.

    Mirrors REPL semantics: a trailing expression statement becomes an
    explicit ``return``, and the rewrite recurses into the branches of a
    trailing ``if`` and the body of a trailing ``with`` block.
    """
    last = body[-1]
    if isinstance(last, ast.Expr):
        # Promote the trailing expression statement to a return statement.
        body[-1] = ast.Return(last.value)
        ast.fix_missing_locations(body[-1])
    elif isinstance(last, ast.If):
        insert_returns(last.body)
        insert_returns(last.orelse)
    elif isinstance(last, ast.With):
        insert_returns(last.body)
@bot.command(name='eval')
async def eval_fn(ctx, *, cmd):
    """Evaluates input.
    Input is interpreted as newline seperated statements.
    If the last statement is an expression, that is the return value.
    Usable globals:
    - `bot`: the bot instance
    - `discord`: the discord module
    - `commands`: the discord.ext.commands module
    - `ctx`: the invokation context
    - `__import__`: the builtin `__import__` function
    Such that `>eval 1 + 1` gives `2` as the result.
    The following invokation will cause the bot to send the text '9'
    to the channel of invokation and return '3' as the result of evaluating
    >eval ```
    a = 1 + 2
    b = a * 2
    await ctx.send(a + b)
    a
    ```
    """
    # Hard-coded allow-list of Discord user ids; everyone else is refused.
    # NOTE(review): the exec/eval below runs arbitrary code — this id check
    # is the only safeguard.
    if ctx.message.author.id not in [400857098121904149, 733532987794128897]:
        await ctx.send("You are not authorized to run this command")
        return
    fn_name = "_eval_expr"
    # Strip the surrounding backticks/spaces of a Discord code fence.
    cmd = cmd.strip("` ")
    # add a layer of indentation
    cmd = "\n".join(f"    {i}" for i in cmd.splitlines())
    # wrap in async def body
    body = f"async def {fn_name}():\n{cmd}"
    parsed = ast.parse(body)
    # The statements inside the generated async function.
    body = parsed.body[0].body
    # Make the last expression the function's return value (REPL-style).
    insert_returns(body)
    env = {
        'bot': ctx.bot,
        'discord': discord,
        'commands': commands,
        'ctx': ctx,
        '__import__': __import__
    }
    # Define the async wrapper in `env`, then call and await it.
    exec(compile(parsed, filename="<ast>", mode="exec"), env)
    result = (await eval(f"{fn_name}()", env))
    await ctx.send(result)
# LABEL: Debugging Commands
@bot.command(name="data")
async def data(ctx):
    """Upload the raw data.json file; restricted to the bot owner(s)."""
    authorized = await bot.is_owner(ctx.author)
    if not (authorized or ctx.author.id == 733532987794128897):  # for real sparta
        return
    await ctx.send(file=discord.File("data.json"))
@bot.event
async def on_message(message: discord.Message):
    # Central message hook: command dispatch, prefix echo on @mention,
    # AFK bookkeeping, "pay respects", and link/attachment moderation.
    global previous_msg_sender_id
    # Ignore bots (including ourselves) to avoid loops.
    if message.author.bot:
        return
    author: discord.Member = message.author
    channel: discord.TextChannel = message.channel
    guild: discord.Guild = message.guild
    # print(str(author), ": ", message.content)
    await bot.process_commands(message)
    # NOTE(review): guild is None for DMs, which would raise AttributeError
    # here — confirm DMs cannot reach this point.
    if str(guild.id) not in Data.server_data:
        Data.server_data[str(guild.id)] = Data.create_new_data()
    data = Data.server_data[str(guild.id)]
    # Bare @mention of the bot -> reply with this server's prefix.
    # Stripping '!' matches both the <@id> and <@!id> mention forms.
    if message.content.replace('!', '') == bot.user.mention:
        pre = data["prefix"]
        await channel.send(f"The prefix in this server is `{pre}`")
    # AFK handling: clear the author's own AFK entry (only if their
    # previous message also came from them), announce mentioned AFK users.
    # NOTE(review): entries are removed from the same list being iterated,
    # which can skip the following entry — confirm at most one entry per
    # user so this cannot matter in practice.
    for afk_user_entry in data["afks"]:
        afk_user_id = int(afk_user_entry["user"])
        afk_reason = afk_user_entry["reason"]
        # NOTE(review): get_member returns None if the member left the
        # guild or is not cached; `afk_user.id` below would then raise.
        afk_user = guild.get_member(afk_user_id)
        if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:
            Data.server_data[str(guild.id)]["afks"].remove(afk_user_entry)
            await channel.send(f"**{afk_user}** is no longer AFK.")
        elif afk_user in message.mentions:
            await channel.send(f"**{afk_user}** is currently AFK because **{afk_reason}**.")
    if data["pay_respects"] and message.content.strip().lower() == "f":
        await channel.send(f"**{author.display_name}** has paid their respects...")
    # Moderation: applies only when enabled, to non-exempt users, in
    # non-exempt channels, and never to administrators.
    if data["active"] and str(author.id) not in data["users"]:
        if not str(channel.id) in data["channels"]:
            perms = author.permissions_in(channel)
            if not perms.administrator:
                if "http://" in message.content or "https://" in message.content:
                    if len(data["urls"]) > 0:
                        # Allow-list mode: delete if the message lacks an
                        # allowed url. NOTE(review): this fires once per
                        # non-matching allow-list entry, so a message can
                        # trigger multiple purge/notify rounds — verify.
                        for url in data["urls"]:
                            if not url in message.content:
                                # purge(limit=1) deletes the newest message
                                # in the channel, which may race with other
                                # traffic and hit the wrong message.
                                await channel.purge(limit=1)
                                msg1 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
                                await asyncio.sleep(2)
                                await msg1.delete()
                    else:
                        # No allow-list configured: all links are removed.
                        await channel.purge(limit=1)
                        msg2 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
                        await asyncio.sleep(3)
                        await msg2.delete()
                elif len(message.attachments) > 0:
                    await channel.purge(limit=1)
                    msg3 = await channel.send(f"{author.mention}, you are not allowed to send attachments in this channel.")
                    await asyncio.sleep(3)
                    await msg3.delete()
    # Remembered so the AFK-clear above only triggers on a second
    # consecutive message from the same user.
    previous_msg_sender_id = author.id
bot.run(TOKEN)
| on_member_join | identifier_name |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
/// Low-level packet I/O driver backing an interface/device node.
pub trait Driver: Sync {
    /// Raw file descriptor for poll/epoll integration, if the driver has one.
    fn fd(&self) -> Option<i32>;
    /// Transmit `pkt`; the returned count is presumably bytes sent —
    /// confirm against concrete driver implementations.
    fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
    /// Receive one packet into a buffer taken from `pool`, reserving
    /// `headroom` bytes at the front; `None` when nothing is available.
    fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
pub trait Gclient<T>: Send {
/// Make a clone() of the node, usually to be used in another thread. It is upto the
/// client to decide what should be cloned/copied and what should be shared. For example,
/// counters are always per thread and cant be shared, a new set of counters need to be
/// made per thread
fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
/// This API is called to hand over packets to the client for processing. Dispatch has
/// pop() API to get packets destined for the node, and push() API to push packets to
/// other graph nodes
fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
/// This API is called when a node gets a message from control plane, like for example
/// to modify the nodes forwarding tables etc..
fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
    // Global index of the node currently being dispatched
    node: usize,
    pub pool: &'d mut dyn PacketPool,
    // Per-node input queues, indexed by global node index
    vectors: &'d mut Vec<VecDeque<BoxPkt>>,
    // Per-node enqueue/drop counters, parallel to `vectors`
    counters: &'d mut Vec<GnodeCntrs>,
    // Current node's edge list: local edge index -> global node index
    nodes: &'d Vec<usize>,
    // Set when this node still has pending work after the current pass
    work: bool,
    // Earliest requested wakeup in ns from now; 0 = run again immediately
    wakeup: usize,
}
impl<'d> Dispatch<'d> {
/// Get one of the packets queued up for a node
pub fn pop(&mut self) -> Option<BoxPkt> |
/// Queue one packet to another node
pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
let node = self.nodes[node];
if self.vectors[node].capacity() >= 1 {
self.vectors[node].push_back(pkt);
if node <= self.node {
self.work = true;
self.wakeup = 0;
}
self.counters[node].enqed.incr();
true
} else {
self.counters[node].drops.incr();
false
}
}
/// Specify the time when this node has work again/needs to be scheduled again
/// wakeup of zero means it has work right now, non zero wakeup indicates time
/// in nanoseconds from now when the node has work
pub fn wakeup(&mut self, wakeup: usize) {
if self.work {
if wakeup < self.wakeup {
self.wakeup = wakeup;
}
} else {
self.work = true;
self.wakeup = wakeup;
}
}
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
/// A unique name for the node
pub name: String,
/// Names of all the nodes this node will have edges to (ie will send packets to)
pub next_names: Vec<String>,
/// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
pub cntrs: GnodeCntrs,
pub perf: Perf,
}
impl GnodeInit {
    /// Per-thread copy of the init parameters.
    ///
    /// The counters and perf tracker are deliberately re-created rather
    /// than cloned: they are per-thread state and cannot be shared
    /// between graph copies.
    pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
        GnodeInit {
            name: self.name.clone(),
            next_names: self.next_names.clone(),
            cntrs: GnodeCntrs::new(&self.name, counters),
            perf: Perf::new(&self.name, counters),
        }
    }
}
pub struct GnodeCntrs {
    // Packets successfully queued to this node
    enqed: Counter,
    // Packets dropped because this node's queue was full
    drops: Counter,
}
impl GnodeCntrs {
    /// Create the generic enqueue/drop counters for the node `name`.
    pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
        let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
        let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
        GnodeCntrs { enqed, drops }
    }
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
struct Gnode<T> {
// The feature/client object
client: Box<dyn Gclient<T>>,
// Name of the feature/client
name: String,
// Names of all the nodes this node will have edges to (ie will send packets to)
next_names: Vec<String>,
// Node ids corresponding to the names in next_names
next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
    /// Wrap a client node. `next_nodes` starts empty and is filled in by
    /// `Graph::finalize()`, which resolves `next_names` to node indices.
    fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
        Gnode {
            client,
            name,
            next_names,
            next_nodes: Vec::new(),
        }
    }
    /// Per-thread copy: the client decides what to clone versus share,
    /// and gets a fresh `Counters` handle since counters are per thread.
    fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
        Gnode {
            client: self.client.clone(counters, log),
            name: self.name.clone(),
            next_names: self.next_names.clone(),
            next_nodes: self.next_nodes.clone(),
        }
    }
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct Graph<T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
    /// A new graph is created with just one node in it, a Drop Node that just drops any packet
    /// it receives.
    ///
    /// The drop node is registered first and therefore occupies index 0;
    /// `index()` relies on this by returning 0 for unknown names, so any
    /// unresolved edge routes packets to the drop node.
    pub fn new(
        thread: usize,
        pool: Box<dyn PacketPool>,
        queue: Arc<ArrayQueue<BoxPkt>>,
        counters: &mut Counters,
    ) -> Self {
        let mut g = Graph {
            thread,
            nodes: Vec::with_capacity(GRAPH_INIT_SZ),
            perf: Vec::with_capacity(GRAPH_INIT_SZ),
            vectors: Vec::with_capacity(GRAPH_INIT_SZ),
            counters: Vec::with_capacity(GRAPH_INIT_SZ),
            indices: HashMap::with_capacity(GRAPH_INIT_SZ),
            pool,
            queue,
        };
        let init = GnodeInit {
            name: names::DROP.to_string(),
            next_names: vec![],
            cntrs: GnodeCntrs::new(names::DROP, counters),
            perf: Perf::new(names::DROP, counters),
        };
        let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
        g.add(Box::new(DropNode { count }), init);
        g
    }
    /// Clone the entire graph. That relies on each graph node feature/client providing
    /// an ability to clone() itself
    ///
    /// Counters, perf trackers and packet queues are created fresh: they
    /// are per-thread state and must not be shared between graph copies.
    pub fn clone(
        &self,
        thread: usize,
        pool: Box<dyn PacketPool>,
        queue: Arc<ArrayQueue<BoxPkt>>,
        counters: &mut Counters,
        log: Arc<Logger>,
    ) -> Self {
        let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
        for n in self.nodes.iter() {
            nodes.push(n.clone(counters, log.clone()));
            perf.push(Perf::new(&n.name, counters));
            vectors.push(VecDeque::with_capacity(VEC_SIZE));
            cntrs.push(GnodeCntrs::new(&n.name, counters));
        }
        Graph {
            thread,
            nodes,
            perf,
            vectors,
            counters: cntrs,
            indices: self.indices.clone(),
            pool,
            queue,
        }
    }
    /// Add a new feature/client node to the graph.
    ///
    /// A node whose name is already registered is silently ignored. Note
    /// that `index()` returns 0 both for "unknown" and for the drop node,
    /// so this duplicate check works only because the drop node is the
    /// very first node added.
    pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
        let index = self.index(&init.name);
        if index != 0 {
            return; // Gclient already registered
        }
        self.nodes
            .push(Gnode::new(client, init.name.clone(), init.next_names));
        self.perf.push(init.perf);
        self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
        self.counters.push(init.cntrs);
        let index = self.nodes.len() - 1; // 0 based index
        self.indices.insert(init.name, index);
    }
    /// Map a node name to its index. Unknown names resolve to 0, the
    /// index of the drop node — packets to unresolved edges get dropped.
    fn index(&self, name: &str) -> usize {
        if let Some(&index) = self.indices.get(name) {
            index
        } else {
            0
        }
    }
    /// Any time a new node is added to the graph, there might be other nodes that have
    /// specified this new node as their next node - so we have to resolve those names
    /// to a proper node index. The finalize() will walk through all nodes and resolve
    /// next_name to node index. This is typically called after a new node is added
    pub fn finalize(&mut self) {
        for n in 0..self.nodes.len() {
            let node = &self.nodes[n];
            for l in 0..node.next_names.len() {
                // Re-borrow immutably, then mutably: `self.index()` needs
                // `&self`, which cannot coexist with a live `&mut` borrow
                // of the node being patched.
                let node = &self.nodes[n];
                let index = self.index(&node.next_names[l]);
                let node = &mut self.nodes[n];
                if node.next_nodes.len() <= l {
                    node.next_nodes.resize(l + 1, 0);
                }
                node.next_nodes[l] = index;
            }
        }
    }
    /// Run through all the nodes one single time, do whatever work is possible in that
    /// iteration, and return values which say if more work is pending and at what time
    /// the work has to be done: `(work_pending, earliest_wakeup_ns)`.
    pub fn run(&mut self) -> (bool, usize) {
        // First return all the free packets back to the pool
        while let Ok(p) = self.queue.pop() {
            self.pool.free(p);
        }
        let mut nsecs = std::usize::MAX;
        let mut work = false;
        for n in 0..self.nodes.len() {
            let node = &mut self.nodes[n];
            let client = &mut node.client;
            // Dispatch borrows the shared queues/counters plus this
            // node's edge list for the duration of one client call.
            let mut d = Dispatch {
                node: n,
                pool: &mut *self.pool,
                vectors: &mut self.vectors,
                counters: &mut self.counters,
                nodes: &node.next_nodes,
                work: false,
                wakeup: std::usize::MAX,
            };
            self.perf[n].start();
            client.dispatch(self.thread, &mut d);
            self.perf[n].stop();
            // Does client have more work pending, and when does it need to do that work ?
            if d.work {
                work = true;
                if d.wakeup < nsecs {
                    nsecs = d.wakeup;
                }
            }
        }
        (work, nsecs)
    }
    /// Deliver a control-plane message to the named node. Returns false
    /// for unknown names; note the drop node itself (index 0) cannot be
    /// targeted, since index 0 doubles as the "not found" sentinel.
    pub fn control_msg(&mut self, name: &str, message: T) -> bool {
        let index = self.index(name);
        if index == 0 {
            false
        } else {
            self.nodes[index].client.control_msg(self.thread, message);
            true
        }
    }
}
struct DropNode {
count: Counter,
}
impl<T> Gclient<T> for DropNode {
    /// Per-thread copy with a freshly created per-thread drop counter.
    fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
        Box::new(DropNode {
            count: Counter::new(counters, names::DROP, CounterType::Pkts, "count"),
        })
    }
    /// Drain every packet queued to the drop node, counting each one.
    fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
        while vectors.pop().is_some() {
            self.count.incr();
        }
    }
}
#[cfg(test)]
mod test;
| {
self.vectors[self.node].pop_front()
} | identifier_body |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
pub trait Driver: Sync {
fn fd(&self) -> Option<i32>;
fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
pub trait Gclient<T>: Send {
/// Make a clone() of the node, usually to be used in another thread. It is upto the
/// client to decide what should be cloned/copied and what should be shared. For example,
/// counters are always per thread and cant be shared, a new set of counters need to be
/// made per thread
fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
/// This API is called to hand over packets to the client for processing. Dispatch has
/// pop() API to get packets destined for the node, and push() API to push packets to
/// other graph nodes
fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
/// This API is called when a node gets a message from control plane, like for example
/// to modify the nodes forwarding tables etc..
fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
node: usize,
pub pool: &'d mut dyn PacketPool,
vectors: &'d mut Vec<VecDeque<BoxPkt>>,
counters: &'d mut Vec<GnodeCntrs>,
nodes: &'d Vec<usize>,
work: bool,
wakeup: usize,
}
impl<'d> Dispatch<'d> {
/// Get one of the packets queued up for a node
pub fn pop(&mut self) -> Option<BoxPkt> {
self.vectors[self.node].pop_front()
}
/// Queue one packet to another node
pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
let node = self.nodes[node];
if self.vectors[node].capacity() >= 1 {
self.vectors[node].push_back(pkt);
if node <= self.node {
self.work = true;
self.wakeup = 0;
}
self.counters[node].enqed.incr();
true
} else {
self.counters[node].drops.incr();
false
}
}
/// Specify the time when this node has work again/needs to be scheduled again
/// wakeup of zero means it has work right now, non zero wakeup indicates time
/// in nanoseconds from now when the node has work
pub fn wakeup(&mut self, wakeup: usize) {
if self.work | else {
self.work = true;
self.wakeup = wakeup;
}
}
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
/// A unique name for the node
pub name: String,
/// Names of all the nodes this node will have edges to (ie will send packets to)
pub next_names: Vec<String>,
/// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
pub cntrs: GnodeCntrs,
pub perf: Perf,
}
impl GnodeInit {
pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
GnodeInit {
name: self.name.clone(),
next_names: self.next_names.clone(),
cntrs: GnodeCntrs::new(&self.name, counters),
perf: Perf::new(&self.name, counters),
}
}
}
pub struct GnodeCntrs {
enqed: Counter,
drops: Counter,
}
impl GnodeCntrs {
pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
GnodeCntrs { enqed, drops }
}
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
struct Gnode<T> {
// The feature/client object
client: Box<dyn Gclient<T>>,
// Name of the feature/client
name: String,
// Names of all the nodes this node will have edges to (ie will send packets to)
next_names: Vec<String>,
// Node ids corresponding to the names in next_names
next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
Gnode {
client,
name,
next_names,
next_nodes: Vec::new(),
}
}
fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
Gnode {
client: self.client.clone(counters, log),
name: self.name.clone(),
next_names: self.next_names.clone(),
next_nodes: self.next_nodes.clone(),
}
}
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct Graph<T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
/// A new graph is created with just one node in it, a Drop Node that just drops any packet
/// it receives.
pub fn new(
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
) -> Self {
let mut g = Graph {
thread,
nodes: Vec::with_capacity(GRAPH_INIT_SZ),
perf: Vec::with_capacity(GRAPH_INIT_SZ),
vectors: Vec::with_capacity(GRAPH_INIT_SZ),
counters: Vec::with_capacity(GRAPH_INIT_SZ),
indices: HashMap::with_capacity(GRAPH_INIT_SZ),
pool,
queue,
};
let init = GnodeInit {
name: names::DROP.to_string(),
next_names: vec![],
cntrs: GnodeCntrs::new(names::DROP, counters),
perf: Perf::new(names::DROP, counters),
};
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
g.add(Box::new(DropNode { count }), init);
g
}
/// Clone the entire graph. That relies on each graph node feature/client providing
/// an ability to clone() itself
pub fn clone(
&self,
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
log: Arc<Logger>,
) -> Self {
let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
for n in self.nodes.iter() {
nodes.push(n.clone(counters, log.clone()));
perf.push(Perf::new(&n.name, counters));
vectors.push(VecDeque::with_capacity(VEC_SIZE));
cntrs.push(GnodeCntrs::new(&n.name, counters));
}
Graph {
thread,
nodes,
perf,
vectors,
counters: cntrs,
indices: self.indices.clone(),
pool,
queue,
}
}
/// Add a new feature/client node to the graph.
pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
let index = self.index(&init.name);
if index != 0 {
return; // Gclient already registered
}
self.nodes
.push(Gnode::new(client, init.name.clone(), init.next_names));
self.perf.push(init.perf);
self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
self.counters.push(init.cntrs);
let index = self.nodes.len() - 1; // 0 based index
self.indices.insert(init.name, index);
}
fn index(&self, name: &str) -> usize {
if let Some(&index) = self.indices.get(name) {
index
} else {
0
}
}
/// Any time a new node is added to the graph, there might be other nodes that have
/// specified this new node as their next node - so we have to resolve those names
/// to a proper node index. The finalize() will walk through all nodes and resolve
/// next_name to node index. This is typically called after a new node is added
pub fn finalize(&mut self) {
for n in 0..self.nodes.len() {
let node = &self.nodes[n];
for l in 0..node.next_names.len() {
let node = &self.nodes[n];
let index = self.index(&node.next_names[l]);
let node = &mut self.nodes[n];
if node.next_nodes.len() <= l {
node.next_nodes.resize(l + 1, 0);
}
node.next_nodes[l] = index;
}
}
}
// Run through all the nodes one single time, do whatever work is possible in that
// iteration, and return values which say if more work is pending and at what time
// the work has to be done
pub fn run(&mut self) -> (bool, usize) {
// First return all the free packets back to the pool
while let Ok(p) = self.queue.pop() {
self.pool.free(p);
}
let mut nsecs = std::usize::MAX;
let mut work = false;
for n in 0..self.nodes.len() {
let node = &mut self.nodes[n];
let client = &mut node.client;
let mut d = Dispatch {
node: n,
pool: &mut *self.pool,
vectors: &mut self.vectors,
counters: &mut self.counters,
nodes: &node.next_nodes,
work: false,
wakeup: std::usize::MAX,
};
self.perf[n].start();
client.dispatch(self.thread, &mut d);
self.perf[n].stop();
// Does client have more work pending, and when does it need to do that work ?
if d.work {
work = true;
if d.wakeup < nsecs {
nsecs = d.wakeup;
}
}
}
(work, nsecs)
}
pub fn control_msg(&mut self, name: &str, message: T) -> bool {
let index = self.index(name);
if index == 0 {
false
} else {
self.nodes[index].client.control_msg(self.thread, message);
true
}
}
}
struct DropNode {
count: Counter,
}
impl<T> Gclient<T> for DropNode {
    /// Per-thread copy with a freshly created per-thread drop counter.
    fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
        Box::new(DropNode {
            count: Counter::new(counters, names::DROP, CounterType::Pkts, "count"),
        })
    }
    /// Drain every packet queued to the drop node, counting each one.
    fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
        while vectors.pop().is_some() {
            self.count.incr();
        }
    }
}
#[cfg(test)]
mod test;
| {
if wakeup < self.wakeup {
self.wakeup = wakeup;
}
} | conditional_block |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
pub trait Driver: Sync {
fn fd(&self) -> Option<i32>;
fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
pub trait Gclient<T>: Send {
/// Make a clone() of the node, usually to be used in another thread. It is upto the
/// client to decide what should be cloned/copied and what should be shared. For example,
/// counters are always per thread and cant be shared, a new set of counters need to be
/// made per thread
fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
/// This API is called to hand over packets to the client for processing. Dispatch has
/// pop() API to get packets destined for the node, and push() API to push packets to
/// other graph nodes
fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
/// This API is called when a node gets a message from control plane, like for example
/// to modify the nodes forwarding tables etc..
fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
node: usize,
pub pool: &'d mut dyn PacketPool,
vectors: &'d mut Vec<VecDeque<BoxPkt>>,
counters: &'d mut Vec<GnodeCntrs>,
nodes: &'d Vec<usize>,
work: bool,
wakeup: usize,
}
impl<'d> Dispatch<'d> {
/// Get one of the packets queued up for a node
pub fn pop(&mut self) -> Option<BoxPkt> {
self.vectors[self.node].pop_front()
}
/// Queue one packet to another node
pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
let node = self.nodes[node];
if self.vectors[node].capacity() >= 1 {
self.vectors[node].push_back(pkt);
if node <= self.node {
self.work = true;
self.wakeup = 0;
}
self.counters[node].enqed.incr();
true
} else {
self.counters[node].drops.incr();
false
}
}
/// Specify the time when this node has work again/needs to be scheduled again
/// wakeup of zero means it has work right now, non zero wakeup indicates time
/// in nanoseconds from now when the node has work
pub fn wakeup(&mut self, wakeup: usize) {
if self.work {
if wakeup < self.wakeup {
self.wakeup = wakeup;
}
} else {
self.work = true;
self.wakeup = wakeup;
}
}
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
/// A unique name for the node
pub name: String,
/// Names of all the nodes this node will have edges to (ie will send packets to)
pub next_names: Vec<String>,
/// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
pub cntrs: GnodeCntrs,
pub perf: Perf,
}
impl GnodeInit {
pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
GnodeInit {
name: self.name.clone(),
next_names: self.next_names.clone(),
cntrs: GnodeCntrs::new(&self.name, counters),
perf: Perf::new(&self.name, counters),
}
}
}
pub struct GnodeCntrs {
enqed: Counter,
drops: Counter,
}
impl GnodeCntrs {
pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
GnodeCntrs { enqed, drops }
}
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
struct Gnode<T> {
// The feature/client object
client: Box<dyn Gclient<T>>,
// Name of the feature/client
name: String,
// Names of all the nodes this node will have edges to (ie will send packets to)
next_names: Vec<String>,
// Node ids corresponding to the names in next_names
next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
Gnode {
client,
name,
next_names,
next_nodes: Vec::new(),
}
}
fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
Gnode {
client: self.client.clone(counters, log),
name: self.name.clone(),
next_names: self.next_names.clone(),
next_nodes: self.next_nodes.clone(),
}
}
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct | <T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
/// A new graph is created with just one node in it, a Drop Node that just drops any packet
/// it receives.
pub fn new(
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
) -> Self {
let mut g = Graph {
thread,
nodes: Vec::with_capacity(GRAPH_INIT_SZ),
perf: Vec::with_capacity(GRAPH_INIT_SZ),
vectors: Vec::with_capacity(GRAPH_INIT_SZ),
counters: Vec::with_capacity(GRAPH_INIT_SZ),
indices: HashMap::with_capacity(GRAPH_INIT_SZ),
pool,
queue,
};
let init = GnodeInit {
name: names::DROP.to_string(),
next_names: vec![],
cntrs: GnodeCntrs::new(names::DROP, counters),
perf: Perf::new(names::DROP, counters),
};
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
g.add(Box::new(DropNode { count }), init);
g
}
/// Clone the entire graph. That relies on each graph node feature/client providing
/// an ability to clone() itself
pub fn clone(
&self,
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
log: Arc<Logger>,
) -> Self {
let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
for n in self.nodes.iter() {
nodes.push(n.clone(counters, log.clone()));
perf.push(Perf::new(&n.name, counters));
vectors.push(VecDeque::with_capacity(VEC_SIZE));
cntrs.push(GnodeCntrs::new(&n.name, counters));
}
Graph {
thread,
nodes,
perf,
vectors,
counters: cntrs,
indices: self.indices.clone(),
pool,
queue,
}
}
/// Add a new feature/client node to the graph.
pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
let index = self.index(&init.name);
if index != 0 {
return; // Gclient already registered
}
self.nodes
.push(Gnode::new(client, init.name.clone(), init.next_names));
self.perf.push(init.perf);
self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
self.counters.push(init.cntrs);
let index = self.nodes.len() - 1; // 0 based index
self.indices.insert(init.name, index);
}
fn index(&self, name: &str) -> usize {
if let Some(&index) = self.indices.get(name) {
index
} else {
0
}
}
/// Any time a new node is added to the graph, other nodes may already name it
/// as one of their next nodes, so every `next_names` entry has to be resolved
/// (again) to a node index. Walks all nodes and rebuilds `next_nodes` from
/// `next_names`; typically called after a new node is added.
pub fn finalize(&mut self) {
    for n in 0..self.nodes.len() {
        // Resolve all names first through an immutable borrow, then write the
        // indices back under a single mutable borrow.
        let resolved: Vec<usize> = self.nodes[n]
            .next_names
            .iter()
            .map(|name| self.index(name))
            .collect();
        let node = &mut self.nodes[n];
        if node.next_nodes.len() < resolved.len() {
            node.next_nodes.resize(resolved.len(), 0);
        }
        node.next_nodes[..resolved.len()].copy_from_slice(&resolved);
    }
}
// Run through all the nodes one single time, do whatever work is possible in that
// iteration, and return values which say if more work is pending and at what time
// the work has to be done
//
// Returns (work_pending, wakeup): `wakeup` is the smallest nanosecond delay any
// node asked to be rescheduled with (std::usize::MAX when nothing is pending).
pub fn run(&mut self) -> (bool, usize) {
// First return all the free packets back to the pool
while let Ok(p) = self.queue.pop() {
self.pool.free(p);
}
let mut nsecs = std::usize::MAX;
let mut work = false;
for n in 0..self.nodes.len() {
let node = &mut self.nodes[n];
let client = &mut node.client;
// Dispatch borrows the graph's queues/counters for the duration of one
// client dispatch; `node: n` tells push()/pop() which queue is "ours".
let mut d = Dispatch {
node: n,
pool: &mut *self.pool,
vectors: &mut self.vectors,
counters: &mut self.counters,
nodes: &node.next_nodes,
work: false,
wakeup: std::usize::MAX,
};
// Time the client's dispatch for the per-node perf counters.
self.perf[n].start();
client.dispatch(self.thread, &mut d);
self.perf[n].stop();
// Does client have more work pending, and when does it need to do that work ?
if d.work {
work = true;
// Keep the earliest wakeup requested across all nodes.
if d.wakeup < nsecs {
nsecs = d.wakeup;
}
}
}
(work, nsecs)
}
/// Deliver a control-plane message to the node registered under `name`.
/// Returns false when no such node exists (index 0 doubles as the DROP node
/// and the not-found sentinel, and never receives control messages).
pub fn control_msg(&mut self, name: &str, message: T) -> bool {
    match self.index(name) {
        0 => false,
        index => {
            self.nodes[index].client.control_msg(self.thread, message);
            true
        }
    }
}
}
// The default sink node occupying slot 0 of every graph: packets delivered
// here are counted and implicitly freed when dropped.
struct DropNode {
    count: Counter,
}

impl<T> Gclient<T> for DropNode {
    fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
        Box::new(DropNode {
            count: Counter::new(counters, names::DROP, CounterType::Pkts, "count"),
        })
    }

    fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
        // Drain everything queued to us, counting each dropped packet.
        while vectors.pop().is_some() {
            self.count.incr();
        }
    }
}
#[cfg(test)]
mod test;
| Graph | identifier_name |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
/// Abstraction over a packet I/O driver (the thing that actually sends and
/// receives packets for the graph).
pub trait Driver: Sync {
/// Raw file descriptor for poll/select integration, if the driver has one.
fn fd(&self) -> Option<i32>;
/// Transmit one packet. NOTE(review): the returned usize is presumably the
/// number of bytes sent (0 on failure) — confirm against driver impls.
fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
/// Receive one packet with `headroom` bytes reserved at the front of the
/// buffer, or None when nothing is available.
fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
pub trait Gclient<T>: Send {
/// Make a clone() of the node, usually to be used in another thread. It is upto the
/// client to decide what should be cloned/copied and what should be shared. For example,
/// counters are always per thread and cant be shared, a new set of counters need to be
/// made per thread
fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
/// This API is called to hand over packets to the client for processing. Dispatch has
/// pop() API to get packets destined for the node, and push() API to push packets to
/// other graph nodes
fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
/// This API is called when a node gets a message from control plane, like for example
/// to modify the nodes forwarding tables etc..
fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
node: usize,
pub pool: &'d mut dyn PacketPool,
vectors: &'d mut Vec<VecDeque<BoxPkt>>,
counters: &'d mut Vec<GnodeCntrs>,
nodes: &'d Vec<usize>,
work: bool,
wakeup: usize,
}
impl<'d> Dispatch<'d> {
/// Get one of the packets queued up for a node
pub fn pop(&mut self) -> Option<BoxPkt> {
self.vectors[self.node].pop_front()
}
/// Queue one packet to another node
pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
let node = self.nodes[node];
if self.vectors[node].capacity() >= 1 {
self.vectors[node].push_back(pkt);
if node <= self.node {
self.work = true;
self.wakeup = 0;
}
self.counters[node].enqed.incr();
true
} else {
self.counters[node].drops.incr();
false
}
}
/// Specify the time when this node has work again/needs to be scheduled again
/// wakeup of zero means it has work right now, non zero wakeup indicates time
/// in nanoseconds from now when the node has work
pub fn wakeup(&mut self, wakeup: usize) {
if self.work {
if wakeup < self.wakeup {
self.wakeup = wakeup;
}
} else {
self.work = true;
self.wakeup = wakeup;
}
}
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
/// A unique name for the node
pub name: String,
/// Names of all the nodes this node will have edges to (ie will send packets to)
pub next_names: Vec<String>,
/// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
pub cntrs: GnodeCntrs,
pub perf: Perf,
}
impl GnodeInit {
    /// Build a per-thread copy of this init block: name and edge list are
    /// duplicated verbatim, while the counters and perf tracker are recreated
    /// against the destination thread's counter set.
    pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
        let name = self.name.clone();
        let cntrs = GnodeCntrs::new(&name, counters);
        let perf = Perf::new(&name, counters);
        GnodeInit {
            next_names: self.next_names.clone(),
            cntrs,
            perf,
            name,
        }
    }
}
pub struct GnodeCntrs {
enqed: Counter,
drops: Counter,
}
impl GnodeCntrs {
pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
GnodeCntrs { enqed, drops }
}
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
struct Gnode<T> {
// The feature/client object
client: Box<dyn Gclient<T>>,
// Name of the feature/client
name: String,
// Names of all the nodes this node will have edges to (ie will send packets to)
next_names: Vec<String>,
// Node ids corresponding to the names in next_names
next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
Gnode {
client,
name,
next_names, | next_nodes: Vec::new(),
}
}
fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
Gnode {
client: self.client.clone(counters, log),
name: self.name.clone(),
next_names: self.next_names.clone(),
next_nodes: self.next_nodes.clone(),
}
}
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct Graph<T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
/// A new graph is created with just one node in it, a Drop Node that just drops any packet
/// it receives.
///
/// The drop node always occupies slot 0, which is what lets `index()` use 0 as
/// its "name not found" value: packets routed toward an unknown node are dropped.
pub fn new(
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
) -> Self {
let mut g = Graph {
thread,
nodes: Vec::with_capacity(GRAPH_INIT_SZ),
perf: Vec::with_capacity(GRAPH_INIT_SZ),
vectors: Vec::with_capacity(GRAPH_INIT_SZ),
counters: Vec::with_capacity(GRAPH_INIT_SZ),
indices: HashMap::with_capacity(GRAPH_INIT_SZ),
pool,
queue,
};
// Install the DROP node as node 0 before anything else can register.
let init = GnodeInit {
name: names::DROP.to_string(),
next_names: vec![],
cntrs: GnodeCntrs::new(names::DROP, counters),
perf: Perf::new(names::DROP, counters),
};
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
g.add(Box::new(DropNode { count }), init);
g
}
/// Clone the entire graph. That relies on each graph node feature/client providing
/// an ability to clone() itself
pub fn clone(
&self,
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
log: Arc<Logger>,
) -> Self {
let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
for n in self.nodes.iter() {
nodes.push(n.clone(counters, log.clone()));
perf.push(Perf::new(&n.name, counters));
vectors.push(VecDeque::with_capacity(VEC_SIZE));
cntrs.push(GnodeCntrs::new(&n.name, counters));
}
Graph {
thread,
nodes,
perf,
vectors,
counters: cntrs,
indices: self.indices.clone(),
pool,
queue,
}
}
/// Add a new feature/client node to the graph.
pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
let index = self.index(&init.name);
if index != 0 {
return; // Gclient already registered
}
self.nodes
.push(Gnode::new(client, init.name.clone(), init.next_names));
self.perf.push(init.perf);
self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
self.counters.push(init.cntrs);
let index = self.nodes.len() - 1; // 0 based index
self.indices.insert(init.name, index);
}
fn index(&self, name: &str) -> usize {
if let Some(&index) = self.indices.get(name) {
index
} else {
0
}
}
/// Any time a new node is added to the graph, there might be other nodes that have
/// specified this new node as their next node - so we have to resolve those names
/// to a proper node index. The finalize() will walk through all nodes and resolve
/// next_name to node index. This is typically called after a new node is added
pub fn finalize(&mut self) {
for n in 0..self.nodes.len() {
let node = &self.nodes[n];
for l in 0..node.next_names.len() {
let node = &self.nodes[n];
let index = self.index(&node.next_names[l]);
let node = &mut self.nodes[n];
if node.next_nodes.len() <= l {
node.next_nodes.resize(l + 1, 0);
}
node.next_nodes[l] = index;
}
}
}
// Run through all the nodes one single time, do whatever work is possible in that
// iteration, and return values which say if more work is pending and at what time
// the work has to be done
pub fn run(&mut self) -> (bool, usize) {
// First return all the free packets back to the pool
while let Ok(p) = self.queue.pop() {
self.pool.free(p);
}
let mut nsecs = std::usize::MAX;
let mut work = false;
for n in 0..self.nodes.len() {
let node = &mut self.nodes[n];
let client = &mut node.client;
let mut d = Dispatch {
node: n,
pool: &mut *self.pool,
vectors: &mut self.vectors,
counters: &mut self.counters,
nodes: &node.next_nodes,
work: false,
wakeup: std::usize::MAX,
};
self.perf[n].start();
client.dispatch(self.thread, &mut d);
self.perf[n].stop();
// Does client have more work pending, and when does it need to do that work ?
if d.work {
work = true;
if d.wakeup < nsecs {
nsecs = d.wakeup;
}
}
}
(work, nsecs)
}
pub fn control_msg(&mut self, name: &str, message: T) -> bool {
let index = self.index(name);
if index == 0 {
false
} else {
self.nodes[index].client.control_msg(self.thread, message);
true
}
}
}
struct DropNode {
count: Counter,
}
impl<T> Gclient<T> for DropNode {
fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
Box::new(DropNode { count })
}
fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
while let Some(_) = vectors.pop() {
self.count.incr();
}
}
}
#[cfg(test)]
mod test; | random_line_split | |
mysql_connector.go | package mysql
import (
"bytes"
"context"
"database/sql/driver"
"fmt"
"io"
"reflect"
"strconv"
"strings"
typex "github.com/go-courier/x/types"
"github.com/go-courier/sqlx/v2"
"github.com/go-courier/sqlx/v2/builder"
"github.com/go-courier/sqlx/v2/migration"
"github.com/go-sql-driver/mysql"
)
var _ interface {
driver.Connector
builder.Dialect
} = (*MysqlConnector)(nil)
type MysqlConnector struct {
Host string
DBName string
Extra string
Engine string
Charset string
}
// dsn assembles a go-sql-driver DSN of the form "host/dbName[?extra]".
func dsn(host string, dbName string, extra string) string {
	suffix := ""
	if extra != "" {
		suffix = "?" + extra
	}
	return host + "/" + dbName + suffix
}
// WithDBName returns a new connector bound to dbName. The receiver is taken
// by value, so the original connector is left untouched.
func (c MysqlConnector) WithDBName(dbName string) driver.Connector {
	bound := c
	bound.DBName = dbName
	return &bound
}
func (c *MysqlConnector) Migrate(ctx context.Context, db sqlx.DBExecutor) error {
output := migration.MigrationOutputFromContext(ctx)
// mysql without schema
d := db.D().WithSchema("")
dialect := db.Dialect()
prevDB, err := dbFromInformationSchema(db)
if err != nil {
return err
}
exec := func(expr builder.SqlExpr) error {
if expr == nil || expr.IsNil() {
return nil
}
if output != nil {
_, _ = io.WriteString(output, builder.ResolveExpr(expr).Query())
_, _ = io.WriteString(output, "\n")
return nil
}
_, err := db.ExecExpr(expr)
return err
}
if prevDB == nil {
prevDB = &sqlx.Database{
Name: d.Name,
}
if err := exec(dialect.CreateDatabase(d.Name)); err != nil |
}
for _, name := range d.Tables.TableNames() {
table := d.Tables.Table(name)
prevTable := prevDB.Table(name)
if prevTable == nil {
for _, expr := range dialect.CreateTableIsNotExists(table) {
if err := exec(expr); err != nil {
return err
}
}
continue
}
exprList := table.Diff(prevTable, dialect)
for _, expr := range exprList {
if err := exec(expr); err != nil {
return err
}
}
}
return nil
}
// Connect opens a driver connection to the configured database. When the
// target database does not exist yet (IsErrorUnknownDatabase, MySQL 1049),
// it connects without a database name, issues CREATE DATABASE, closes that
// bootstrap connection, and retries the original connect via recursion.
func (c *MysqlConnector) Connect(ctx context.Context) (driver.Conn, error) {
d := c.Driver()
conn, err := d.Open(dsn(c.Host, c.DBName, c.Extra))
if err != nil {
if c.IsErrorUnknownDatabase(err) {
// Bootstrap path: connect with no database selected so we can create it.
conn, err := d.Open(dsn(c.Host, "", c.Extra))
if err != nil {
return nil, err
}
if _, err := conn.(driver.ExecerContext).ExecContext(context.Background(), builder.ResolveExpr(c.CreateDatabase(c.DBName)).Query(), nil); err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}
// Retry the original connection now that the database exists.
return c.Connect(ctx)
}
return nil, err
}
return conn, nil
}
// Driver returns the logging-wrapped mysql driver used for all connections.
func (c MysqlConnector) Driver() driver.Driver {
return (&MySqlLoggingDriver{}).Driver()
}
// DriverName identifies this dialect/driver to sqlx ("mysql").
func (MysqlConnector) DriverName() string {
return "mysql"
}
// PrimaryKeyName is the reserved key name MySQL uses for the primary index.
func (MysqlConnector) PrimaryKeyName() string {
return "primary"
}
// IsErrorUnknownDatabase reports whether err is MySQL error 1049 (unknown database).
func (c MysqlConnector) IsErrorUnknownDatabase(err error) bool {
	mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError)
	return ok && mysqlErr.Number == 1049
}

// IsErrorConflict reports whether err is MySQL error 1062 (duplicate key).
func (c MysqlConnector) IsErrorConflict(err error) bool {
	mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError)
	return ok && mysqlErr.Number == 1062
}
// quoteString wraps name in MySQL backquotes, leaving it untouched only when
// it is already backquoted.
//
// Fix: the original returned any name shorter than 2 characters as-is, so a
// single-character identifier was emitted without quotes.
func quoteString(name string) string {
	if len(name) >= 2 && name[0] == '`' && name[len(name)-1] == '`' {
		return name
	}
	return "`" + name + "`"
}
func (c *MysqlConnector) CreateDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("CREATE DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
// CreateSchema builds a "CREATE SCHEMA" statement.
//
// Consistency fix: quote the schema identifier via quoteString, matching
// CreateDatabase/DropDatabase, so names colliding with keywords or containing
// special characters are emitted safely.
func (c *MysqlConnector) CreateSchema(schema string) builder.SqlExpr {
	e := builder.Expr("CREATE SCHEMA ")
	e.WriteQuery(quoteString(schema))
	e.WriteEnd()
	return e
}
func (c *MysqlConnector) DropDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("DROP DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddIndex(key *builder.Key) builder.SqlExpr {
if key.IsPrimary() {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(key.Table)
e.WriteQuery(" ADD PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
e := builder.Expr("CREATE ")
if key.Method == "SPATIAL" {
e.WriteQuery("SPATIAL ")
} else if key.IsUnique {
e.WriteQuery("UNIQUE ")
}
e.WriteQuery("INDEX ")
e.WriteQuery(key.Name)
if key.Method == "BTREE" || key.Method == "HASH" {
e.WriteQuery(" USING ")
e.WriteQuery(key.Method)
}
e.WriteQuery(" ON ")
e.WriteExpr(key.Table)
e.WriteQueryByte(' ')
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
// DropIndex builds the statement that removes key: "ALTER TABLE ... DROP
// PRIMARY KEY" for the primary key, "DROP INDEX <name> ON <table>" otherwise.
func (c *MysqlConnector) DropIndex(key *builder.Key) builder.SqlExpr {
	if key.IsPrimary() {
		e := builder.Expr("ALTER TABLE ")
		e.WriteExpr(key.Table)
		e.WriteQuery(" DROP PRIMARY KEY")
		e.WriteEnd()
		return e
	}
	e := builder.Expr("DROP INDEX ")
	e.WriteQuery(key.Name)
	e.WriteQuery(" ON ")
	e.WriteExpr(key.Table)
	e.WriteEnd()
	return e
}
// CreateTableIsNotExists renders a table as DDL: first a CREATE TABLE IF NOT
// EXISTS statement (columns plus inline PRIMARY KEY), then one AddIndex
// statement per non-primary key. Deprecated columns are skipped.
func (c *MysqlConnector) CreateTableIsNotExists(table *builder.Table) (exprs []builder.SqlExpr) {
expr := builder.Expr("CREATE TABLE IF NOT EXISTS ")
expr.WriteExpr(table)
expr.WriteQueryByte(' ')
expr.WriteGroup(func(e *builder.Ex) {
if table.Columns.IsNil() {
return
}
// One "<name> <type>" entry per live column, comma-separated.
table.Columns.Range(func(col *builder.Column, idx int) {
if col.DeprecatedActions != nil {
return
}
if idx > 0 {
e.WriteQueryByte(',')
}
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
})
// Only the primary key goes inline; other indexes become separate statements below.
table.Keys.Range(func(key *builder.Key, idx int) {
if key.IsPrimary() {
e.WriteQueryByte(',')
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteQuery("PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
}
})
expr.WriteQueryByte('\n')
})
// Engine/charset fall back to InnoDB / utf8mb4 when not configured.
expr.WriteQuery(" ENGINE=")
if c.Engine == "" {
expr.WriteQuery("InnoDB")
} else {
expr.WriteQuery(c.Engine)
}
expr.WriteQuery(" CHARSET=")
if c.Charset == "" {
expr.WriteQuery("utf8mb4")
} else {
expr.WriteQuery(c.Charset)
}
expr.WriteEnd()
exprs = append(exprs, expr)
table.Keys.Range(func(key *builder.Key, idx int) {
if !key.IsPrimary() {
exprs = append(exprs, c.AddIndex(key))
}
})
return
}
func (c *MysqlConnector) DropTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("DROP TABLE IF EXISTS ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) TruncateTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("TRUNCATE TABLE ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" ADD COLUMN ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
e.WriteEnd()
return e
}
func (c *MysqlConnector) RenameColumn(col *builder.Column, target *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" CHANGE ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(target)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(target.ColumnType))
e.WriteEnd()
return e
}
// ModifyColumn builds "ALTER TABLE ... MODIFY COLUMN ..." and appends the
// previous column type inside a trailing SQL comment for migration review.
//
// Fix: the original wrote " /* FROM" with no trailing space, fusing "FROM"
// with the previous type name (e.g. "/* FROMbigint */").
func (c *MysqlConnector) ModifyColumn(col *builder.Column, prev *builder.Column) builder.SqlExpr {
	e := builder.Expr("ALTER TABLE ")
	e.WriteExpr(col.Table)
	e.WriteQuery(" MODIFY COLUMN ")
	e.WriteExpr(col)
	e.WriteQueryByte(' ')
	e.WriteExpr(c.DataType(col.ColumnType))
	e.WriteQuery(" /* FROM ")
	e.WriteExpr(c.DataType(prev.ColumnType))
	e.WriteQuery(" */")
	e.WriteEnd()
	return e
}
func (c *MysqlConnector) DropColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" DROP COLUMN ")
e.WriteQuery(col.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) DataType(columnType *builder.ColumnType) builder.SqlExpr {
dbDataType := dealias(c.dbDataType(columnType.Type, columnType))
return builder.Expr(dbDataType + autocompleteSize(dbDataType, columnType) + c.dataTypeModify(columnType))
}
func (c *MysqlConnector) dataType(typ typex.Type, columnType *builder.ColumnType) string {
dbDataType := dealias(c.dbDataType(typ, columnType))
return dbDataType + autocompleteSize(dbDataType, columnType)
}
func (c *MysqlConnector) dbDataType(typ typex.Type, columnType *builder.ColumnType) string {
if columnType.DataType != "" {
return columnType.DataType
}
if rv, ok := typex.TryNew(typ); ok {
if dtd, ok := rv.Interface().(builder.DataTypeDescriber); ok {
return dtd.DataType(c.DriverName())
}
}
switch typ.Kind() {
case reflect.Ptr:
return c.dataType(typ.Elem(), columnType)
case reflect.Bool:
return "boolean"
case reflect.Int8:
return "tinyint"
case reflect.Uint8:
return "tinyint unsigned"
case reflect.Int16:
return "smallint"
case reflect.Uint16:
return "smallint unsigned"
case reflect.Int, reflect.Int32:
return "int"
case reflect.Uint, reflect.Uint32:
return "int unsigned"
case reflect.Int64:
return "bigint"
case reflect.Uint64:
return "bigint unsigned"
case reflect.Float32:
return "float"
case reflect.Float64:
return "double"
case reflect.String:
size := columnType.Length
if size < 65535/3 {
return "varchar"
}
return "text"
case reflect.Slice:
if typ.Elem().Kind() == reflect.Uint8 {
return "mediumblob"
}
}
switch typ.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "double"
case "NullBool":
return "tinyint"
case "Time":
return "datetime"
}
panic(fmt.Errorf("unsupport type %s", typ))
}
// dataTypeModify renders the column-type suffix — NOT NULL, AUTO_INCREMENT,
// DEFAULT ..., ON UPDATE ... — each piece prefixed with a space.
func (c *MysqlConnector) dataTypeModify(columnType *builder.ColumnType) string {
	var b strings.Builder
	if !columnType.Null {
		b.WriteString(" NOT NULL")
	}
	if columnType.AutoIncrement {
		b.WriteString(" AUTO_INCREMENT")
	}
	if columnType.Default != nil {
		b.WriteString(" DEFAULT ")
		b.WriteString(*columnType.Default)
	}
	if columnType.OnUpdate != nil {
		b.WriteString(" ON UPDATE ")
		b.WriteString(*columnType.OnUpdate)
	}
	return b.String()
}
func autocompleteSize(dataType string, columnType *builder.ColumnType) string {
switch strings.ToLower(dataType) {
case "varchar":
size := columnType.Length
if size == 0 {
size = 255
}
return sizeModifier(size, columnType.Decimal)
case "float", "double", "decimal":
if columnType.Length > 0 {
return sizeModifier(columnType.Length, columnType.Decimal)
}
}
return ""
}
func dealias(dataType string) string {
return dataType
}
// sizeModifier renders "(length)" or "(length,decimal)"; an empty string when
// length is zero (no explicit size).
func sizeModifier(length uint64, decimal uint64) string {
	if length == 0 {
		return ""
	}
	if decimal > 0 {
		return fmt.Sprintf("(%d,%d)", length, decimal)
	}
	return fmt.Sprintf("(%d)", length)
}
| {
return err
} | conditional_block |
mysql_connector.go | package mysql
import (
"bytes"
"context"
"database/sql/driver"
"fmt"
"io"
"reflect"
"strconv"
"strings"
typex "github.com/go-courier/x/types"
"github.com/go-courier/sqlx/v2"
"github.com/go-courier/sqlx/v2/builder"
"github.com/go-courier/sqlx/v2/migration"
"github.com/go-sql-driver/mysql"
)
var _ interface {
driver.Connector
builder.Dialect
} = (*MysqlConnector)(nil)
type MysqlConnector struct {
Host string
DBName string
Extra string
Engine string
Charset string
}
func dsn(host string, dbName string, extra string) string {
if extra != "" {
extra = "?" + extra
}
return host + "/" + dbName + extra
}
func (c MysqlConnector) WithDBName(dbName string) driver.Connector {
c.DBName = dbName
return &c
}
// Migrate diffs the desired schema (db.D()) against what information_schema
// reports and applies the DDL needed to converge. When a MigrationOutput is
// present in ctx, statements are written to it (dry run) instead of executed.
func (c *MysqlConnector) Migrate(ctx context.Context, db sqlx.DBExecutor) error {
output := migration.MigrationOutputFromContext(ctx)
// mysql without schema
d := db.D().WithSchema("")
dialect := db.Dialect()
prevDB, err := dbFromInformationSchema(db)
if err != nil {
return err
}
// exec either prints the statement (dry run) or runs it against db.
exec := func(expr builder.SqlExpr) error {
if expr == nil || expr.IsNil() {
return nil
}
if output != nil {
_, _ = io.WriteString(output, builder.ResolveExpr(expr).Query())
_, _ = io.WriteString(output, "\n")
return nil
}
_, err := db.ExecExpr(expr)
return err
}
// Database missing entirely: create it and migrate against an empty snapshot.
if prevDB == nil {
prevDB = &sqlx.Database{
Name: d.Name,
}
if err := exec(dialect.CreateDatabase(d.Name)); err != nil {
return err
}
}
// New tables are created outright; existing tables get ALTERs from Diff.
for _, name := range d.Tables.TableNames() {
table := d.Tables.Table(name)
prevTable := prevDB.Table(name)
if prevTable == nil {
for _, expr := range dialect.CreateTableIsNotExists(table) {
if err := exec(expr); err != nil {
return err
}
}
continue
}
exprList := table.Diff(prevTable, dialect)
for _, expr := range exprList {
if err := exec(expr); err != nil {
return err
}
}
}
return nil
}
func (c *MysqlConnector) Connect(ctx context.Context) (driver.Conn, error) {
d := c.Driver()
conn, err := d.Open(dsn(c.Host, c.DBName, c.Extra))
if err != nil {
if c.IsErrorUnknownDatabase(err) {
conn, err := d.Open(dsn(c.Host, "", c.Extra))
if err != nil {
return nil, err
}
if _, err := conn.(driver.ExecerContext).ExecContext(context.Background(), builder.ResolveExpr(c.CreateDatabase(c.DBName)).Query(), nil); err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}
return c.Connect(ctx)
}
return nil, err
}
return conn, nil
}
func (c MysqlConnector) Driver() driver.Driver {
return (&MySqlLoggingDriver{}).Driver()
}
func (MysqlConnector) DriverName() string {
return "mysql"
}
func (MysqlConnector) PrimaryKeyName() string {
return "primary"
}
func (c MysqlConnector) IsErrorUnknownDatabase(err error) bool |
func (c MysqlConnector) IsErrorConflict(err error) bool {
if mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError); ok && mysqlErr.Number == 1062 {
return true
}
return false
}
func quoteString(name string) string {
if len(name) < 2 ||
(name[0] == '`' && name[len(name)-1] == '`') {
return name
}
return "`" + name + "`"
}
func (c *MysqlConnector) CreateDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("CREATE DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
func (c *MysqlConnector) CreateSchema(schema string) builder.SqlExpr {
e := builder.Expr("CREATE SCHEMA ")
e.WriteQuery(schema)
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("DROP DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddIndex(key *builder.Key) builder.SqlExpr {
if key.IsPrimary() {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(key.Table)
e.WriteQuery(" ADD PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
e := builder.Expr("CREATE ")
if key.Method == "SPATIAL" {
e.WriteQuery("SPATIAL ")
} else if key.IsUnique {
e.WriteQuery("UNIQUE ")
}
e.WriteQuery("INDEX ")
e.WriteQuery(key.Name)
if key.Method == "BTREE" || key.Method == "HASH" {
e.WriteQuery(" USING ")
e.WriteQuery(key.Method)
}
e.WriteQuery(" ON ")
e.WriteExpr(key.Table)
e.WriteQueryByte(' ')
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropIndex(key *builder.Key) builder.SqlExpr {
if key.IsPrimary() {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(key.Table)
e.WriteQuery(" DROP PRIMARY KEY")
e.WriteEnd()
return e
}
e := builder.Expr("DROP ")
e.WriteQuery("INDEX ")
e.WriteQuery(key.Name)
e.WriteQuery(" ON ")
e.WriteExpr(key.Table)
e.WriteEnd()
return e
}
func (c *MysqlConnector) CreateTableIsNotExists(table *builder.Table) (exprs []builder.SqlExpr) {
expr := builder.Expr("CREATE TABLE IF NOT EXISTS ")
expr.WriteExpr(table)
expr.WriteQueryByte(' ')
expr.WriteGroup(func(e *builder.Ex) {
if table.Columns.IsNil() {
return
}
table.Columns.Range(func(col *builder.Column, idx int) {
if col.DeprecatedActions != nil {
return
}
if idx > 0 {
e.WriteQueryByte(',')
}
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
})
table.Keys.Range(func(key *builder.Key, idx int) {
if key.IsPrimary() {
e.WriteQueryByte(',')
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteQuery("PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
}
})
expr.WriteQueryByte('\n')
})
expr.WriteQuery(" ENGINE=")
if c.Engine == "" {
expr.WriteQuery("InnoDB")
} else {
expr.WriteQuery(c.Engine)
}
expr.WriteQuery(" CHARSET=")
if c.Charset == "" {
expr.WriteQuery("utf8mb4")
} else {
expr.WriteQuery(c.Charset)
}
expr.WriteEnd()
exprs = append(exprs, expr)
table.Keys.Range(func(key *builder.Key, idx int) {
if !key.IsPrimary() {
exprs = append(exprs, c.AddIndex(key))
}
})
return
}
func (c *MysqlConnector) DropTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("DROP TABLE IF EXISTS ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) TruncateTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("TRUNCATE TABLE ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" ADD COLUMN ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
e.WriteEnd()
return e
}
func (c *MysqlConnector) RenameColumn(col *builder.Column, target *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" CHANGE ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(target)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(target.ColumnType))
e.WriteEnd()
return e
}
func (c *MysqlConnector) ModifyColumn(col *builder.Column, prev *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" MODIFY COLUMN ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
e.WriteQuery(" /* FROM")
e.WriteExpr(c.DataType(prev.ColumnType))
e.WriteQuery(" */")
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" DROP COLUMN ")
e.WriteQuery(col.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) DataType(columnType *builder.ColumnType) builder.SqlExpr {
dbDataType := dealias(c.dbDataType(columnType.Type, columnType))
return builder.Expr(dbDataType + autocompleteSize(dbDataType, columnType) + c.dataTypeModify(columnType))
}
func (c *MysqlConnector) dataType(typ typex.Type, columnType *builder.ColumnType) string {
dbDataType := dealias(c.dbDataType(typ, columnType))
return dbDataType + autocompleteSize(dbDataType, columnType)
}
func (c *MysqlConnector) dbDataType(typ typex.Type, columnType *builder.ColumnType) string {
if columnType.DataType != "" {
return columnType.DataType
}
if rv, ok := typex.TryNew(typ); ok {
if dtd, ok := rv.Interface().(builder.DataTypeDescriber); ok {
return dtd.DataType(c.DriverName())
}
}
switch typ.Kind() {
case reflect.Ptr:
return c.dataType(typ.Elem(), columnType)
case reflect.Bool:
return "boolean"
case reflect.Int8:
return "tinyint"
case reflect.Uint8:
return "tinyint unsigned"
case reflect.Int16:
return "smallint"
case reflect.Uint16:
return "smallint unsigned"
case reflect.Int, reflect.Int32:
return "int"
case reflect.Uint, reflect.Uint32:
return "int unsigned"
case reflect.Int64:
return "bigint"
case reflect.Uint64:
return "bigint unsigned"
case reflect.Float32:
return "float"
case reflect.Float64:
return "double"
case reflect.String:
size := columnType.Length
if size < 65535/3 {
return "varchar"
}
return "text"
case reflect.Slice:
if typ.Elem().Kind() == reflect.Uint8 {
return "mediumblob"
}
}
switch typ.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "double"
case "NullBool":
return "tinyint"
case "Time":
return "datetime"
}
panic(fmt.Errorf("unsupport type %s", typ))
}
func (c *MysqlConnector) dataTypeModify(columnType *builder.ColumnType) string {
buf := bytes.NewBuffer(nil)
if !columnType.Null {
buf.WriteString(" NOT NULL")
}
if columnType.AutoIncrement {
buf.WriteString(" AUTO_INCREMENT")
}
if columnType.Default != nil {
buf.WriteString(" DEFAULT ")
buf.WriteString(*columnType.Default)
}
if columnType.OnUpdate != nil {
buf.WriteString(" ON UPDATE ")
buf.WriteString(*columnType.OnUpdate)
}
return buf.String()
}
func autocompleteSize(dataType string, columnType *builder.ColumnType) string {
switch strings.ToLower(dataType) {
case "varchar":
size := columnType.Length
if size == 0 {
size = 255
}
return sizeModifier(size, columnType.Decimal)
case "float", "double", "decimal":
if columnType.Length > 0 {
return sizeModifier(columnType.Length, columnType.Decimal)
}
}
return ""
}
func dealias(dataType string) string {
return dataType
}
func sizeModifier(length uint64, decimal uint64) string {
if length > 0 {
size := strconv.FormatUint(length, 10)
if decimal > 0 {
return "(" + size + "," + strconv.FormatUint(decimal, 10) + ")"
}
return "(" + size + ")"
}
return ""
}
| {
if mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError); ok && mysqlErr.Number == 1049 {
return true
}
return false
} | identifier_body |
mysql_connector.go | package mysql
import (
"bytes"
"context"
"database/sql/driver"
"fmt"
"io"
"reflect"
"strconv"
"strings"
typex "github.com/go-courier/x/types"
"github.com/go-courier/sqlx/v2"
"github.com/go-courier/sqlx/v2/builder"
"github.com/go-courier/sqlx/v2/migration"
"github.com/go-sql-driver/mysql"
)
var _ interface {
driver.Connector
builder.Dialect
} = (*MysqlConnector)(nil)
type MysqlConnector struct {
Host string
DBName string
Extra string
Engine string
Charset string
}
func dsn(host string, dbName string, extra string) string {
if extra != "" {
extra = "?" + extra
}
return host + "/" + dbName + extra
}
func (c MysqlConnector) WithDBName(dbName string) driver.Connector {
c.DBName = dbName
return &c
}
func (c *MysqlConnector) Migrate(ctx context.Context, db sqlx.DBExecutor) error {
output := migration.MigrationOutputFromContext(ctx)
// mysql without schema
d := db.D().WithSchema("")
dialect := db.Dialect()
prevDB, err := dbFromInformationSchema(db)
if err != nil {
return err
}
exec := func(expr builder.SqlExpr) error {
if expr == nil || expr.IsNil() {
return nil
}
if output != nil {
_, _ = io.WriteString(output, builder.ResolveExpr(expr).Query())
_, _ = io.WriteString(output, "\n")
return nil
}
_, err := db.ExecExpr(expr)
return err
}
if prevDB == nil {
prevDB = &sqlx.Database{
Name: d.Name,
}
if err := exec(dialect.CreateDatabase(d.Name)); err != nil {
return err
}
}
for _, name := range d.Tables.TableNames() {
table := d.Tables.Table(name)
prevTable := prevDB.Table(name)
if prevTable == nil { | for _, expr := range dialect.CreateTableIsNotExists(table) {
if err := exec(expr); err != nil {
return err
}
}
continue
}
exprList := table.Diff(prevTable, dialect)
for _, expr := range exprList {
if err := exec(expr); err != nil {
return err
}
}
}
return nil
}
func (c *MysqlConnector) Connect(ctx context.Context) (driver.Conn, error) {
d := c.Driver()
conn, err := d.Open(dsn(c.Host, c.DBName, c.Extra))
if err != nil {
if c.IsErrorUnknownDatabase(err) {
conn, err := d.Open(dsn(c.Host, "", c.Extra))
if err != nil {
return nil, err
}
if _, err := conn.(driver.ExecerContext).ExecContext(context.Background(), builder.ResolveExpr(c.CreateDatabase(c.DBName)).Query(), nil); err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}
return c.Connect(ctx)
}
return nil, err
}
return conn, nil
}
func (c MysqlConnector) Driver() driver.Driver {
return (&MySqlLoggingDriver{}).Driver()
}
func (MysqlConnector) DriverName() string {
return "mysql"
}
func (MysqlConnector) PrimaryKeyName() string {
return "primary"
}
func (c MysqlConnector) IsErrorUnknownDatabase(err error) bool {
if mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError); ok && mysqlErr.Number == 1049 {
return true
}
return false
}
func (c MysqlConnector) IsErrorConflict(err error) bool {
if mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError); ok && mysqlErr.Number == 1062 {
return true
}
return false
}
func quoteString(name string) string {
if len(name) < 2 ||
(name[0] == '`' && name[len(name)-1] == '`') {
return name
}
return "`" + name + "`"
}
func (c *MysqlConnector) CreateDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("CREATE DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
func (c *MysqlConnector) CreateSchema(schema string) builder.SqlExpr {
e := builder.Expr("CREATE SCHEMA ")
e.WriteQuery(schema)
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("DROP DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddIndex(key *builder.Key) builder.SqlExpr {
if key.IsPrimary() {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(key.Table)
e.WriteQuery(" ADD PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
e := builder.Expr("CREATE ")
if key.Method == "SPATIAL" {
e.WriteQuery("SPATIAL ")
} else if key.IsUnique {
e.WriteQuery("UNIQUE ")
}
e.WriteQuery("INDEX ")
e.WriteQuery(key.Name)
if key.Method == "BTREE" || key.Method == "HASH" {
e.WriteQuery(" USING ")
e.WriteQuery(key.Method)
}
e.WriteQuery(" ON ")
e.WriteExpr(key.Table)
e.WriteQueryByte(' ')
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropIndex(key *builder.Key) builder.SqlExpr {
if key.IsPrimary() {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(key.Table)
e.WriteQuery(" DROP PRIMARY KEY")
e.WriteEnd()
return e
}
e := builder.Expr("DROP ")
e.WriteQuery("INDEX ")
e.WriteQuery(key.Name)
e.WriteQuery(" ON ")
e.WriteExpr(key.Table)
e.WriteEnd()
return e
}
func (c *MysqlConnector) CreateTableIsNotExists(table *builder.Table) (exprs []builder.SqlExpr) {
expr := builder.Expr("CREATE TABLE IF NOT EXISTS ")
expr.WriteExpr(table)
expr.WriteQueryByte(' ')
expr.WriteGroup(func(e *builder.Ex) {
if table.Columns.IsNil() {
return
}
table.Columns.Range(func(col *builder.Column, idx int) {
if col.DeprecatedActions != nil {
return
}
if idx > 0 {
e.WriteQueryByte(',')
}
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
})
table.Keys.Range(func(key *builder.Key, idx int) {
if key.IsPrimary() {
e.WriteQueryByte(',')
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteQuery("PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
}
})
expr.WriteQueryByte('\n')
})
expr.WriteQuery(" ENGINE=")
if c.Engine == "" {
expr.WriteQuery("InnoDB")
} else {
expr.WriteQuery(c.Engine)
}
expr.WriteQuery(" CHARSET=")
if c.Charset == "" {
expr.WriteQuery("utf8mb4")
} else {
expr.WriteQuery(c.Charset)
}
expr.WriteEnd()
exprs = append(exprs, expr)
table.Keys.Range(func(key *builder.Key, idx int) {
if !key.IsPrimary() {
exprs = append(exprs, c.AddIndex(key))
}
})
return
}
func (c *MysqlConnector) DropTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("DROP TABLE IF EXISTS ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) TruncateTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("TRUNCATE TABLE ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" ADD COLUMN ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
e.WriteEnd()
return e
}
func (c *MysqlConnector) RenameColumn(col *builder.Column, target *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" CHANGE ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(target)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(target.ColumnType))
e.WriteEnd()
return e
}
func (c *MysqlConnector) ModifyColumn(col *builder.Column, prev *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" MODIFY COLUMN ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
e.WriteQuery(" /* FROM")
e.WriteExpr(c.DataType(prev.ColumnType))
e.WriteQuery(" */")
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" DROP COLUMN ")
e.WriteQuery(col.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) DataType(columnType *builder.ColumnType) builder.SqlExpr {
dbDataType := dealias(c.dbDataType(columnType.Type, columnType))
return builder.Expr(dbDataType + autocompleteSize(dbDataType, columnType) + c.dataTypeModify(columnType))
}
func (c *MysqlConnector) dataType(typ typex.Type, columnType *builder.ColumnType) string {
dbDataType := dealias(c.dbDataType(typ, columnType))
return dbDataType + autocompleteSize(dbDataType, columnType)
}
func (c *MysqlConnector) dbDataType(typ typex.Type, columnType *builder.ColumnType) string {
if columnType.DataType != "" {
return columnType.DataType
}
if rv, ok := typex.TryNew(typ); ok {
if dtd, ok := rv.Interface().(builder.DataTypeDescriber); ok {
return dtd.DataType(c.DriverName())
}
}
switch typ.Kind() {
case reflect.Ptr:
return c.dataType(typ.Elem(), columnType)
case reflect.Bool:
return "boolean"
case reflect.Int8:
return "tinyint"
case reflect.Uint8:
return "tinyint unsigned"
case reflect.Int16:
return "smallint"
case reflect.Uint16:
return "smallint unsigned"
case reflect.Int, reflect.Int32:
return "int"
case reflect.Uint, reflect.Uint32:
return "int unsigned"
case reflect.Int64:
return "bigint"
case reflect.Uint64:
return "bigint unsigned"
case reflect.Float32:
return "float"
case reflect.Float64:
return "double"
case reflect.String:
size := columnType.Length
if size < 65535/3 {
return "varchar"
}
return "text"
case reflect.Slice:
if typ.Elem().Kind() == reflect.Uint8 {
return "mediumblob"
}
}
switch typ.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "double"
case "NullBool":
return "tinyint"
case "Time":
return "datetime"
}
panic(fmt.Errorf("unsupport type %s", typ))
}
func (c *MysqlConnector) dataTypeModify(columnType *builder.ColumnType) string {
buf := bytes.NewBuffer(nil)
if !columnType.Null {
buf.WriteString(" NOT NULL")
}
if columnType.AutoIncrement {
buf.WriteString(" AUTO_INCREMENT")
}
if columnType.Default != nil {
buf.WriteString(" DEFAULT ")
buf.WriteString(*columnType.Default)
}
if columnType.OnUpdate != nil {
buf.WriteString(" ON UPDATE ")
buf.WriteString(*columnType.OnUpdate)
}
return buf.String()
}
func autocompleteSize(dataType string, columnType *builder.ColumnType) string {
switch strings.ToLower(dataType) {
case "varchar":
size := columnType.Length
if size == 0 {
size = 255
}
return sizeModifier(size, columnType.Decimal)
case "float", "double", "decimal":
if columnType.Length > 0 {
return sizeModifier(columnType.Length, columnType.Decimal)
}
}
return ""
}
func dealias(dataType string) string {
return dataType
}
func sizeModifier(length uint64, decimal uint64) string {
if length > 0 {
size := strconv.FormatUint(length, 10)
if decimal > 0 {
return "(" + size + "," + strconv.FormatUint(decimal, 10) + ")"
}
return "(" + size + ")"
}
return ""
} | random_line_split | |
mysql_connector.go | package mysql
import (
"bytes"
"context"
"database/sql/driver"
"fmt"
"io"
"reflect"
"strconv"
"strings"
typex "github.com/go-courier/x/types"
"github.com/go-courier/sqlx/v2"
"github.com/go-courier/sqlx/v2/builder"
"github.com/go-courier/sqlx/v2/migration"
"github.com/go-sql-driver/mysql"
)
var _ interface {
driver.Connector
builder.Dialect
} = (*MysqlConnector)(nil)
type MysqlConnector struct {
Host string
DBName string
Extra string
Engine string
Charset string
}
func dsn(host string, dbName string, extra string) string {
if extra != "" {
extra = "?" + extra
}
return host + "/" + dbName + extra
}
func (c MysqlConnector) WithDBName(dbName string) driver.Connector {
c.DBName = dbName
return &c
}
func (c *MysqlConnector) Migrate(ctx context.Context, db sqlx.DBExecutor) error {
output := migration.MigrationOutputFromContext(ctx)
// mysql without schema
d := db.D().WithSchema("")
dialect := db.Dialect()
prevDB, err := dbFromInformationSchema(db)
if err != nil {
return err
}
exec := func(expr builder.SqlExpr) error {
if expr == nil || expr.IsNil() {
return nil
}
if output != nil {
_, _ = io.WriteString(output, builder.ResolveExpr(expr).Query())
_, _ = io.WriteString(output, "\n")
return nil
}
_, err := db.ExecExpr(expr)
return err
}
if prevDB == nil {
prevDB = &sqlx.Database{
Name: d.Name,
}
if err := exec(dialect.CreateDatabase(d.Name)); err != nil {
return err
}
}
for _, name := range d.Tables.TableNames() {
table := d.Tables.Table(name)
prevTable := prevDB.Table(name)
if prevTable == nil {
for _, expr := range dialect.CreateTableIsNotExists(table) {
if err := exec(expr); err != nil {
return err
}
}
continue
}
exprList := table.Diff(prevTable, dialect)
for _, expr := range exprList {
if err := exec(expr); err != nil {
return err
}
}
}
return nil
}
func (c *MysqlConnector) Connect(ctx context.Context) (driver.Conn, error) {
d := c.Driver()
conn, err := d.Open(dsn(c.Host, c.DBName, c.Extra))
if err != nil {
if c.IsErrorUnknownDatabase(err) {
conn, err := d.Open(dsn(c.Host, "", c.Extra))
if err != nil {
return nil, err
}
if _, err := conn.(driver.ExecerContext).ExecContext(context.Background(), builder.ResolveExpr(c.CreateDatabase(c.DBName)).Query(), nil); err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}
return c.Connect(ctx)
}
return nil, err
}
return conn, nil
}
func (c MysqlConnector) | () driver.Driver {
return (&MySqlLoggingDriver{}).Driver()
}
func (MysqlConnector) DriverName() string {
return "mysql"
}
func (MysqlConnector) PrimaryKeyName() string {
return "primary"
}
func (c MysqlConnector) IsErrorUnknownDatabase(err error) bool {
if mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError); ok && mysqlErr.Number == 1049 {
return true
}
return false
}
func (c MysqlConnector) IsErrorConflict(err error) bool {
if mysqlErr, ok := sqlx.UnwrapAll(err).(*mysql.MySQLError); ok && mysqlErr.Number == 1062 {
return true
}
return false
}
func quoteString(name string) string {
if len(name) < 2 ||
(name[0] == '`' && name[len(name)-1] == '`') {
return name
}
return "`" + name + "`"
}
func (c *MysqlConnector) CreateDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("CREATE DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
func (c *MysqlConnector) CreateSchema(schema string) builder.SqlExpr {
e := builder.Expr("CREATE SCHEMA ")
e.WriteQuery(schema)
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropDatabase(dbName string) builder.SqlExpr {
e := builder.Expr("DROP DATABASE ")
e.WriteQuery(quoteString(dbName))
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddIndex(key *builder.Key) builder.SqlExpr {
if key.IsPrimary() {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(key.Table)
e.WriteQuery(" ADD PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
e := builder.Expr("CREATE ")
if key.Method == "SPATIAL" {
e.WriteQuery("SPATIAL ")
} else if key.IsUnique {
e.WriteQuery("UNIQUE ")
}
e.WriteQuery("INDEX ")
e.WriteQuery(key.Name)
if key.Method == "BTREE" || key.Method == "HASH" {
e.WriteQuery(" USING ")
e.WriteQuery(key.Method)
}
e.WriteQuery(" ON ")
e.WriteExpr(key.Table)
e.WriteQueryByte(' ')
e.WriteExpr(key.Def.TableExpr(key.Table))
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropIndex(key *builder.Key) builder.SqlExpr {
if key.IsPrimary() {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(key.Table)
e.WriteQuery(" DROP PRIMARY KEY")
e.WriteEnd()
return e
}
e := builder.Expr("DROP ")
e.WriteQuery("INDEX ")
e.WriteQuery(key.Name)
e.WriteQuery(" ON ")
e.WriteExpr(key.Table)
e.WriteEnd()
return e
}
func (c *MysqlConnector) CreateTableIsNotExists(table *builder.Table) (exprs []builder.SqlExpr) {
expr := builder.Expr("CREATE TABLE IF NOT EXISTS ")
expr.WriteExpr(table)
expr.WriteQueryByte(' ')
expr.WriteGroup(func(e *builder.Ex) {
if table.Columns.IsNil() {
return
}
table.Columns.Range(func(col *builder.Column, idx int) {
if col.DeprecatedActions != nil {
return
}
if idx > 0 {
e.WriteQueryByte(',')
}
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
})
table.Keys.Range(func(key *builder.Key, idx int) {
if key.IsPrimary() {
e.WriteQueryByte(',')
e.WriteQueryByte('\n')
e.WriteQueryByte('\t')
e.WriteQuery("PRIMARY KEY ")
e.WriteExpr(key.Def.TableExpr(key.Table))
}
})
expr.WriteQueryByte('\n')
})
expr.WriteQuery(" ENGINE=")
if c.Engine == "" {
expr.WriteQuery("InnoDB")
} else {
expr.WriteQuery(c.Engine)
}
expr.WriteQuery(" CHARSET=")
if c.Charset == "" {
expr.WriteQuery("utf8mb4")
} else {
expr.WriteQuery(c.Charset)
}
expr.WriteEnd()
exprs = append(exprs, expr)
table.Keys.Range(func(key *builder.Key, idx int) {
if !key.IsPrimary() {
exprs = append(exprs, c.AddIndex(key))
}
})
return
}
func (c *MysqlConnector) DropTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("DROP TABLE IF EXISTS ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) TruncateTable(t *builder.Table) builder.SqlExpr {
e := builder.Expr("TRUNCATE TABLE ")
e.WriteQuery(t.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) AddColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" ADD COLUMN ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
e.WriteEnd()
return e
}
func (c *MysqlConnector) RenameColumn(col *builder.Column, target *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" CHANGE ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(target)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(target.ColumnType))
e.WriteEnd()
return e
}
func (c *MysqlConnector) ModifyColumn(col *builder.Column, prev *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" MODIFY COLUMN ")
e.WriteExpr(col)
e.WriteQueryByte(' ')
e.WriteExpr(c.DataType(col.ColumnType))
e.WriteQuery(" /* FROM")
e.WriteExpr(c.DataType(prev.ColumnType))
e.WriteQuery(" */")
e.WriteEnd()
return e
}
func (c *MysqlConnector) DropColumn(col *builder.Column) builder.SqlExpr {
e := builder.Expr("ALTER TABLE ")
e.WriteExpr(col.Table)
e.WriteQuery(" DROP COLUMN ")
e.WriteQuery(col.Name)
e.WriteEnd()
return e
}
func (c *MysqlConnector) DataType(columnType *builder.ColumnType) builder.SqlExpr {
dbDataType := dealias(c.dbDataType(columnType.Type, columnType))
return builder.Expr(dbDataType + autocompleteSize(dbDataType, columnType) + c.dataTypeModify(columnType))
}
func (c *MysqlConnector) dataType(typ typex.Type, columnType *builder.ColumnType) string {
dbDataType := dealias(c.dbDataType(typ, columnType))
return dbDataType + autocompleteSize(dbDataType, columnType)
}
func (c *MysqlConnector) dbDataType(typ typex.Type, columnType *builder.ColumnType) string {
if columnType.DataType != "" {
return columnType.DataType
}
if rv, ok := typex.TryNew(typ); ok {
if dtd, ok := rv.Interface().(builder.DataTypeDescriber); ok {
return dtd.DataType(c.DriverName())
}
}
switch typ.Kind() {
case reflect.Ptr:
return c.dataType(typ.Elem(), columnType)
case reflect.Bool:
return "boolean"
case reflect.Int8:
return "tinyint"
case reflect.Uint8:
return "tinyint unsigned"
case reflect.Int16:
return "smallint"
case reflect.Uint16:
return "smallint unsigned"
case reflect.Int, reflect.Int32:
return "int"
case reflect.Uint, reflect.Uint32:
return "int unsigned"
case reflect.Int64:
return "bigint"
case reflect.Uint64:
return "bigint unsigned"
case reflect.Float32:
return "float"
case reflect.Float64:
return "double"
case reflect.String:
size := columnType.Length
if size < 65535/3 {
return "varchar"
}
return "text"
case reflect.Slice:
if typ.Elem().Kind() == reflect.Uint8 {
return "mediumblob"
}
}
switch typ.Name() {
case "NullInt64":
return "bigint"
case "NullFloat64":
return "double"
case "NullBool":
return "tinyint"
case "Time":
return "datetime"
}
panic(fmt.Errorf("unsupport type %s", typ))
}
func (c *MysqlConnector) dataTypeModify(columnType *builder.ColumnType) string {
buf := bytes.NewBuffer(nil)
if !columnType.Null {
buf.WriteString(" NOT NULL")
}
if columnType.AutoIncrement {
buf.WriteString(" AUTO_INCREMENT")
}
if columnType.Default != nil {
buf.WriteString(" DEFAULT ")
buf.WriteString(*columnType.Default)
}
if columnType.OnUpdate != nil {
buf.WriteString(" ON UPDATE ")
buf.WriteString(*columnType.OnUpdate)
}
return buf.String()
}
func autocompleteSize(dataType string, columnType *builder.ColumnType) string {
switch strings.ToLower(dataType) {
case "varchar":
size := columnType.Length
if size == 0 {
size = 255
}
return sizeModifier(size, columnType.Decimal)
case "float", "double", "decimal":
if columnType.Length > 0 {
return sizeModifier(columnType.Length, columnType.Decimal)
}
}
return ""
}
func dealias(dataType string) string {
return dataType
}
func sizeModifier(length uint64, decimal uint64) string {
if length > 0 {
size := strconv.FormatUint(length, 10)
if decimal > 0 {
return "(" + size + "," + strconv.FormatUint(decimal, 10) + ")"
}
return "(" + size + ")"
}
return ""
}
| Driver | identifier_name |
model_evaluation.py | '''
Helper functions for evaluating Classification Models
'''
# Imports
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Models
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
# Model support
from matplotlib.colors import ListedColormap
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (accuracy_score, classification_report, confusion_matrix, f1_score,
plot_confusion_matrix, precision_score, recall_score, roc_auc_score, roc_curve, auc,
precision_recall_curve)
def | (model_list, X, y, kf):
'''
Takes a list of models (at same scale) and performs KFolds cross-validation on each
Inputs: * list of models to be evaluated
* X, y training data
* KFolds parameters
Returns: * Scoring metrics for each CV Round
Metrics: Accuracy, Precision, Recall, F1, ROC AUC
'''
for model in model_list:
# Accuracy scores lists
acc_scores, prec_scores, recall_scores, f1_scores, roc_auc_scores = [], [], [], [], []
X_kf, y_kf = np.array(X), np.array(y)
for train_ind, val_ind in kf.split(X, y):
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
# Score model and append to list
acc_scores.append(accuracy_score(y_val, pred))
prec_scores.append(precision_score(y_val, pred))
recall_scores.append(recall_score(y_val, pred))
f1_scores.append(f1_score(y_val, pred))
roc_auc_scores.append(roc_auc_score(y_val, model.predict_proba(X_val)[:,1]))
print(f'Model: {model}')
print("-"*30)
print(f'Accuracy: {np.mean(acc_scores):.5f} +- {np.std(acc_scores):5f}')
print(f'Precision: {np.mean(prec_scores):.5f} +- {np.std(prec_scores):5f}')
print(f'Recall: {np.mean(recall_scores):.5f} +- {np.std(recall_scores):5f}')
print(f'F1 Score: {np.mean(f1_scores):.5f} +- {np.std(f1_scores):5f}')
print(f'ROC AUC: {np.mean(roc_auc_scores):.5f} +- {np.std(roc_auc_scores):5f}')
print("")
def roc_curve_cv(model, X, y, kf, model_alias):
'''
Plots ROC Curve with AUC score for each fold in KFold cross-validation
for a provided model.
Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
auc_score_list = []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
proba = model.predict_proba(X_val)[:,1]
# ROC curve calculations and plotting
fpr, tpr, _ = roc_curve(y_val, proba)
auc_score = roc_auc_score(y_val, proba)
auc_score_list.append(auc_score)
plt.plot(fpr, tpr, lw=2, alpha=0.25, label='Fold %d (AUC = %0.4f)' % (round, auc_score))
round += 1
# Final output
print(f'Average AUC Score: {np.mean(auc_score_list):.4f} +- {np.std(auc_score_list):4f}')
# Plot formatting
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='grey', label='Chance Line', alpha=.8)
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('False Positive Rate',fontsize=10)
plt.ylabel('True Positive Rate',fontsize=10)
plt.title(f'Cross-Validation ROC of {model_alias}',fontsize=11)
plt.legend(loc="lower right", prop={'size': 9}, frameon=False)
sns.despine()
plt.show()
def precision_recall_cv(model, X, y, kf, model_alias):
'''
Plots Precision-Recall Curves for each fold in KFold cross-validation
for a provided model.
Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
prec_scores, recall_scores = [], []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
proba = model.predict_proba(X_val)[:,1]
# Precicion/Recall curve calculations and plotting
model_precision, model_recall, _ = precision_recall_curve(y_val, proba)
prec_score = precision_score(y_val, pred)
rec_score = recall_score(y_val, pred)
prec_scores.append(prec_score)
recall_scores.append(rec_score)
plt.plot(model_recall, model_precision, marker=',', alpha=0.2,
label=f'Fold {round}: Precision: {prec_score:.2f} / Recall: {rec_score:.2f}')
round += 1
# Final output
print(f'Average Precision Score: {np.mean(prec_scores):.4f} +- {np.std(prec_scores):4f}')
print(f'Average Recall Score: {np.mean(recall_scores):.4f} +- {np.std(recall_scores):4f}')
# Plot formatting
no_skill = len(y_val[y_val==1]) / len(y_val)
plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('Recall',fontsize=10)
plt.ylabel('Precision',fontsize=10)
plt.title(f'Cross-Validated Precision-Recall Curves: {model_alias}',fontsize=11)
plt.legend(loc="best", prop={'size': 9}, frameon=False)
sns.despine()
plt.show()
def metrics_report(predicted_values, actual_values):
conf_matrix = confusion_matrix(predicted_values, actual_values)
print("Classification Metrics Report")
print("-----------------------------")
print('Accuracy: {:.4f}'.format(accuracy_score(actual_values, predicted_values)))
print('Precision: {:.4f}'.format(precision_score(actual_values, predicted_values)))
print('Recall: {:.4f}'.format(recall_score(actual_values, predicted_values)))
print('F1 Score: {:.4f}'.format(f1_score(actual_values, predicted_values)))
print("")
print(classification_report(actual_values, predicted_values))
print("")
plot_cm(conf_matrix, normalize=False, target_names=['human', 'bot'], title='Confusion Matrix')
def plot_cm(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citiation
---------
https://www.kaggle.com/grfiv4/plot-a-confusion-matrix
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(6, 6), dpi=100)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=10)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label', fontsize=10)
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass), fontsize=10)
plt.show();
def plot_feature_importance(model, features, model_alias):
importance = model.feature_importances_
feature_importance = list(zip(features, importance))
feature_importance.sort(key = lambda x: x[1])
# split sorted features_importance into x,y
feat = [f[0] for f in feature_importance]
imp = [i[1] for i in feature_importance]
# Plot feature importance
plt.figure(figsize=(7, 5), dpi=100)
plt.title(f'Feature Importance: {model_alias}', fontsize=11)
plt.barh(feat, imp, color='#3298dc')
plt.xlabel('Feature Score', fontsize=9)
sns.despine()
plt.show();
| multi_model_eval | identifier_name |
model_evaluation.py | '''
Helper functions for evaluating Classification Models
'''
# Imports
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Models
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
# Model support
from matplotlib.colors import ListedColormap
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (accuracy_score, classification_report, confusion_matrix, f1_score,
plot_confusion_matrix, precision_score, recall_score, roc_auc_score, roc_curve, auc,
precision_recall_curve)
def multi_model_eval(model_list, X, y, kf):
'''
Takes a list of models (at same scale) and performs KFolds cross-validation on each
Inputs: * list of models to be evaluated
* X, y training data
* KFolds parameters
Returns: * Scoring metrics for each CV Round
Metrics: Accuracy, Precision, Recall, F1, ROC AUC
'''
for model in model_list:
# Accuracy scores lists
acc_scores, prec_scores, recall_scores, f1_scores, roc_auc_scores = [], [], [], [], []
X_kf, y_kf = np.array(X), np.array(y)
for train_ind, val_ind in kf.split(X, y):
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
# Score model and append to list
acc_scores.append(accuracy_score(y_val, pred))
prec_scores.append(precision_score(y_val, pred))
recall_scores.append(recall_score(y_val, pred))
f1_scores.append(f1_score(y_val, pred))
roc_auc_scores.append(roc_auc_score(y_val, model.predict_proba(X_val)[:,1]))
print(f'Model: {model}')
print("-"*30)
print(f'Accuracy: {np.mean(acc_scores):.5f} +- {np.std(acc_scores):5f}')
print(f'Precision: {np.mean(prec_scores):.5f} +- {np.std(prec_scores):5f}')
print(f'Recall: {np.mean(recall_scores):.5f} +- {np.std(recall_scores):5f}')
print(f'F1 Score: {np.mean(f1_scores):.5f} +- {np.std(f1_scores):5f}')
print(f'ROC AUC: {np.mean(roc_auc_scores):.5f} +- {np.std(roc_auc_scores):5f}')
print("")
def roc_curve_cv(model, X, y, kf, model_alias):
''' | Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
auc_score_list = []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
proba = model.predict_proba(X_val)[:,1]
# ROC curve calculations and plotting
fpr, tpr, _ = roc_curve(y_val, proba)
auc_score = roc_auc_score(y_val, proba)
auc_score_list.append(auc_score)
plt.plot(fpr, tpr, lw=2, alpha=0.25, label='Fold %d (AUC = %0.4f)' % (round, auc_score))
round += 1
# Final output
print(f'Average AUC Score: {np.mean(auc_score_list):.4f} +- {np.std(auc_score_list):4f}')
# Plot formatting
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='grey', label='Chance Line', alpha=.8)
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('False Positive Rate',fontsize=10)
plt.ylabel('True Positive Rate',fontsize=10)
plt.title(f'Cross-Validation ROC of {model_alias}',fontsize=11)
plt.legend(loc="lower right", prop={'size': 9}, frameon=False)
sns.despine()
plt.show()
def precision_recall_cv(model, X, y, kf, model_alias):
'''
Plots Precision-Recall Curves for each fold in KFold cross-validation
for a provided model.
Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
prec_scores, recall_scores = [], []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
proba = model.predict_proba(X_val)[:,1]
# Precicion/Recall curve calculations and plotting
model_precision, model_recall, _ = precision_recall_curve(y_val, proba)
prec_score = precision_score(y_val, pred)
rec_score = recall_score(y_val, pred)
prec_scores.append(prec_score)
recall_scores.append(rec_score)
plt.plot(model_recall, model_precision, marker=',', alpha=0.2,
label=f'Fold {round}: Precision: {prec_score:.2f} / Recall: {rec_score:.2f}')
round += 1
# Final output
print(f'Average Precision Score: {np.mean(prec_scores):.4f} +- {np.std(prec_scores):4f}')
print(f'Average Recall Score: {np.mean(recall_scores):.4f} +- {np.std(recall_scores):4f}')
# Plot formatting
no_skill = len(y_val[y_val==1]) / len(y_val)
plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('Recall',fontsize=10)
plt.ylabel('Precision',fontsize=10)
plt.title(f'Cross-Validated Precision-Recall Curves: {model_alias}',fontsize=11)
plt.legend(loc="best", prop={'size': 9}, frameon=False)
sns.despine()
plt.show()
def metrics_report(predicted_values, actual_values):
conf_matrix = confusion_matrix(predicted_values, actual_values)
print("Classification Metrics Report")
print("-----------------------------")
print('Accuracy: {:.4f}'.format(accuracy_score(actual_values, predicted_values)))
print('Precision: {:.4f}'.format(precision_score(actual_values, predicted_values)))
print('Recall: {:.4f}'.format(recall_score(actual_values, predicted_values)))
print('F1 Score: {:.4f}'.format(f1_score(actual_values, predicted_values)))
print("")
print(classification_report(actual_values, predicted_values))
print("")
plot_cm(conf_matrix, normalize=False, target_names=['human', 'bot'], title='Confusion Matrix')
def plot_cm(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citiation
---------
https://www.kaggle.com/grfiv4/plot-a-confusion-matrix
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(6, 6), dpi=100)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=10)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label', fontsize=10)
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass), fontsize=10)
plt.show();
def plot_feature_importance(model, features, model_alias):
importance = model.feature_importances_
feature_importance = list(zip(features, importance))
feature_importance.sort(key = lambda x: x[1])
# split sorted features_importance into x,y
feat = [f[0] for f in feature_importance]
imp = [i[1] for i in feature_importance]
# Plot feature importance
plt.figure(figsize=(7, 5), dpi=100)
plt.title(f'Feature Importance: {model_alias}', fontsize=11)
plt.barh(feat, imp, color='#3298dc')
plt.xlabel('Feature Score', fontsize=9)
sns.despine()
plt.show(); | Plots ROC Curve with AUC score for each fold in KFold cross-validation
for a provided model. | random_line_split |
model_evaluation.py | '''
Helper functions for evaluating Classification Models
'''
# Imports
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Models
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
# Model support
from matplotlib.colors import ListedColormap
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (accuracy_score, classification_report, confusion_matrix, f1_score,
plot_confusion_matrix, precision_score, recall_score, roc_auc_score, roc_curve, auc,
precision_recall_curve)
def multi_model_eval(model_list, X, y, kf):
'''
Takes a list of models (at same scale) and performs KFolds cross-validation on each
Inputs: * list of models to be evaluated
* X, y training data
* KFolds parameters
Returns: * Scoring metrics for each CV Round
Metrics: Accuracy, Precision, Recall, F1, ROC AUC
'''
for model in model_list:
# Accuracy scores lists
acc_scores, prec_scores, recall_scores, f1_scores, roc_auc_scores = [], [], [], [], []
X_kf, y_kf = np.array(X), np.array(y)
for train_ind, val_ind in kf.split(X, y):
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
# Score model and append to list
acc_scores.append(accuracy_score(y_val, pred))
prec_scores.append(precision_score(y_val, pred))
recall_scores.append(recall_score(y_val, pred))
f1_scores.append(f1_score(y_val, pred))
roc_auc_scores.append(roc_auc_score(y_val, model.predict_proba(X_val)[:,1]))
print(f'Model: {model}')
print("-"*30)
print(f'Accuracy: {np.mean(acc_scores):.5f} +- {np.std(acc_scores):5f}')
print(f'Precision: {np.mean(prec_scores):.5f} +- {np.std(prec_scores):5f}')
print(f'Recall: {np.mean(recall_scores):.5f} +- {np.std(recall_scores):5f}')
print(f'F1 Score: {np.mean(f1_scores):.5f} +- {np.std(f1_scores):5f}')
print(f'ROC AUC: {np.mean(roc_auc_scores):.5f} +- {np.std(roc_auc_scores):5f}')
print("")
def roc_curve_cv(model, X, y, kf, model_alias):
'''
Plots ROC Curve with AUC score for each fold in KFold cross-validation
for a provided model.
Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
auc_score_list = []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
proba = model.predict_proba(X_val)[:,1]
# ROC curve calculations and plotting
fpr, tpr, _ = roc_curve(y_val, proba)
auc_score = roc_auc_score(y_val, proba)
auc_score_list.append(auc_score)
plt.plot(fpr, tpr, lw=2, alpha=0.25, label='Fold %d (AUC = %0.4f)' % (round, auc_score))
round += 1
# Final output
print(f'Average AUC Score: {np.mean(auc_score_list):.4f} +- {np.std(auc_score_list):4f}')
# Plot formatting
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='grey', label='Chance Line', alpha=.8)
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('False Positive Rate',fontsize=10)
plt.ylabel('True Positive Rate',fontsize=10)
plt.title(f'Cross-Validation ROC of {model_alias}',fontsize=11)
plt.legend(loc="lower right", prop={'size': 9}, frameon=False)
sns.despine()
plt.show()
def precision_recall_cv(model, X, y, kf, model_alias):
|
def metrics_report(predicted_values, actual_values):
conf_matrix = confusion_matrix(predicted_values, actual_values)
print("Classification Metrics Report")
print("-----------------------------")
print('Accuracy: {:.4f}'.format(accuracy_score(actual_values, predicted_values)))
print('Precision: {:.4f}'.format(precision_score(actual_values, predicted_values)))
print('Recall: {:.4f}'.format(recall_score(actual_values, predicted_values)))
print('F1 Score: {:.4f}'.format(f1_score(actual_values, predicted_values)))
print("")
print(classification_report(actual_values, predicted_values))
print("")
plot_cm(conf_matrix, normalize=False, target_names=['human', 'bot'], title='Confusion Matrix')
def plot_cm(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citiation
---------
https://www.kaggle.com/grfiv4/plot-a-confusion-matrix
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(6, 6), dpi=100)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=10)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label', fontsize=10)
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass), fontsize=10)
plt.show();
def plot_feature_importance(model, features, model_alias):
importance = model.feature_importances_
feature_importance = list(zip(features, importance))
feature_importance.sort(key = lambda x: x[1])
# split sorted features_importance into x,y
feat = [f[0] for f in feature_importance]
imp = [i[1] for i in feature_importance]
# Plot feature importance
plt.figure(figsize=(7, 5), dpi=100)
plt.title(f'Feature Importance: {model_alias}', fontsize=11)
plt.barh(feat, imp, color='#3298dc')
plt.xlabel('Feature Score', fontsize=9)
sns.despine()
plt.show();
| '''
Plots Precision-Recall Curves for each fold in KFold cross-validation
for a provided model.
Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
prec_scores, recall_scores = [], []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
proba = model.predict_proba(X_val)[:,1]
# Precicion/Recall curve calculations and plotting
model_precision, model_recall, _ = precision_recall_curve(y_val, proba)
prec_score = precision_score(y_val, pred)
rec_score = recall_score(y_val, pred)
prec_scores.append(prec_score)
recall_scores.append(rec_score)
plt.plot(model_recall, model_precision, marker=',', alpha=0.2,
label=f'Fold {round}: Precision: {prec_score:.2f} / Recall: {rec_score:.2f}')
round += 1
# Final output
print(f'Average Precision Score: {np.mean(prec_scores):.4f} +- {np.std(prec_scores):4f}')
print(f'Average Recall Score: {np.mean(recall_scores):.4f} +- {np.std(recall_scores):4f}')
# Plot formatting
no_skill = len(y_val[y_val==1]) / len(y_val)
plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('Recall',fontsize=10)
plt.ylabel('Precision',fontsize=10)
plt.title(f'Cross-Validated Precision-Recall Curves: {model_alias}',fontsize=11)
plt.legend(loc="best", prop={'size': 9}, frameon=False)
sns.despine()
plt.show() | identifier_body |
model_evaluation.py | '''
Helper functions for evaluating Classification Models
'''
# Imports
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Models
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
# Model support
from matplotlib.colors import ListedColormap
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (accuracy_score, classification_report, confusion_matrix, f1_score,
plot_confusion_matrix, precision_score, recall_score, roc_auc_score, roc_curve, auc,
precision_recall_curve)
def multi_model_eval(model_list, X, y, kf):
'''
Takes a list of models (at same scale) and performs KFolds cross-validation on each
Inputs: * list of models to be evaluated
* X, y training data
* KFolds parameters
Returns: * Scoring metrics for each CV Round
Metrics: Accuracy, Precision, Recall, F1, ROC AUC
'''
for model in model_list:
# Accuracy scores lists
acc_scores, prec_scores, recall_scores, f1_scores, roc_auc_scores = [], [], [], [], []
X_kf, y_kf = np.array(X), np.array(y)
for train_ind, val_ind in kf.split(X, y):
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
# Score model and append to list
acc_scores.append(accuracy_score(y_val, pred))
prec_scores.append(precision_score(y_val, pred))
recall_scores.append(recall_score(y_val, pred))
f1_scores.append(f1_score(y_val, pred))
roc_auc_scores.append(roc_auc_score(y_val, model.predict_proba(X_val)[:,1]))
print(f'Model: {model}')
print("-"*30)
print(f'Accuracy: {np.mean(acc_scores):.5f} +- {np.std(acc_scores):5f}')
print(f'Precision: {np.mean(prec_scores):.5f} +- {np.std(prec_scores):5f}')
print(f'Recall: {np.mean(recall_scores):.5f} +- {np.std(recall_scores):5f}')
print(f'F1 Score: {np.mean(f1_scores):.5f} +- {np.std(f1_scores):5f}')
print(f'ROC AUC: {np.mean(roc_auc_scores):.5f} +- {np.std(roc_auc_scores):5f}')
print("")
def roc_curve_cv(model, X, y, kf, model_alias):
'''
Plots ROC Curve with AUC score for each fold in KFold cross-validation
for a provided model.
Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
auc_score_list = []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
proba = model.predict_proba(X_val)[:,1]
# ROC curve calculations and plotting
fpr, tpr, _ = roc_curve(y_val, proba)
auc_score = roc_auc_score(y_val, proba)
auc_score_list.append(auc_score)
plt.plot(fpr, tpr, lw=2, alpha=0.25, label='Fold %d (AUC = %0.4f)' % (round, auc_score))
round += 1
# Final output
print(f'Average AUC Score: {np.mean(auc_score_list):.4f} +- {np.std(auc_score_list):4f}')
# Plot formatting
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='grey', label='Chance Line', alpha=.8)
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('False Positive Rate',fontsize=10)
plt.ylabel('True Positive Rate',fontsize=10)
plt.title(f'Cross-Validation ROC of {model_alias}',fontsize=11)
plt.legend(loc="lower right", prop={'size': 9}, frameon=False)
sns.despine()
plt.show()
def precision_recall_cv(model, X, y, kf, model_alias):
'''
Plots Precision-Recall Curves for each fold in KFold cross-validation
for a provided model.
Inputs: * Classification Model
* X, y training data
* KFold parameters
* Model Alias (for plot)
'''
# sets up the figure
plt.figure(figsize=(6, 6), dpi=100)
# sets up the X, y for KFolds
X_kf, y_kf = np.array(X), np.array(y)
# to return mean and std of CV AUC's
prec_scores, recall_scores = [], []
# to track the CV rounds
round = 1
for train_ind, val_ind in kf.split(X, y):
# Data split
X_train, y_train = X_kf[train_ind], y_kf[train_ind]
X_val, y_val = X_kf[val_ind], y_kf[val_ind]
# Fit model and make predictions
model.fit(X_train, y_train)
pred = model.predict(X_val)
proba = model.predict_proba(X_val)[:,1]
# Precicion/Recall curve calculations and plotting
model_precision, model_recall, _ = precision_recall_curve(y_val, proba)
prec_score = precision_score(y_val, pred)
rec_score = recall_score(y_val, pred)
prec_scores.append(prec_score)
recall_scores.append(rec_score)
plt.plot(model_recall, model_precision, marker=',', alpha=0.2,
label=f'Fold {round}: Precision: {prec_score:.2f} / Recall: {rec_score:.2f}')
round += 1
# Final output
print(f'Average Precision Score: {np.mean(prec_scores):.4f} +- {np.std(prec_scores):4f}')
print(f'Average Recall Score: {np.mean(recall_scores):.4f} +- {np.std(recall_scores):4f}')
# Plot formatting
no_skill = len(y_val[y_val==1]) / len(y_val)
plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.xlabel('Recall',fontsize=10)
plt.ylabel('Precision',fontsize=10)
plt.title(f'Cross-Validated Precision-Recall Curves: {model_alias}',fontsize=11)
plt.legend(loc="best", prop={'size': 9}, frameon=False)
sns.despine()
plt.show()
def metrics_report(predicted_values, actual_values):
conf_matrix = confusion_matrix(predicted_values, actual_values)
print("Classification Metrics Report")
print("-----------------------------")
print('Accuracy: {:.4f}'.format(accuracy_score(actual_values, predicted_values)))
print('Precision: {:.4f}'.format(precision_score(actual_values, predicted_values)))
print('Recall: {:.4f}'.format(recall_score(actual_values, predicted_values)))
print('F1 Score: {:.4f}'.format(f1_score(actual_values, predicted_values)))
print("")
print(classification_report(actual_values, predicted_values))
print("")
plot_cm(conf_matrix, normalize=False, target_names=['human', 'bot'], title='Confusion Matrix')
def plot_cm(cm,
target_names,
title='Confusion matrix',
cmap=None,
normalize=True):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Usage
-----
plot_confusion_matrix(cm = cm, # confusion matrix created by
# sklearn.metrics.confusion_matrix
normalize = True, # show proportions
target_names = y_labels_vals, # list of names of the classes
title = best_estimator_name) # title of graph
Citiation
---------
https://www.kaggle.com/grfiv4/plot-a-confusion-matrix
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
plt.figure(figsize=(6, 6), dpi=100)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=10)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
|
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label', fontsize=10)
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass), fontsize=10)
plt.show();
def plot_feature_importance(model, features, model_alias):
importance = model.feature_importances_
feature_importance = list(zip(features, importance))
feature_importance.sort(key = lambda x: x[1])
# split sorted features_importance into x,y
feat = [f[0] for f in feature_importance]
imp = [i[1] for i in feature_importance]
# Plot feature importance
plt.figure(figsize=(7, 5), dpi=100)
plt.title(f'Feature Importance: {model_alias}', fontsize=11)
plt.barh(feat, imp, color='#3298dc')
plt.xlabel('Feature Score', fontsize=9)
sns.despine()
plt.show();
| plt.text(j, i, "{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black") | conditional_block |
zocket.py | import select
import sys
MICROPYTHON = sys.implementation.name == "micropython"
BCAST_HOST = "255.255.255.255"
LOCAL_HOST = "0.0.0.0"
LOG_PREFIX = "[μNetwork] "
if MICROPYTHON:
i | else:
import traceback
import socket
import json
from time import sleep
def micropython_only(fn):
def wrapper(*args, **kwargs):
if MICROPYTHON:
return fn(*args, **kwargs)
return wrapper
class _Peer:
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
chunksize: int = 1024,
):
"""
:param port:
The port to use for peer discovery.
:param ssid:
(Optional) SSID of a WIFI connection.
:param passwd:
(Optional) Password for WIFI connection.
:param enable_ap:
(Optional) Enable ESP's own Access Point.
:param namespace:
(Optional) The namespace to use for peer discovery.
:param retry_for:
(Optional) Retry if any of these Exceptions occur.
:param retry_delay:
(Optional) The time in seconds to wait for, before retrying.
"""
self.port = port
self.ssid = ssid
self.passwd = passwd
self.enable_ap = enable_ap
self.namespace = namespace
self.retry_for = retry_for
self.retry_delay = retry_delay
self.chunksize = chunksize
self.peer_sock, self.data_sock = None, None
self.disconnect()
self.connect()
_namespace_bytes, _namespace_size = None, None
@property
def namespace(self):
return self._namespace_bytes.decode()
@namespace.setter
def namespace(self, value):
self._namespace_bytes = value.encode("utf-8")
self._namespace_size = len(self._namespace_bytes)
def _handle_error(self, exc=None):
print(LOG_PREFIX + "\nCrash report:")
if MICROPYTHON:
sys.print_exception(exc)
else:
traceback.print_exc()
print(LOG_PREFIX + "Retrying in %d sec…\n" % self.retry_delay)
sleep(self.retry_delay)
@property
@micropython_only
def network_connected(self):
return ap_if.isconnected() or sta_if.isconnected()
@micropython_only
def _configure_network(self):
if self.enable_ap:
ap_if.active(True)
if self.ssid is not None:
sta_if.active(True)
sta_if.scan()
sta_if.disconnect()
sta_if.connect(self.ssid, self.passwd)
@micropython_only
def wait_for_network(self, *, max_tries=None, refresh_freq_hz=1):
wait_sec = 1 / refresh_freq_hz
print(LOG_PREFIX + "Waiting for network...", end="")
count = 0
while not self.network_connected:
count += 1
sleep(wait_sec)
if max_tries is not None and count > max_tries:
print()
if not self.network_connected:
raise OSError(
"Couldn't establish a connection even after %d tries."
% max_tries
)
return
else:
print("%d..." % count, end="")
@micropython_only
def _connect_network(self):
print(
LOG_PREFIX
+ "Connecting to network… (ssid: %r passwd: %r AP: %r)"
% (self.ssid, self.passwd, self.enable_ap)
)
while True:
try:
self._configure_network()
self.wait_for_network(max_tries=50)
except self.retry_for as e:
self._handle_error(e)
self._disconnect_network()
else:
print(LOG_PREFIX + "Connected to network!")
return
@micropython_only
def _disconnect_network(self):
ap_if.active(False)
sta_if.active(False)
def connect(self):
self._connect_network()
self.peer_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.data_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def disconnect(self):
self._disconnect_network()
if self.data_sock is not None:
self.data_sock.close()
self.data_sock = None
if self.peer_sock is not None:
self.peer_sock.close()
self.peer_sock = None
def _send(self, msg: bytes, **kwargs):
raise NotImplementedError
def _recv(self, **kwargs) -> tuple:
raise NotImplementedError
def send(self, msg_bytes: bytes, **kwargs):
if msg_bytes == b"":
raise ValueError(
"You may not send an empty datagram; it's reserved as a peer disconnect signal."
)
while True:
try:
return self._send(msg_bytes, **kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def recv(self, **kwargs) -> tuple:
while True:
try:
return self._recv(**kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def send_str(self, msg_str: str, **kwargs):
return self.send(msg_str.encode(), **kwargs)
def recv_str(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return msg.decode(), address
def send_json(self, msg_json, **kwargs):
return self.send(json.dumps(msg_json).encode(), **kwargs)
def recv_json(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return json.loads(msg), address
def __enter__(self):
return self
def __exit__(self, e, *args, **kwargs):
if e in self.retry_for:
self._handle_error(e)
self.connect()
return True
else:
self.disconnect()
def _get_local_ip() -> str:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((BCAST_HOST, 0))
return sock.getsockname()[0]
class Client(_Peer):
"""
The client side of a p2p connection.
When :py:meth:`connect` is called,
the client find a :py:class:`Server` with the same :py:attr:`port` and :py:attr:`namespace`.
(:py:meth:`connect` is usually automatically called inside the constructor.)
Once connected, the client can send/recv messages as usual.
"""
def find_server(self) -> tuple:
while True:
self.peer_sock.sendto(self._namespace_bytes, (BCAST_HOST, self.port))
try:
server_addr = self.peer_sock.recv(self.chunksize).decode()
except OSError:
continue
print(LOG_PREFIX + "Found server @ " + server_addr)
host, port = server_addr.split(":")
return host, int(port)
def connect(self):
super().connect()
self.peer_sock.settimeout(1)
self.data_sock.connect(self.find_server())
def _send(self, msg: bytes):
while True:
try:
return self.data_sock.sendall(msg)
except OSError:
self.disconnect()
self.connect()
def _recv(self) -> tuple:
data, addr = self.data_sock.recvfrom(self.chunksize)
if data == b"":
raise OSError("Connection is closed.")
return data, addr
class Server(_Peer):
"""
The server side of a p2p connection.
For this class, :py:meth:`recv` and siblings are essential in facilitating the peer-discovery mechanism.
If a :py:class:`Server` instance doesn't call :py:meth:`recv` in its lifetime,
then it is hidden to other :py:class:`Client` instances on the network.
:py:meth:`recv` will respond to all peer-discovery requests until a client sends an actual piece of data,
sent using :py:meth:`send`.
The :py:attr:`clients` will keep a mapping of client hostnames, to a TCP socket connecting to them.
"""
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
):
self.clients = None
self._local_ip = None
super().__init__(
port,
ssid=ssid,
passwd=passwd,
enable_ap=enable_ap,
namespace=namespace,
retry_for=retry_for,
retry_delay=retry_delay,
)
def connect(self):
super().connect()
self.clients = {}
self.peer_sock.bind((LOCAL_HOST, self.port))
self.data_sock.bind((LOCAL_HOST, 0))
self.data_sock.listen(1)
self._local_ip = (
_get_local_ip() + ":" + str(self.data_sock.getsockname()[1])
).encode()
print(LOG_PREFIX, "local i[ is", self._local_ip)
def _send(self, msg: bytes, *, host=None):
if host is None:
socks = self.clients.values()
else:
try:
socks = [self.clients[host]]
except KeyError:
raise ValueError(
"Client with provided host (%r) does not exist." % host
)
for sock in socks:
sock.sendall(msg)
def _recv(self) -> tuple:
while True:
client_socks = list(self.clients.values())
readable, *_ = select.select(client_socks + [self.data_sock, self.peer_sock] , [], [])
print("got sock to read:", readable)
for sock in readable:
if sock is self.data_sock:
data_sock, address = sock.accept()
self.clients[address] = data_sock
chunk = data_sock.recv(self.chunksize)
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
continue
return chunk, address
elif sock is self.peer_sock:
msg, address = sock.recvfrom(self.chunksize)
if msg != self._namespace_bytes:
continue
self.peer_sock.sendto(self._local_ip, address)
print(LOG_PREFIX + "Client @ %s requested discovery" % address[0])
else:
chunk = sock.recv(self.chunksize)
for address, maybe_del in self.clients.items():
if maybe_del is not sock:
continue
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
break
return chunk, address
continue
| mport usocket as socket
import ujson as json
import network
from utime import sleep
ap_if = network.WLAN(network.AP_IF)
sta_if = network.WLAN(network.STA_IF)
| conditional_block |
zocket.py | import select
import sys
MICROPYTHON = sys.implementation.name == "micropython"
BCAST_HOST = "255.255.255.255"
LOCAL_HOST = "0.0.0.0"
LOG_PREFIX = "[μNetwork] "
if MICROPYTHON:
import usocket as socket
import ujson as json
import network
from utime import sleep
ap_if = network.WLAN(network.AP_IF)
sta_if = network.WLAN(network.STA_IF)
else:
import traceback
import socket
import json
from time import sleep
def micropython_only(fn):
def wrapper(*args, **kwargs):
if MICROPYTHON:
return fn(*args, **kwargs)
return wrapper
class _Peer:
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
chunksize: int = 1024,
):
"""
:param port:
The port to use for peer discovery.
:param ssid:
(Optional) SSID of a WIFI connection.
:param passwd:
(Optional) Password for WIFI connection.
:param enable_ap:
(Optional) Enable ESP's own Access Point.
:param namespace:
(Optional) The namespace to use for peer discovery.
:param retry_for:
(Optional) Retry if any of these Exceptions occur.
:param retry_delay:
(Optional) The time in seconds to wait for, before retrying.
"""
self.port = port
self.ssid = ssid
self.passwd = passwd
self.enable_ap = enable_ap
self.namespace = namespace
self.retry_for = retry_for
self.retry_delay = retry_delay
self.chunksize = chunksize
self.peer_sock, self.data_sock = None, None
self.disconnect()
self.connect()
_namespace_bytes, _namespace_size = None, None
@property
def namespace(self):
return self._namespace_bytes.decode()
@namespace.setter
def namespace(self, value):
self._namespace_bytes = value.encode("utf-8")
self._namespace_size = len(self._namespace_bytes)
def _handle_error(self, exc=None):
print(LOG_PREFIX + "\nCrash report:")
if MICROPYTHON:
sys.print_exception(exc)
else:
traceback.print_exc()
print(LOG_PREFIX + "Retrying in %d sec…\n" % self.retry_delay)
sleep(self.retry_delay)
@property
@micropython_only
def network_connected(self):
return ap_if.isconnected() or sta_if.isconnected()
@micropython_only
def _configure_network(self):
if self.enable_ap:
ap_if.active(True)
if self.ssid is not None:
sta_if.active(True)
sta_if.scan()
sta_if.disconnect()
sta_if.connect(self.ssid, self.passwd)
@micropython_only
def wait_for_network(self, *, max_tries=None, refresh_freq_hz=1):
wait_sec = 1 / refresh_freq_hz
print(LOG_PREFIX + "Waiting for network...", end="")
count = 0
while not self.network_connected:
count += 1
sleep(wait_sec)
if max_tries is not None and count > max_tries:
print()
if not self.network_connected:
raise OSError(
"Couldn't establish a connection even after %d tries."
% max_tries
)
return
else:
print("%d..." % count, end="")
@micropython_only
def _connect_network(self):
print(
LOG_PREFIX
+ "Connecting to network… (ssid: %r passwd: %r AP: %r)"
% (self.ssid, self.passwd, self.enable_ap)
)
while True:
try:
self._configure_network()
self.wait_for_network(max_tries=50)
except self.retry_for as e:
self._handle_error(e)
self._disconnect_network()
else:
print(LOG_PREFIX + "Connected to network!")
return
@micropython_only
def _disconnect_network(self):
ap_if.active(False)
sta_if.active(False)
def connect(self):
self._connect_network()
self.peer_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.data_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def disconnect(self):
self._disconnect_network()
if self.data_sock is not None:
self.data_sock.close()
self.data_sock = None
if self.peer_sock is not None:
self.peer_sock.close()
self.peer_sock = None
def _send(self, msg: bytes, **kwargs):
raise NotImplementedError
def _recv(self, **kwargs) -> tuple:
raise NotImplementedError
def send(self, msg_bytes: bytes, **kwargs):
if msg_bytes == b"":
raise ValueError(
"You may not send an empty datagram; it's reserved as a peer disconnect signal."
)
while True:
try:
return self._send(msg_bytes, **kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def recv(self, **kwargs) -> tuple:
while True:
try:
return self._recv(**kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def send_str(self, msg_str: str, **kwargs):
return self.send(msg_str.encode(), **kwargs)
def recv_str(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return msg.decode(), address
def send_json(self, msg_json, **kwargs):
return self.send(json.dumps(msg_json).encode(), **kwargs)
def recv_json(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return json.loads(msg), address
def __enter__(self):
return self
def __exit__(self, e, *args, **kwargs):
if e in self.retry_for:
self._handle_error(e)
self.connect()
return True
else:
self.disconnect()
def _get_local_ip() -> str:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((BCAST_HOST, 0))
return sock.getsockname()[0]
class Client(_Peer):
"""
The client side of a p2p connection.
When :py:meth:`connect` is called,
the client find a :py:class:`Server` with the same :py:attr:`port` and :py:attr:`namespace`.
(:py:meth:`connect` is usually automatically called inside the constructor.)
Once connected, the client can send/recv messages as usual.
"""
def find_server(self) -> tuple:
while True:
self.peer_sock.sendto(self._namespace_bytes, (BCAST_HOST, self.port))
try:
server_addr = self.peer_sock.recv(self.chunksize).decode()
except OSError:
continue
print(LOG_PREFIX + "Found server @ " + server_addr)
host, port = server_addr.split(":")
return host, int(port)
def connect(self):
super | def _send(self, msg: bytes):
while True:
try:
return self.data_sock.sendall(msg)
except OSError:
self.disconnect()
self.connect()
def _recv(self) -> tuple:
data, addr = self.data_sock.recvfrom(self.chunksize)
if data == b"":
raise OSError("Connection is closed.")
return data, addr
class Server(_Peer):
"""
The server side of a p2p connection.
For this class, :py:meth:`recv` and siblings are essential in facilitating the peer-discovery mechanism.
If a :py:class:`Server` instance doesn't call :py:meth:`recv` in its lifetime,
then it is hidden to other :py:class:`Client` instances on the network.
:py:meth:`recv` will respond to all peer-discovery requests until a client sends an actual piece of data,
sent using :py:meth:`send`.
The :py:attr:`clients` will keep a mapping of client hostnames, to a TCP socket connecting to them.
"""
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
):
self.clients = None
self._local_ip = None
super().__init__(
port,
ssid=ssid,
passwd=passwd,
enable_ap=enable_ap,
namespace=namespace,
retry_for=retry_for,
retry_delay=retry_delay,
)
def connect(self):
super().connect()
self.clients = {}
self.peer_sock.bind((LOCAL_HOST, self.port))
self.data_sock.bind((LOCAL_HOST, 0))
self.data_sock.listen(1)
self._local_ip = (
_get_local_ip() + ":" + str(self.data_sock.getsockname()[1])
).encode()
print(LOG_PREFIX, "local i[ is", self._local_ip)
def _send(self, msg: bytes, *, host=None):
if host is None:
socks = self.clients.values()
else:
try:
socks = [self.clients[host]]
except KeyError:
raise ValueError(
"Client with provided host (%r) does not exist." % host
)
for sock in socks:
sock.sendall(msg)
def _recv(self) -> tuple:
while True:
client_socks = list(self.clients.values())
readable, *_ = select.select(client_socks + [self.data_sock, self.peer_sock] , [], [])
print("got sock to read:", readable)
for sock in readable:
if sock is self.data_sock:
data_sock, address = sock.accept()
self.clients[address] = data_sock
chunk = data_sock.recv(self.chunksize)
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
continue
return chunk, address
elif sock is self.peer_sock:
msg, address = sock.recvfrom(self.chunksize)
if msg != self._namespace_bytes:
continue
self.peer_sock.sendto(self._local_ip, address)
print(LOG_PREFIX + "Client @ %s requested discovery" % address[0])
else:
chunk = sock.recv(self.chunksize)
for address, maybe_del in self.clients.items():
if maybe_del is not sock:
continue
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
break
return chunk, address
continue
| ().connect()
self.peer_sock.settimeout(1)
self.data_sock.connect(self.find_server())
| identifier_body |
zocket.py | import select
import sys
MICROPYTHON = sys.implementation.name == "micropython"
BCAST_HOST = "255.255.255.255"
LOCAL_HOST = "0.0.0.0"
LOG_PREFIX = "[μNetwork] "
if MICROPYTHON:
import usocket as socket
import ujson as json
import network
from utime import sleep
ap_if = network.WLAN(network.AP_IF)
sta_if = network.WLAN(network.STA_IF)
else:
import traceback
import socket
import json
from time import sleep
def micropython_only(fn):
def wrapper(*args, **kwargs):
if MICROPYTHON:
return fn(*args, **kwargs)
return wrapper
class _Peer:
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
chunksize: int = 1024,
):
"""
:param port:
The port to use for peer discovery.
:param ssid:
(Optional) SSID of a WIFI connection.
:param passwd:
(Optional) Password for WIFI connection.
:param enable_ap:
(Optional) Enable ESP's own Access Point.
:param namespace:
(Optional) The namespace to use for peer discovery.
:param retry_for:
(Optional) Retry if any of these Exceptions occur.
:param retry_delay:
(Optional) The time in seconds to wait for, before retrying.
"""
self.port = port
self.ssid = ssid
self.passwd = passwd
self.enable_ap = enable_ap
self.namespace = namespace
self.retry_for = retry_for
self.retry_delay = retry_delay
self.chunksize = chunksize
self.peer_sock, self.data_sock = None, None
self.disconnect()
self.connect()
_namespace_bytes, _namespace_size = None, None
@property
def namespace(self):
return self._namespace_bytes.decode()
@namespace.setter
def namespace(self, value):
self._namespace_bytes = value.encode("utf-8")
self._namespace_size = len(self._namespace_bytes)
def _handle_error(self, exc=None):
print(LOG_PREFIX + "\nCrash report:")
if MICROPYTHON:
sys.print_exception(exc)
else:
traceback.print_exc()
print(LOG_PREFIX + "Retrying in %d sec…\n" % self.retry_delay)
sleep(self.retry_delay)
@property | @micropython_only
def network_connected(self):
return ap_if.isconnected() or sta_if.isconnected()
@micropython_only
def _configure_network(self):
if self.enable_ap:
ap_if.active(True)
if self.ssid is not None:
sta_if.active(True)
sta_if.scan()
sta_if.disconnect()
sta_if.connect(self.ssid, self.passwd)
@micropython_only
def wait_for_network(self, *, max_tries=None, refresh_freq_hz=1):
wait_sec = 1 / refresh_freq_hz
print(LOG_PREFIX + "Waiting for network...", end="")
count = 0
while not self.network_connected:
count += 1
sleep(wait_sec)
if max_tries is not None and count > max_tries:
print()
if not self.network_connected:
raise OSError(
"Couldn't establish a connection even after %d tries."
% max_tries
)
return
else:
print("%d..." % count, end="")
@micropython_only
def _connect_network(self):
print(
LOG_PREFIX
+ "Connecting to network… (ssid: %r passwd: %r AP: %r)"
% (self.ssid, self.passwd, self.enable_ap)
)
while True:
try:
self._configure_network()
self.wait_for_network(max_tries=50)
except self.retry_for as e:
self._handle_error(e)
self._disconnect_network()
else:
print(LOG_PREFIX + "Connected to network!")
return
@micropython_only
def _disconnect_network(self):
ap_if.active(False)
sta_if.active(False)
def connect(self):
self._connect_network()
self.peer_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.data_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def disconnect(self):
self._disconnect_network()
if self.data_sock is not None:
self.data_sock.close()
self.data_sock = None
if self.peer_sock is not None:
self.peer_sock.close()
self.peer_sock = None
def _send(self, msg: bytes, **kwargs):
raise NotImplementedError
def _recv(self, **kwargs) -> tuple:
raise NotImplementedError
def send(self, msg_bytes: bytes, **kwargs):
if msg_bytes == b"":
raise ValueError(
"You may not send an empty datagram; it's reserved as a peer disconnect signal."
)
while True:
try:
return self._send(msg_bytes, **kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def recv(self, **kwargs) -> tuple:
while True:
try:
return self._recv(**kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def send_str(self, msg_str: str, **kwargs):
return self.send(msg_str.encode(), **kwargs)
def recv_str(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return msg.decode(), address
def send_json(self, msg_json, **kwargs):
return self.send(json.dumps(msg_json).encode(), **kwargs)
def recv_json(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return json.loads(msg), address
def __enter__(self):
return self
def __exit__(self, e, *args, **kwargs):
if e in self.retry_for:
self._handle_error(e)
self.connect()
return True
else:
self.disconnect()
def _get_local_ip() -> str:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((BCAST_HOST, 0))
return sock.getsockname()[0]
class Client(_Peer):
"""
The client side of a p2p connection.
When :py:meth:`connect` is called,
the client find a :py:class:`Server` with the same :py:attr:`port` and :py:attr:`namespace`.
(:py:meth:`connect` is usually automatically called inside the constructor.)
Once connected, the client can send/recv messages as usual.
"""
def find_server(self) -> tuple:
while True:
self.peer_sock.sendto(self._namespace_bytes, (BCAST_HOST, self.port))
try:
server_addr = self.peer_sock.recv(self.chunksize).decode()
except OSError:
continue
print(LOG_PREFIX + "Found server @ " + server_addr)
host, port = server_addr.split(":")
return host, int(port)
def connect(self):
super().connect()
self.peer_sock.settimeout(1)
self.data_sock.connect(self.find_server())
def _send(self, msg: bytes):
while True:
try:
return self.data_sock.sendall(msg)
except OSError:
self.disconnect()
self.connect()
def _recv(self) -> tuple:
data, addr = self.data_sock.recvfrom(self.chunksize)
if data == b"":
raise OSError("Connection is closed.")
return data, addr
class Server(_Peer):
"""
The server side of a p2p connection.
For this class, :py:meth:`recv` and siblings are essential in facilitating the peer-discovery mechanism.
If a :py:class:`Server` instance doesn't call :py:meth:`recv` in its lifetime,
then it is hidden to other :py:class:`Client` instances on the network.
:py:meth:`recv` will respond to all peer-discovery requests until a client sends an actual piece of data,
sent using :py:meth:`send`.
The :py:attr:`clients` will keep a mapping of client hostnames, to a TCP socket connecting to them.
"""
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
):
self.clients = None
self._local_ip = None
super().__init__(
port,
ssid=ssid,
passwd=passwd,
enable_ap=enable_ap,
namespace=namespace,
retry_for=retry_for,
retry_delay=retry_delay,
)
def connect(self):
super().connect()
self.clients = {}
self.peer_sock.bind((LOCAL_HOST, self.port))
self.data_sock.bind((LOCAL_HOST, 0))
self.data_sock.listen(1)
self._local_ip = (
_get_local_ip() + ":" + str(self.data_sock.getsockname()[1])
).encode()
print(LOG_PREFIX, "local i[ is", self._local_ip)
def _send(self, msg: bytes, *, host=None):
if host is None:
socks = self.clients.values()
else:
try:
socks = [self.clients[host]]
except KeyError:
raise ValueError(
"Client with provided host (%r) does not exist." % host
)
for sock in socks:
sock.sendall(msg)
def _recv(self) -> tuple:
while True:
client_socks = list(self.clients.values())
readable, *_ = select.select(client_socks + [self.data_sock, self.peer_sock] , [], [])
print("got sock to read:", readable)
for sock in readable:
if sock is self.data_sock:
data_sock, address = sock.accept()
self.clients[address] = data_sock
chunk = data_sock.recv(self.chunksize)
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
continue
return chunk, address
elif sock is self.peer_sock:
msg, address = sock.recvfrom(self.chunksize)
if msg != self._namespace_bytes:
continue
self.peer_sock.sendto(self._local_ip, address)
print(LOG_PREFIX + "Client @ %s requested discovery" % address[0])
else:
chunk = sock.recv(self.chunksize)
for address, maybe_del in self.clients.items():
if maybe_del is not sock:
continue
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
break
return chunk, address
continue | random_line_split | |
zocket.py | import select
import sys
MICROPYTHON = sys.implementation.name == "micropython"
BCAST_HOST = "255.255.255.255"
LOCAL_HOST = "0.0.0.0"
LOG_PREFIX = "[μNetwork] "
if MICROPYTHON:
import usocket as socket
import ujson as json
import network
from utime import sleep
ap_if = network.WLAN(network.AP_IF)
sta_if = network.WLAN(network.STA_IF)
else:
import traceback
import socket
import json
from time import sleep
def micropython_only(fn):
def wrapper(*args, **kwargs):
if MICROPYTHON:
return fn(*args, **kwargs)
return wrapper
class _Peer:
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
chunksize: int = 1024,
):
"""
:param port:
The port to use for peer discovery.
:param ssid:
(Optional) SSID of a WIFI connection.
:param passwd:
(Optional) Password for WIFI connection.
:param enable_ap:
(Optional) Enable ESP's own Access Point.
:param namespace:
(Optional) The namespace to use for peer discovery.
:param retry_for:
(Optional) Retry if any of these Exceptions occur.
:param retry_delay:
(Optional) The time in seconds to wait for, before retrying.
"""
self.port = port
self.ssid = ssid
self.passwd = passwd
self.enable_ap = enable_ap
self.namespace = namespace
self.retry_for = retry_for
self.retry_delay = retry_delay
self.chunksize = chunksize
self.peer_sock, self.data_sock = None, None
self.disconnect()
self.connect()
_namespace_bytes, _namespace_size = None, None
@property
def namespace(self):
return self._namespace_bytes.decode()
@namespace.setter
def namespace(self, value):
self._namespace_bytes = value.encode("utf-8")
self._namespace_size = len(self._namespace_bytes)
def _ | self, exc=None):
print(LOG_PREFIX + "\nCrash report:")
if MICROPYTHON:
sys.print_exception(exc)
else:
traceback.print_exc()
print(LOG_PREFIX + "Retrying in %d sec…\n" % self.retry_delay)
sleep(self.retry_delay)
@property
@micropython_only
def network_connected(self):
return ap_if.isconnected() or sta_if.isconnected()
@micropython_only
def _configure_network(self):
if self.enable_ap:
ap_if.active(True)
if self.ssid is not None:
sta_if.active(True)
sta_if.scan()
sta_if.disconnect()
sta_if.connect(self.ssid, self.passwd)
@micropython_only
def wait_for_network(self, *, max_tries=None, refresh_freq_hz=1):
wait_sec = 1 / refresh_freq_hz
print(LOG_PREFIX + "Waiting for network...", end="")
count = 0
while not self.network_connected:
count += 1
sleep(wait_sec)
if max_tries is not None and count > max_tries:
print()
if not self.network_connected:
raise OSError(
"Couldn't establish a connection even after %d tries."
% max_tries
)
return
else:
print("%d..." % count, end="")
@micropython_only
def _connect_network(self):
print(
LOG_PREFIX
+ "Connecting to network… (ssid: %r passwd: %r AP: %r)"
% (self.ssid, self.passwd, self.enable_ap)
)
while True:
try:
self._configure_network()
self.wait_for_network(max_tries=50)
except self.retry_for as e:
self._handle_error(e)
self._disconnect_network()
else:
print(LOG_PREFIX + "Connected to network!")
return
@micropython_only
def _disconnect_network(self):
ap_if.active(False)
sta_if.active(False)
def connect(self):
self._connect_network()
self.peer_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
self.peer_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.data_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def disconnect(self):
self._disconnect_network()
if self.data_sock is not None:
self.data_sock.close()
self.data_sock = None
if self.peer_sock is not None:
self.peer_sock.close()
self.peer_sock = None
def _send(self, msg: bytes, **kwargs):
raise NotImplementedError
def _recv(self, **kwargs) -> tuple:
raise NotImplementedError
def send(self, msg_bytes: bytes, **kwargs):
if msg_bytes == b"":
raise ValueError(
"You may not send an empty datagram; it's reserved as a peer disconnect signal."
)
while True:
try:
return self._send(msg_bytes, **kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def recv(self, **kwargs) -> tuple:
while True:
try:
return self._recv(**kwargs)
except self.retry_for as e:
self._handle_error(e)
self.connect()
def send_str(self, msg_str: str, **kwargs):
return self.send(msg_str.encode(), **kwargs)
def recv_str(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return msg.decode(), address
def send_json(self, msg_json, **kwargs):
return self.send(json.dumps(msg_json).encode(), **kwargs)
def recv_json(self, **kwargs) -> tuple:
msg, address = self.recv(**kwargs)
return json.loads(msg), address
def __enter__(self):
return self
def __exit__(self, e, *args, **kwargs):
if e in self.retry_for:
self._handle_error(e)
self.connect()
return True
else:
self.disconnect()
def _get_local_ip() -> str:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if not MICROPYTHON:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((BCAST_HOST, 0))
return sock.getsockname()[0]
class Client(_Peer):
"""
The client side of a p2p connection.
When :py:meth:`connect` is called,
the client find a :py:class:`Server` with the same :py:attr:`port` and :py:attr:`namespace`.
(:py:meth:`connect` is usually automatically called inside the constructor.)
Once connected, the client can send/recv messages as usual.
"""
def find_server(self) -> tuple:
while True:
self.peer_sock.sendto(self._namespace_bytes, (BCAST_HOST, self.port))
try:
server_addr = self.peer_sock.recv(self.chunksize).decode()
except OSError:
continue
print(LOG_PREFIX + "Found server @ " + server_addr)
host, port = server_addr.split(":")
return host, int(port)
def connect(self):
super().connect()
self.peer_sock.settimeout(1)
self.data_sock.connect(self.find_server())
def _send(self, msg: bytes):
while True:
try:
return self.data_sock.sendall(msg)
except OSError:
self.disconnect()
self.connect()
def _recv(self) -> tuple:
data, addr = self.data_sock.recvfrom(self.chunksize)
if data == b"":
raise OSError("Connection is closed.")
return data, addr
class Server(_Peer):
"""
The server side of a p2p connection.
For this class, :py:meth:`recv` and siblings are essential in facilitating the peer-discovery mechanism.
If a :py:class:`Server` instance doesn't call :py:meth:`recv` in its lifetime,
then it is hidden to other :py:class:`Client` instances on the network.
:py:meth:`recv` will respond to all peer-discovery requests until a client sends an actual piece of data,
sent using :py:meth:`send`.
The :py:attr:`clients` will keep a mapping of client hostnames, to a TCP socket connecting to them.
"""
def __init__(
self,
port,
*,
ssid: str = None,
passwd: str = None,
enable_ap: bool = False,
namespace: str = "default",
retry_for: tuple = (),
retry_delay: float = 5,
):
self.clients = None
self._local_ip = None
super().__init__(
port,
ssid=ssid,
passwd=passwd,
enable_ap=enable_ap,
namespace=namespace,
retry_for=retry_for,
retry_delay=retry_delay,
)
def connect(self):
super().connect()
self.clients = {}
self.peer_sock.bind((LOCAL_HOST, self.port))
self.data_sock.bind((LOCAL_HOST, 0))
self.data_sock.listen(1)
self._local_ip = (
_get_local_ip() + ":" + str(self.data_sock.getsockname()[1])
).encode()
print(LOG_PREFIX, "local i[ is", self._local_ip)
def _send(self, msg: bytes, *, host=None):
if host is None:
socks = self.clients.values()
else:
try:
socks = [self.clients[host]]
except KeyError:
raise ValueError(
"Client with provided host (%r) does not exist." % host
)
for sock in socks:
sock.sendall(msg)
def _recv(self) -> tuple:
while True:
client_socks = list(self.clients.values())
readable, *_ = select.select(client_socks + [self.data_sock, self.peer_sock] , [], [])
print("got sock to read:", readable)
for sock in readable:
if sock is self.data_sock:
data_sock, address = sock.accept()
self.clients[address] = data_sock
chunk = data_sock.recv(self.chunksize)
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
continue
return chunk, address
elif sock is self.peer_sock:
msg, address = sock.recvfrom(self.chunksize)
if msg != self._namespace_bytes:
continue
self.peer_sock.sendto(self._local_ip, address)
print(LOG_PREFIX + "Client @ %s requested discovery" % address[0])
else:
chunk = sock.recv(self.chunksize)
for address, maybe_del in self.clients.items():
if maybe_del is not sock:
continue
if chunk == b"":
del self.clients[address]
print(LOG_PREFIX, "Client @ %s disconnected" % address[1])
break
return chunk, address
continue
| handle_error( | identifier_name |
claim_name.rs | use std::{ops::Deref, sync::Arc};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder};
use bathbot_macros::SlashCommand;
use bathbot_model::{
rkyv_util::time::{DateRkyv, DateTimeRkyv},
rosu_v2::user::{ArchivedUser, User, UserHighestRank as UserHighestRankRkyv, UserStatistics},
};
use bathbot_util::{constants::OSU_API_ISSUE, MessageBuilder};
use eyre::{Report, Result};
use futures::{future, stream::FuturesUnordered, TryStreamExt};
use once_cell::sync::OnceCell;
use rkyv::{
with::{DeserializeWith, Map},
Archived, Infallible,
};
use rosu_v2::prelude::{CountryCode, GameMode, OsuError, UserHighestRank, Username};
use time::{OffsetDateTime, Time};
use twilight_interactions::command::{CommandModel, CreateCommand};
use crate::{
core::Context,
embeds::ClaimNameEmbed,
embeds::EmbedData,
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, InteractionCommandExt},
};
#[derive(CommandModel, CreateCommand, SlashCommand)]
#[command(
name = "claimname",
desc = "Check how much longer to wait until a name is up for grabs",
help = "If a player has not signed in for at least 6 months and has no plays,\
their username may be claimed.\n\
If that player does have any plays across all game modes, \
a [non-linear function](https://www.desmos.com/calculator/b89siyv9j8) is used to calculate \
how much extra time is added to those 6 months.\n\
This is to prevent people from stealing the usernames of active or recently retired players."
)]
pub struct ClaimName {
#[command(desc = "Specify a username")]
name: String,
}
async fn slash_claimname(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let ClaimName { name } = ClaimName::from_interaction(command.input_data())?;
let content = if name.chars().count() > 15 {
Some("Names can have at most 15 characters so your name won't be accepted".to_owned())
} else if let Some(c) = name
.chars()
.find(|c| !matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '[' | ']' | '_' | ' '))
{
Some(format!(
"`{c}` is an invalid character for usernames so `{name}` won't be accepted"
))
} else if name.len() < 3 {
Some(format!(
"Names must be at least 3 characters long so `{name}` won't be accepted"
))
} else if name.contains('_') && name.contains(' ') {
Some(format!(
"Names may contains underscores or spaces but not both \
so `{name}` won't be accepted"
))
} else if name.starts_with(' ') || name.ends_with(' ') {
Some(format!(
"Names can't start or end with spaces so `{name}` won't be accepted"
))
} else {
None
};
if let Some(content) = content {
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
let user_id = match UserArgs::username(&ctx, &name).await {
UserArgs::Args(args) => args.user_id,
UserArgs::User { user, .. } => user.user_id,
UserArgs::Err(OsuError::NotFound) => {
let content = if ClaimNameValidator::is_valid(&name) {
format!("User `{name}` was not found, the name should be available to claim")
} else {
format!("`{name}` does not seem to be taken but it likely won't be accepted")
};
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
UserArgs::Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let args = [
GameMode::Osu,
GameMode::Taiko,
GameMode::Catch,
GameMode::Mania,
]
.map(|mode| UserArgs::user_id(user_id).mode(mode));
let user_fut = args
.into_iter()
.map(|args| ctx.redis().osu_user(args))
.collect::<FuturesUnordered<_>>()
.try_fold(None, |user: Option<ClaimNameUser>, next| match user {
Some(mut user) => {
let next_stats = next.stats();
match user.statistics {
Some(ref mut accum) => accum.playcount += next_stats.playcount(),
None => user.statistics = Some(next_stats.to_owned()),
}
let (next_highest_rank, next_last_visit) = match next {
RedisData::Original(next) => {
let rank = next.highest_rank;
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| count.start_date.with_time(Time::MIDNIGHT).assume_utc());
let last_visit = match (next.last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
RedisData::Archive(next) => {
let next: &Archived<User> = &next;
let rank = Map::<UserHighestRankRkyv>::deserialize_with(
&next.highest_rank,
&mut Infallible,
)
.unwrap();
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| {
DateRkyv::deserialize_with(&count.start_date, &mut Infallible)
.unwrap()
.with_time(Time::MIDNIGHT)
.assume_utc()
});
let last_visit = next.last_visit.as_ref().map(|time| {
DateTimeRkyv::deserialize_with(time, &mut Infallible).unwrap()
});
let last_visit = match (last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
};
match (user.highest_rank.as_mut(), next_highest_rank) {
(Some(curr), Some(next)) if curr.rank > next.rank => *curr = next,
(None, next @ Some(_)) => user.highest_rank = next,
_ => {}
}
match (user.last_visit.as_mut(), next_last_visit) {
(Some(curr), Some(next)) if *curr < next => *curr = next,
(None, next @ Some(_)) => user.last_visit = next,
_ => {}
}
future::ready(Ok(Some(user)))
}
None => future::ready(Ok(Some(ClaimNameUser::from(next)))),
});
let user = match user_fut.await {
Ok(user) => user.unwrap(),
Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let embed = ClaimNameEmbed::new(&user, &name).build();
let builder = MessageBuilder::new().embed(embed);
command.update(&ctx, builder).await?;
Ok(())
}
pub struct ClaimNameUser {
pub avatar_url: Box<str>,
pub country_code: CountryCode,
pub has_badges: bool,
pub has_ranked_mapsets: bool,
pub highest_rank: Option<UserHighestRank>,
pub last_visit: Option<OffsetDateTime>,
pub statistics: Option<UserStatistics>,
pub username: Username,
pub user_id: u32,
}
impl From<User> for ClaimNameUser {
#[inline]
fn from(user: User) -> Self {
Self {
avatar_url: user.avatar_url,
country_code: user.country_code,
has_badges: !user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: user.highest_rank,
last_visit: user.last_visit,
statistics: user.statistics.map(UserStatistics::from),
username: user.username,
user_id: user.user_id,
}
}
}
impl From<&ArchivedUser> for ClaimNameUser {
#[inline]
fn from(user: &ArchivedUser) -> Self {
Self {
avatar_url: user.avatar_url.as_ref().into(),
country_code: user.country_code.as_str().into(),
has_badges: !user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: Map::<UserHighestRankRkyv>::deserialize_with(
&user.highest_rank,
&mut Infallible,
)
.unwrap(),
last_visit: Map::<DateTimeRkyv>::deserialize_with(&user.last_visit, &mut Infallible)
.unwrap(),
statistics: user.statistics.as_ref().cloned(),
username: user.username.as_str().into(),
user_id: user.user_id,
}
}
}
impl From<RedisData<User>> for ClaimNameUser {
#[inline]
fn from(user: RedisData<User>) -> Self |
}
pub struct ClaimNameValidator;
impl ClaimNameValidator {
pub fn is_valid(prefix: &str) -> bool {
!VALIDATOR
.get_or_init(|| {
let needles = [
"qfqqz",
"dppljf{",
"difbu",
"ojhhfs",
"mpmj",
"gvdl",
"ejmep",
"gbhhpu",
"dvou",
"tijhfupsb",
"qpso",
"cbodip",
"qfojt",
"wbhjob",
"qvttz",
"ejdl",
"dpdl",
"brvjmb",
"ijumfs",
"ibdl",
"tibwju",
"gsjfoepl",
]
.into_iter()
.map(String::from)
.map(|mut needle| {
unsafe { needle.as_bytes_mut() }
.iter_mut()
.for_each(|byte| *byte -= 1);
needle
});
AhoCorasickBuilder::new()
.ascii_case_insensitive(true)
.dfa(true)
.build_with_size(needles)
.unwrap()
})
.is_match(prefix)
}
}
static VALIDATOR: OnceCell<AhoCorasick<u16>> = OnceCell::new();
| {
match user {
RedisData::Original(user) => Self::from(user),
RedisData::Archive(user) => Self::from(user.deref()),
}
} | identifier_body |
claim_name.rs | use std::{ops::Deref, sync::Arc};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder};
use bathbot_macros::SlashCommand;
use bathbot_model::{
rkyv_util::time::{DateRkyv, DateTimeRkyv},
rosu_v2::user::{ArchivedUser, User, UserHighestRank as UserHighestRankRkyv, UserStatistics},
};
use bathbot_util::{constants::OSU_API_ISSUE, MessageBuilder};
use eyre::{Report, Result};
use futures::{future, stream::FuturesUnordered, TryStreamExt};
use once_cell::sync::OnceCell;
use rkyv::{
with::{DeserializeWith, Map},
Archived, Infallible,
};
use rosu_v2::prelude::{CountryCode, GameMode, OsuError, UserHighestRank, Username};
use time::{OffsetDateTime, Time};
use twilight_interactions::command::{CommandModel, CreateCommand};
use crate::{
core::Context,
embeds::ClaimNameEmbed,
embeds::EmbedData,
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, InteractionCommandExt},
};
#[derive(CommandModel, CreateCommand, SlashCommand)]
#[command(
name = "claimname",
desc = "Check how much longer to wait until a name is up for grabs",
help = "If a player has not signed in for at least 6 months and has no plays,\
their username may be claimed.\n\
If that player does have any plays across all game modes, \
a [non-linear function](https://www.desmos.com/calculator/b89siyv9j8) is used to calculate \
how much extra time is added to those 6 months.\n\
This is to prevent people from stealing the usernames of active or recently retired players."
)]
pub struct ClaimName {
#[command(desc = "Specify a username")]
name: String,
}
async fn slash_claimname(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let ClaimName { name } = ClaimName::from_interaction(command.input_data())?;
let content = if name.chars().count() > 15 {
Some("Names can have at most 15 characters so your name won't be accepted".to_owned())
} else if let Some(c) = name
.chars()
.find(|c| !matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '[' | ']' | '_' | ' '))
{
Some(format!(
"`{c}` is an invalid character for usernames so `{name}` won't be accepted"
))
} else if name.len() < 3 {
Some(format!(
"Names must be at least 3 characters long so `{name}` won't be accepted"
))
} else if name.contains('_') && name.contains(' ') {
Some(format!(
"Names may contains underscores or spaces but not both \
so `{name}` won't be accepted"
))
} else if name.starts_with(' ') || name.ends_with(' ') {
Some(format!(
"Names can't start or end with spaces so `{name}` won't be accepted"
))
} else {
None
};
if let Some(content) = content {
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
let user_id = match UserArgs::username(&ctx, &name).await {
UserArgs::Args(args) => args.user_id,
UserArgs::User { user, .. } => user.user_id,
UserArgs::Err(OsuError::NotFound) => {
let content = if ClaimNameValidator::is_valid(&name) {
format!("User `{name}` was not found, the name should be available to claim")
} else {
format!("`{name}` does not seem to be taken but it likely won't be accepted")
};
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
UserArgs::Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let args = [
GameMode::Osu,
GameMode::Taiko,
GameMode::Catch,
GameMode::Mania,
]
.map(|mode| UserArgs::user_id(user_id).mode(mode));
let user_fut = args
.into_iter()
.map(|args| ctx.redis().osu_user(args))
.collect::<FuturesUnordered<_>>()
.try_fold(None, |user: Option<ClaimNameUser>, next| match user {
Some(mut user) => {
let next_stats = next.stats();
match user.statistics {
Some(ref mut accum) => accum.playcount += next_stats.playcount(),
None => user.statistics = Some(next_stats.to_owned()),
}
let (next_highest_rank, next_last_visit) = match next {
RedisData::Original(next) => {
let rank = next.highest_rank;
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| count.start_date.with_time(Time::MIDNIGHT).assume_utc());
let last_visit = match (next.last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
RedisData::Archive(next) => {
let next: &Archived<User> = &next;
let rank = Map::<UserHighestRankRkyv>::deserialize_with(
&next.highest_rank,
&mut Infallible,
)
.unwrap();
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| {
DateRkyv::deserialize_with(&count.start_date, &mut Infallible)
.unwrap()
.with_time(Time::MIDNIGHT)
.assume_utc()
});
let last_visit = next.last_visit.as_ref().map(|time| {
DateTimeRkyv::deserialize_with(time, &mut Infallible).unwrap()
});
let last_visit = match (last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
};
match (user.highest_rank.as_mut(), next_highest_rank) {
(Some(curr), Some(next)) if curr.rank > next.rank => *curr = next,
(None, next @ Some(_)) => user.highest_rank = next,
_ => {}
}
match (user.last_visit.as_mut(), next_last_visit) {
(Some(curr), Some(next)) if *curr < next => *curr = next,
(None, next @ Some(_)) => user.last_visit = next,
_ => {}
}
future::ready(Ok(Some(user)))
}
None => future::ready(Ok(Some(ClaimNameUser::from(next)))),
});
let user = match user_fut.await {
Ok(user) => user.unwrap(),
Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let embed = ClaimNameEmbed::new(&user, &name).build();
let builder = MessageBuilder::new().embed(embed);
command.update(&ctx, builder).await?;
Ok(())
}
pub struct ClaimNameUser {
pub avatar_url: Box<str>,
pub country_code: CountryCode,
pub has_badges: bool,
pub has_ranked_mapsets: bool,
pub highest_rank: Option<UserHighestRank>,
pub last_visit: Option<OffsetDateTime>,
pub statistics: Option<UserStatistics>,
pub username: Username,
pub user_id: u32,
}
impl From<User> for ClaimNameUser {
#[inline]
fn from(user: User) -> Self {
Self {
avatar_url: user.avatar_url,
country_code: user.country_code,
has_badges: !user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: user.highest_rank,
last_visit: user.last_visit,
statistics: user.statistics.map(UserStatistics::from),
username: user.username,
user_id: user.user_id,
}
}
}
impl From<&ArchivedUser> for ClaimNameUser {
#[inline]
fn from(user: &ArchivedUser) -> Self {
Self {
avatar_url: user.avatar_url.as_ref().into(),
country_code: user.country_code.as_str().into(),
has_badges: !user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: Map::<UserHighestRankRkyv>::deserialize_with(
&user.highest_rank,
&mut Infallible,
)
.unwrap(),
last_visit: Map::<DateTimeRkyv>::deserialize_with(&user.last_visit, &mut Infallible)
.unwrap(),
statistics: user.statistics.as_ref().cloned(),
username: user.username.as_str().into(),
user_id: user.user_id,
}
}
}
impl From<RedisData<User>> for ClaimNameUser {
#[inline]
fn | (user: RedisData<User>) -> Self {
match user {
RedisData::Original(user) => Self::from(user),
RedisData::Archive(user) => Self::from(user.deref()),
}
}
}
pub struct ClaimNameValidator;
impl ClaimNameValidator {
pub fn is_valid(prefix: &str) -> bool {
!VALIDATOR
.get_or_init(|| {
let needles = [
"qfqqz",
"dppljf{",
"difbu",
"ojhhfs",
"mpmj",
"gvdl",
"ejmep",
"gbhhpu",
"dvou",
"tijhfupsb",
"qpso",
"cbodip",
"qfojt",
"wbhjob",
"qvttz",
"ejdl",
"dpdl",
"brvjmb",
"ijumfs",
"ibdl",
"tibwju",
"gsjfoepl",
]
.into_iter()
.map(String::from)
.map(|mut needle| {
unsafe { needle.as_bytes_mut() }
.iter_mut()
.for_each(|byte| *byte -= 1);
needle
});
AhoCorasickBuilder::new()
.ascii_case_insensitive(true)
.dfa(true)
.build_with_size(needles)
.unwrap()
})
.is_match(prefix)
}
}
static VALIDATOR: OnceCell<AhoCorasick<u16>> = OnceCell::new();
| from | identifier_name |
claim_name.rs | use std::{ops::Deref, sync::Arc};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder};
use bathbot_macros::SlashCommand;
use bathbot_model::{
rkyv_util::time::{DateRkyv, DateTimeRkyv},
rosu_v2::user::{ArchivedUser, User, UserHighestRank as UserHighestRankRkyv, UserStatistics},
};
use bathbot_util::{constants::OSU_API_ISSUE, MessageBuilder};
use eyre::{Report, Result};
use futures::{future, stream::FuturesUnordered, TryStreamExt};
use once_cell::sync::OnceCell;
use rkyv::{
with::{DeserializeWith, Map},
Archived, Infallible,
};
use rosu_v2::prelude::{CountryCode, GameMode, OsuError, UserHighestRank, Username};
use time::{OffsetDateTime, Time};
use twilight_interactions::command::{CommandModel, CreateCommand};
use crate::{
core::Context,
embeds::ClaimNameEmbed,
embeds::EmbedData,
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, InteractionCommandExt},
};
#[derive(CommandModel, CreateCommand, SlashCommand)]
#[command(
name = "claimname",
desc = "Check how much longer to wait until a name is up for grabs",
help = "If a player has not signed in for at least 6 months and has no plays,\
their username may be claimed.\n\
If that player does have any plays across all game modes, \
a [non-linear function](https://www.desmos.com/calculator/b89siyv9j8) is used to calculate \
how much extra time is added to those 6 months.\n\
This is to prevent people from stealing the usernames of active or recently retired players."
)]
pub struct ClaimName {
#[command(desc = "Specify a username")]
name: String,
}
async fn slash_claimname(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let ClaimName { name } = ClaimName::from_interaction(command.input_data())?;
let content = if name.chars().count() > 15 {
Some("Names can have at most 15 characters so your name won't be accepted".to_owned())
} else if let Some(c) = name
.chars()
.find(|c| !matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '[' | ']' | '_' | ' '))
{
Some(format!(
"`{c}` is an invalid character for usernames so `{name}` won't be accepted"
))
} else if name.len() < 3 {
Some(format!(
"Names must be at least 3 characters long so `{name}` won't be accepted"
))
} else if name.contains('_') && name.contains(' ') {
Some(format!(
"Names may contains underscores or spaces but not both \
so `{name}` won't be accepted"
))
} else if name.starts_with(' ') || name.ends_with(' ') {
Some(format!(
"Names can't start or end with spaces so `{name}` won't be accepted"
))
} else {
None
};
if let Some(content) = content {
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
let user_id = match UserArgs::username(&ctx, &name).await {
UserArgs::Args(args) => args.user_id,
UserArgs::User { user, .. } => user.user_id,
UserArgs::Err(OsuError::NotFound) => {
let content = if ClaimNameValidator::is_valid(&name) {
format!("User `{name}` was not found, the name should be available to claim")
} else {
format!("`{name}` does not seem to be taken but it likely won't be accepted")
};
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
UserArgs::Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let args = [
GameMode::Osu,
GameMode::Taiko,
GameMode::Catch,
GameMode::Mania,
]
.map(|mode| UserArgs::user_id(user_id).mode(mode));
let user_fut = args
.into_iter()
.map(|args| ctx.redis().osu_user(args))
.collect::<FuturesUnordered<_>>()
.try_fold(None, |user: Option<ClaimNameUser>, next| match user {
Some(mut user) => {
let next_stats = next.stats();
match user.statistics {
Some(ref mut accum) => accum.playcount += next_stats.playcount(),
None => user.statistics = Some(next_stats.to_owned()),
}
let (next_highest_rank, next_last_visit) = match next {
RedisData::Original(next) => {
let rank = next.highest_rank;
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| count.start_date.with_time(Time::MIDNIGHT).assume_utc());
let last_visit = match (next.last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
RedisData::Archive(next) => {
let next: &Archived<User> = &next;
let rank = Map::<UserHighestRankRkyv>::deserialize_with(
&next.highest_rank,
&mut Infallible,
)
.unwrap();
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| {
DateRkyv::deserialize_with(&count.start_date, &mut Infallible)
.unwrap()
.with_time(Time::MIDNIGHT)
.assume_utc()
});
let last_visit = next.last_visit.as_ref().map(|time| {
DateTimeRkyv::deserialize_with(time, &mut Infallible).unwrap()
});
let last_visit = match (last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
};
match (user.highest_rank.as_mut(), next_highest_rank) {
(Some(curr), Some(next)) if curr.rank > next.rank => *curr = next,
(None, next @ Some(_)) => user.highest_rank = next,
_ => {}
}
match (user.last_visit.as_mut(), next_last_visit) {
(Some(curr), Some(next)) if *curr < next => *curr = next,
(None, next @ Some(_)) => user.last_visit = next,
_ => {}
}
future::ready(Ok(Some(user)))
}
None => future::ready(Ok(Some(ClaimNameUser::from(next)))),
});
let user = match user_fut.await {
Ok(user) => user.unwrap(),
Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let embed = ClaimNameEmbed::new(&user, &name).build();
let builder = MessageBuilder::new().embed(embed);
command.update(&ctx, builder).await?;
Ok(())
}
pub struct ClaimNameUser {
pub avatar_url: Box<str>,
pub country_code: CountryCode,
pub has_badges: bool,
pub has_ranked_mapsets: bool,
pub highest_rank: Option<UserHighestRank>,
pub last_visit: Option<OffsetDateTime>,
pub statistics: Option<UserStatistics>, | pub user_id: u32,
}
impl From<User> for ClaimNameUser {
#[inline]
fn from(user: User) -> Self {
Self {
avatar_url: user.avatar_url,
country_code: user.country_code,
has_badges: !user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: user.highest_rank,
last_visit: user.last_visit,
statistics: user.statistics.map(UserStatistics::from),
username: user.username,
user_id: user.user_id,
}
}
}
impl From<&ArchivedUser> for ClaimNameUser {
#[inline]
fn from(user: &ArchivedUser) -> Self {
Self {
avatar_url: user.avatar_url.as_ref().into(),
country_code: user.country_code.as_str().into(),
has_badges: !user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: Map::<UserHighestRankRkyv>::deserialize_with(
&user.highest_rank,
&mut Infallible,
)
.unwrap(),
last_visit: Map::<DateTimeRkyv>::deserialize_with(&user.last_visit, &mut Infallible)
.unwrap(),
statistics: user.statistics.as_ref().cloned(),
username: user.username.as_str().into(),
user_id: user.user_id,
}
}
}
impl From<RedisData<User>> for ClaimNameUser {
#[inline]
fn from(user: RedisData<User>) -> Self {
match user {
RedisData::Original(user) => Self::from(user),
RedisData::Archive(user) => Self::from(user.deref()),
}
}
}
pub struct ClaimNameValidator;
impl ClaimNameValidator {
pub fn is_valid(prefix: &str) -> bool {
!VALIDATOR
.get_or_init(|| {
let needles = [
"qfqqz",
"dppljf{",
"difbu",
"ojhhfs",
"mpmj",
"gvdl",
"ejmep",
"gbhhpu",
"dvou",
"tijhfupsb",
"qpso",
"cbodip",
"qfojt",
"wbhjob",
"qvttz",
"ejdl",
"dpdl",
"brvjmb",
"ijumfs",
"ibdl",
"tibwju",
"gsjfoepl",
]
.into_iter()
.map(String::from)
.map(|mut needle| {
unsafe { needle.as_bytes_mut() }
.iter_mut()
.for_each(|byte| *byte -= 1);
needle
});
AhoCorasickBuilder::new()
.ascii_case_insensitive(true)
.dfa(true)
.build_with_size(needles)
.unwrap()
})
.is_match(prefix)
}
}
static VALIDATOR: OnceCell<AhoCorasick<u16>> = OnceCell::new(); | pub username: Username, | random_line_split |
createVm.go | package main
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path/filepath"
"strings"
"time"
hyperclient "github.com/Cloud-Foundations/Dominator/hypervisor/client"
"github.com/Cloud-Foundations/Dominator/lib/filesystem/util"
"github.com/Cloud-Foundations/Dominator/lib/format"
"github.com/Cloud-Foundations/Dominator/lib/images/virtualbox"
"github.com/Cloud-Foundations/Dominator/lib/json"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/lib/tags"
fm_proto "github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
var sysfsDirectory = "/sys/block"
type volumeInitParams struct {
hyper_proto.VolumeInitialisationInfo
MountPoint string
}
type wrappedReadCloser struct {
real io.Closer
wrap io.Reader
}
func init() {
rand.Seed(time.Now().Unix() + time.Now().UnixNano())
}
func createVmSubcommand(args []string, logger log.DebugLogger) error {
if err := createVm(logger); err != nil {
return fmt.Errorf("error creating VM: %s", err)
}
return nil
}
func callCreateVm(client *srpc.Client, request hyper_proto.CreateVmRequest,
reply *hyper_proto.CreateVmResponse, imageReader, userDataReader io.Reader,
imageSize, userDataSize int64, logger log.DebugLogger) error {
conn, err := client.Call("Hypervisor.CreateVm")
if err != nil {
return fmt.Errorf("error calling Hypervisor.CreateVm: %s", err)
}
defer conn.Close()
if err := conn.Encode(request); err != nil {
return fmt.Errorf("error encoding request: %s", err)
}
// Stream any required data.
if imageReader != nil {
logger.Debugln(0, "uploading image")
startTime := time.Now()
if nCopied, err := io.CopyN(conn, imageReader, imageSize); err != nil {
return fmt.Errorf("error uploading image: %s got %d of %d bytes",
err, nCopied, imageSize)
} else {
duration := time.Since(startTime)
speed := uint64(float64(nCopied) / duration.Seconds())
logger.Debugf(0, "uploaded image in %s (%s/s)\n",
format.Duration(duration), format.FormatBytes(speed))
}
}
if userDataReader != nil {
logger.Debugln(0, "uploading user data")
nCopied, err := io.CopyN(conn, userDataReader, userDataSize)
if err != nil {
return fmt.Errorf(
"error uploading user data: %s got %d of %d bytes",
err, nCopied, userDataSize)
}
}
response, err := processCreateVmResponses(conn, logger)
*reply = response
return err
}
func createVm(logger log.DebugLogger) error {
if *vmHostname == "" {
if name := vmTags["Name"]; name == "" {
return errors.New("no hostname specified")
} else {
*vmHostname = name
}
} else {
if name := vmTags["Name"]; name == "" {
if vmTags == nil {
vmTags = make(tags.Tags)
}
vmTags["Name"] = *vmHostname
}
}
if hypervisor, err := getHypervisorAddress(); err != nil {
return err
} else {
logger.Debugf(0, "creating VM on %s\n", hypervisor)
return createVmOnHypervisor(hypervisor, logger)
}
}
func createVmInfoFromFlags() hyper_proto.VmInfo {
return hyper_proto.VmInfo{
ConsoleType: consoleType,
DestroyProtection: *destroyProtection,
DisableVirtIO: *disableVirtIO,
Hostname: *vmHostname,
MemoryInMiB: uint64(memory >> 20),
MilliCPUs: *milliCPUs,
OwnerGroups: ownerGroups,
OwnerUsers: ownerUsers,
Tags: vmTags,
SecondarySubnetIDs: secondarySubnetIDs,
SubnetId: *subnetId,
VirtualCPUs: *virtualCPUs,
}
}
func createVmOnHypervisor(hypervisor string, logger log.DebugLogger) error {
request := hyper_proto.CreateVmRequest{
DhcpTimeout: *dhcpTimeout,
EnableNetboot: *enableNetboot,
MinimumFreeBytes: uint64(minFreeBytes),
RoundupPower: *roundupPower,
SkipMemoryCheck: *skipMemoryCheck,
VmInfo: createVmInfoFromFlags(),
}
if request.VmInfo.MemoryInMiB < 1 {
request.VmInfo.MemoryInMiB = 1024
}
if request.VmInfo.MilliCPUs < 1 {
request.VmInfo.MilliCPUs = 250
}
minimumCPUs := request.VmInfo.MilliCPUs / 1000
if request.VmInfo.VirtualCPUs > 0 &&
request.VmInfo.VirtualCPUs < minimumCPUs {
return fmt.Errorf("vCPUs must be at least %d", minimumCPUs)
}
if len(requestIPs) > 0 && requestIPs[0] != "" {
ipAddr := net.ParseIP(requestIPs[0])
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.Address.IpAddress = ipAddr
}
if len(requestIPs) > 1 && len(secondarySubnetIDs) > 0 {
request.SecondaryAddresses = make([]hyper_proto.Address,
len(secondarySubnetIDs))
for index, addr := range requestIPs[1:] {
if addr == "" {
continue
}
ipAddr := net.ParseIP(addr)
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.SecondaryAddresses[index] = hyper_proto.Address{
IpAddress: ipAddr}
}
}
secondaryFstab := &bytes.Buffer{}
var vinitParams []volumeInitParams
if *secondaryVolumesInitParams == "" {
vinitParams = makeVolumeInitParams(uint(len(secondaryVolumeSizes)))
} else {
err := json.ReadFromFile(*secondaryVolumesInitParams, &vinitParams)
if err != nil {
return err
}
}
for index, size := range secondaryVolumeSizes {
request.SecondaryVolumes = append(request.SecondaryVolumes,
hyper_proto.Volume{Size: uint64(size)})
if *initialiseSecondaryVolumes &&
index < len(vinitParams) {
vinit := vinitParams[index]
if vinit.Label == "" {
return fmt.Errorf("VolumeInit[%d] missing Label", index)
}
if vinit.MountPoint == "" {
return fmt.Errorf("VolumeInit[%d] missing MountPoint", index)
}
request.OverlayDirectories = append(request.OverlayDirectories,
vinit.MountPoint)
request.SecondaryVolumesInit = append(request.SecondaryVolumesInit,
vinit.VolumeInitialisationInfo)
util.WriteFstabEntry(secondaryFstab, "LABEL="+vinit.Label,
vinit.MountPoint, "ext4", "discard", 0, 2)
}
}
var imageReader, userDataReader io.Reader
if *imageName != "" {
request.ImageName = *imageName
request.ImageTimeout = *imageTimeout
request.SkipBootloader = *skipBootloader
if overlayFiles, err := loadOverlayFiles(); err != nil {
return err
} else {
request.OverlayFiles = overlayFiles
}
secondaryFstab.Write(request.OverlayFiles["/etc/fstab"])
if secondaryFstab.Len() > 0 {
if request.OverlayFiles == nil {
request.OverlayFiles = make(map[string][]byte)
}
request.OverlayFiles["/etc/fstab"] = secondaryFstab.Bytes()
}
} else if *imageURL != "" {
request.ImageURL = *imageURL
} else if *imageFile != "" {
file, size, err := getReader(*imageFile)
if err != nil {
return err
} else {
defer file.Close()
request.ImageDataSize = uint64(size)
imageReader = file
}
} else {
return errors.New("no image specified")
}
if *userDataFile != "" {
file, size, err := getReader(*userDataFile)
if err != nil {
return err
} else {
defer file.Close()
request.UserDataSize = uint64(size)
userDataReader = file
}
}
client, err := dialHypervisor(hypervisor)
if err != nil {
return err
}
defer client.Close()
var reply hyper_proto.CreateVmResponse
err = callCreateVm(client, request, &reply, imageReader, userDataReader,
int64(request.ImageDataSize), int64(request.UserDataSize), logger)
if err != nil {
return err
}
if err := hyperclient.AcknowledgeVm(client, reply.IpAddress); err != nil {
return fmt.Errorf("error acknowledging VM: %s", err)
}
fmt.Println(reply.IpAddress)
if reply.DhcpTimedOut {
return errors.New("DHCP ACK timed out")
}
if *dhcpTimeout > 0 {
logger.Debugln(0, "Received DHCP ACK")
}
return maybeWatchVm(client, hypervisor, reply.IpAddress, logger)
}
func getHypervisorAddress() (string, error) {
if *hypervisorHostname != "" {
return fmt.Sprintf("%s:%d", *hypervisorHostname, *hypervisorPortNum),
nil
}
client, err := dialFleetManager(fmt.Sprintf("%s:%d",
*fleetManagerHostname, *fleetManagerPortNum))
if err != nil {
return "", err
}
defer client.Close()
if *adjacentVM != "" {
if adjacentVmIpAddr, err := lookupIP(*adjacentVM); err != nil {
return "", err
} else {
return findHypervisorClient(client, adjacentVmIpAddr)
}
}
request := fm_proto.ListHypervisorsInLocationRequest{
Location: *location,
SubnetId: *subnetId,
}
var reply fm_proto.ListHypervisorsInLocationResponse
err = client.RequestReply("FleetManager.ListHypervisorsInLocation",
request, &reply)
if err != nil {
return "", err
}
if reply.Error != "" {
return "", errors.New(reply.Error)
}
if numHyper := len(reply.HypervisorAddresses); numHyper < 1 {
return "", errors.New("no active Hypervisors in location")
} else if numHyper < 2 {
return reply.HypervisorAddresses[0], nil
} else {
return reply.HypervisorAddresses[rand.Intn(numHyper-1)], nil
}
}
func getReader(filename string) (io.ReadCloser, int64, error) {
if file, err := os.Open(filename); err != nil {
return nil, -1, err
} else if filepath.Ext(filename) == ".vdi" {
vdi, err := virtualbox.NewReader(file)
if err != nil {
file.Close()
return nil, -1, err
}
return &wrappedReadCloser{real: file, wrap: vdi}, int64(vdi.Size), nil
} else {
fi, err := file.Stat()
if err != nil {
file.Close()
return nil, -1, err
}
switch fi.Mode() & os.ModeType {
case 0:
return file, fi.Size(), nil
case os.ModeDevice:
if size, err := readBlockDeviceSize(filename); err != nil {
file.Close()
return nil, -1, err
} else {
return file, size, nil
}
default:
file.Close()
return nil, -1, errors.New("unsupported file type")
}
}
}
func loadOverlayFiles() (map[string][]byte, error) |
func makeVolumeInitParams(numVolumes uint) []volumeInitParams {
vinitParams := make([]volumeInitParams, numVolumes)
for index := 0; index < int(numVolumes); index++ {
label := fmt.Sprintf("/data/%d", index)
vinitParams[index].Label = label
vinitParams[index].MountPoint = label
}
return vinitParams
}
func processCreateVmResponses(conn *srpc.Conn,
logger log.DebugLogger) (hyper_proto.CreateVmResponse, error) {
var zeroResponse hyper_proto.CreateVmResponse
if err := conn.Flush(); err != nil {
return zeroResponse, fmt.Errorf("error flushing: %s", err)
}
for {
var response hyper_proto.CreateVmResponse
if err := conn.Decode(&response); err != nil {
return zeroResponse, fmt.Errorf("error decoding: %s", err)
}
if response.Error != "" {
return zeroResponse, errors.New(response.Error)
}
if response.ProgressMessage != "" {
logger.Debugln(0, response.ProgressMessage)
}
if response.Final {
return response, nil
}
}
}
func readBlockDeviceSize(filename string) (int64, error) {
if strings.HasPrefix(filename, "/dev/") {
filename = filename[5:]
}
deviceBlocks, err := readSysfsInt64(
filepath.Join(sysfsDirectory, filename, "size"))
if err != nil {
return 0, err
}
return deviceBlocks * 512, nil
}
func readSysfsInt64(filename string) (int64, error) {
file, err := os.Open(filename)
if err != nil {
return 0, err
}
defer file.Close()
var value int64
nScanned, err := fmt.Fscanf(file, "%d", &value)
if err != nil {
return 0, err
}
if nScanned < 1 {
return 0, fmt.Errorf("only read %d values from: %s", nScanned, filename)
}
return value, nil
}
func (r *wrappedReadCloser) Close() error {
return r.real.Close()
}
func (r *wrappedReadCloser) Read(p []byte) (n int, err error) {
return r.wrap.Read(p)
}
| {
if *overlayDirectory == "" {
return nil, nil
}
overlayFiles := make(map[string][]byte)
err := filepath.Walk(*overlayDirectory,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
overlayFiles[path[len(*overlayDirectory):]] = data
return nil
})
return overlayFiles, err
} | identifier_body |
createVm.go | package main
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path/filepath"
"strings"
"time"
hyperclient "github.com/Cloud-Foundations/Dominator/hypervisor/client"
"github.com/Cloud-Foundations/Dominator/lib/filesystem/util"
"github.com/Cloud-Foundations/Dominator/lib/format"
"github.com/Cloud-Foundations/Dominator/lib/images/virtualbox"
"github.com/Cloud-Foundations/Dominator/lib/json"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/lib/tags"
fm_proto "github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
var sysfsDirectory = "/sys/block"
type volumeInitParams struct {
hyper_proto.VolumeInitialisationInfo
MountPoint string
}
type wrappedReadCloser struct {
real io.Closer
wrap io.Reader
}
func init() {
rand.Seed(time.Now().Unix() + time.Now().UnixNano())
}
func createVmSubcommand(args []string, logger log.DebugLogger) error {
if err := createVm(logger); err != nil {
return fmt.Errorf("error creating VM: %s", err)
}
return nil
}
func callCreateVm(client *srpc.Client, request hyper_proto.CreateVmRequest,
reply *hyper_proto.CreateVmResponse, imageReader, userDataReader io.Reader,
imageSize, userDataSize int64, logger log.DebugLogger) error {
conn, err := client.Call("Hypervisor.CreateVm")
if err != nil {
return fmt.Errorf("error calling Hypervisor.CreateVm: %s", err)
}
defer conn.Close()
if err := conn.Encode(request); err != nil {
return fmt.Errorf("error encoding request: %s", err)
}
// Stream any required data.
if imageReader != nil {
logger.Debugln(0, "uploading image")
startTime := time.Now()
if nCopied, err := io.CopyN(conn, imageReader, imageSize); err != nil {
return fmt.Errorf("error uploading image: %s got %d of %d bytes",
err, nCopied, imageSize)
} else {
duration := time.Since(startTime)
speed := uint64(float64(nCopied) / duration.Seconds())
logger.Debugf(0, "uploaded image in %s (%s/s)\n",
format.Duration(duration), format.FormatBytes(speed))
}
}
if userDataReader != nil {
logger.Debugln(0, "uploading user data")
nCopied, err := io.CopyN(conn, userDataReader, userDataSize)
if err != nil {
return fmt.Errorf(
"error uploading user data: %s got %d of %d bytes",
err, nCopied, userDataSize)
}
}
response, err := processCreateVmResponses(conn, logger)
*reply = response
return err
}
func createVm(logger log.DebugLogger) error {
if *vmHostname == "" {
if name := vmTags["Name"]; name == "" {
return errors.New("no hostname specified")
} else {
*vmHostname = name
}
} else {
if name := vmTags["Name"]; name == "" {
if vmTags == nil {
vmTags = make(tags.Tags)
}
vmTags["Name"] = *vmHostname
}
}
if hypervisor, err := getHypervisorAddress(); err != nil {
return err
} else {
logger.Debugf(0, "creating VM on %s\n", hypervisor)
return createVmOnHypervisor(hypervisor, logger)
}
}
func createVmInfoFromFlags() hyper_proto.VmInfo {
return hyper_proto.VmInfo{
ConsoleType: consoleType,
DestroyProtection: *destroyProtection,
DisableVirtIO: *disableVirtIO,
Hostname: *vmHostname,
MemoryInMiB: uint64(memory >> 20),
MilliCPUs: *milliCPUs,
OwnerGroups: ownerGroups,
OwnerUsers: ownerUsers,
Tags: vmTags,
SecondarySubnetIDs: secondarySubnetIDs,
SubnetId: *subnetId,
VirtualCPUs: *virtualCPUs,
}
}
func createVmOnHypervisor(hypervisor string, logger log.DebugLogger) error {
request := hyper_proto.CreateVmRequest{
DhcpTimeout: *dhcpTimeout,
EnableNetboot: *enableNetboot,
MinimumFreeBytes: uint64(minFreeBytes),
RoundupPower: *roundupPower,
SkipMemoryCheck: *skipMemoryCheck,
VmInfo: createVmInfoFromFlags(),
}
if request.VmInfo.MemoryInMiB < 1 {
request.VmInfo.MemoryInMiB = 1024
}
if request.VmInfo.MilliCPUs < 1 {
request.VmInfo.MilliCPUs = 250
}
minimumCPUs := request.VmInfo.MilliCPUs / 1000
if request.VmInfo.VirtualCPUs > 0 &&
request.VmInfo.VirtualCPUs < minimumCPUs {
return fmt.Errorf("vCPUs must be at least %d", minimumCPUs)
}
if len(requestIPs) > 0 && requestIPs[0] != "" {
ipAddr := net.ParseIP(requestIPs[0])
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.Address.IpAddress = ipAddr
}
if len(requestIPs) > 1 && len(secondarySubnetIDs) > 0 {
request.SecondaryAddresses = make([]hyper_proto.Address,
len(secondarySubnetIDs))
for index, addr := range requestIPs[1:] {
if addr == "" {
continue
}
ipAddr := net.ParseIP(addr)
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.SecondaryAddresses[index] = hyper_proto.Address{
IpAddress: ipAddr}
}
}
secondaryFstab := &bytes.Buffer{}
var vinitParams []volumeInitParams
if *secondaryVolumesInitParams == "" {
vinitParams = makeVolumeInitParams(uint(len(secondaryVolumeSizes)))
} else {
err := json.ReadFromFile(*secondaryVolumesInitParams, &vinitParams)
if err != nil {
return err
}
}
for index, size := range secondaryVolumeSizes {
request.SecondaryVolumes = append(request.SecondaryVolumes,
hyper_proto.Volume{Size: uint64(size)})
if *initialiseSecondaryVolumes &&
index < len(vinitParams) {
vinit := vinitParams[index]
if vinit.Label == "" {
return fmt.Errorf("VolumeInit[%d] missing Label", index)
}
if vinit.MountPoint == "" {
return fmt.Errorf("VolumeInit[%d] missing MountPoint", index)
}
request.OverlayDirectories = append(request.OverlayDirectories,
vinit.MountPoint)
request.SecondaryVolumesInit = append(request.SecondaryVolumesInit,
vinit.VolumeInitialisationInfo)
util.WriteFstabEntry(secondaryFstab, "LABEL="+vinit.Label,
vinit.MountPoint, "ext4", "discard", 0, 2)
}
}
var imageReader, userDataReader io.Reader
if *imageName != "" {
request.ImageName = *imageName | request.OverlayFiles = overlayFiles
}
secondaryFstab.Write(request.OverlayFiles["/etc/fstab"])
if secondaryFstab.Len() > 0 {
if request.OverlayFiles == nil {
request.OverlayFiles = make(map[string][]byte)
}
request.OverlayFiles["/etc/fstab"] = secondaryFstab.Bytes()
}
} else if *imageURL != "" {
request.ImageURL = *imageURL
} else if *imageFile != "" {
file, size, err := getReader(*imageFile)
if err != nil {
return err
} else {
defer file.Close()
request.ImageDataSize = uint64(size)
imageReader = file
}
} else {
return errors.New("no image specified")
}
if *userDataFile != "" {
file, size, err := getReader(*userDataFile)
if err != nil {
return err
} else {
defer file.Close()
request.UserDataSize = uint64(size)
userDataReader = file
}
}
client, err := dialHypervisor(hypervisor)
if err != nil {
return err
}
defer client.Close()
var reply hyper_proto.CreateVmResponse
err = callCreateVm(client, request, &reply, imageReader, userDataReader,
int64(request.ImageDataSize), int64(request.UserDataSize), logger)
if err != nil {
return err
}
if err := hyperclient.AcknowledgeVm(client, reply.IpAddress); err != nil {
return fmt.Errorf("error acknowledging VM: %s", err)
}
fmt.Println(reply.IpAddress)
if reply.DhcpTimedOut {
return errors.New("DHCP ACK timed out")
}
if *dhcpTimeout > 0 {
logger.Debugln(0, "Received DHCP ACK")
}
return maybeWatchVm(client, hypervisor, reply.IpAddress, logger)
}
func getHypervisorAddress() (string, error) {
if *hypervisorHostname != "" {
return fmt.Sprintf("%s:%d", *hypervisorHostname, *hypervisorPortNum),
nil
}
client, err := dialFleetManager(fmt.Sprintf("%s:%d",
*fleetManagerHostname, *fleetManagerPortNum))
if err != nil {
return "", err
}
defer client.Close()
if *adjacentVM != "" {
if adjacentVmIpAddr, err := lookupIP(*adjacentVM); err != nil {
return "", err
} else {
return findHypervisorClient(client, adjacentVmIpAddr)
}
}
request := fm_proto.ListHypervisorsInLocationRequest{
Location: *location,
SubnetId: *subnetId,
}
var reply fm_proto.ListHypervisorsInLocationResponse
err = client.RequestReply("FleetManager.ListHypervisorsInLocation",
request, &reply)
if err != nil {
return "", err
}
if reply.Error != "" {
return "", errors.New(reply.Error)
}
if numHyper := len(reply.HypervisorAddresses); numHyper < 1 {
return "", errors.New("no active Hypervisors in location")
} else if numHyper < 2 {
return reply.HypervisorAddresses[0], nil
} else {
return reply.HypervisorAddresses[rand.Intn(numHyper-1)], nil
}
}
func getReader(filename string) (io.ReadCloser, int64, error) {
if file, err := os.Open(filename); err != nil {
return nil, -1, err
} else if filepath.Ext(filename) == ".vdi" {
vdi, err := virtualbox.NewReader(file)
if err != nil {
file.Close()
return nil, -1, err
}
return &wrappedReadCloser{real: file, wrap: vdi}, int64(vdi.Size), nil
} else {
fi, err := file.Stat()
if err != nil {
file.Close()
return nil, -1, err
}
switch fi.Mode() & os.ModeType {
case 0:
return file, fi.Size(), nil
case os.ModeDevice:
if size, err := readBlockDeviceSize(filename); err != nil {
file.Close()
return nil, -1, err
} else {
return file, size, nil
}
default:
file.Close()
return nil, -1, errors.New("unsupported file type")
}
}
}
func loadOverlayFiles() (map[string][]byte, error) {
if *overlayDirectory == "" {
return nil, nil
}
overlayFiles := make(map[string][]byte)
err := filepath.Walk(*overlayDirectory,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
overlayFiles[path[len(*overlayDirectory):]] = data
return nil
})
return overlayFiles, err
}
func makeVolumeInitParams(numVolumes uint) []volumeInitParams {
vinitParams := make([]volumeInitParams, numVolumes)
for index := 0; index < int(numVolumes); index++ {
label := fmt.Sprintf("/data/%d", index)
vinitParams[index].Label = label
vinitParams[index].MountPoint = label
}
return vinitParams
}
func processCreateVmResponses(conn *srpc.Conn,
logger log.DebugLogger) (hyper_proto.CreateVmResponse, error) {
var zeroResponse hyper_proto.CreateVmResponse
if err := conn.Flush(); err != nil {
return zeroResponse, fmt.Errorf("error flushing: %s", err)
}
for {
var response hyper_proto.CreateVmResponse
if err := conn.Decode(&response); err != nil {
return zeroResponse, fmt.Errorf("error decoding: %s", err)
}
if response.Error != "" {
return zeroResponse, errors.New(response.Error)
}
if response.ProgressMessage != "" {
logger.Debugln(0, response.ProgressMessage)
}
if response.Final {
return response, nil
}
}
}
func readBlockDeviceSize(filename string) (int64, error) {
if strings.HasPrefix(filename, "/dev/") {
filename = filename[5:]
}
deviceBlocks, err := readSysfsInt64(
filepath.Join(sysfsDirectory, filename, "size"))
if err != nil {
return 0, err
}
return deviceBlocks * 512, nil
}
func readSysfsInt64(filename string) (int64, error) {
file, err := os.Open(filename)
if err != nil {
return 0, err
}
defer file.Close()
var value int64
nScanned, err := fmt.Fscanf(file, "%d", &value)
if err != nil {
return 0, err
}
if nScanned < 1 {
return 0, fmt.Errorf("only read %d values from: %s", nScanned, filename)
}
return value, nil
}
func (r *wrappedReadCloser) Close() error {
return r.real.Close()
}
func (r *wrappedReadCloser) Read(p []byte) (n int, err error) {
return r.wrap.Read(p)
} | request.ImageTimeout = *imageTimeout
request.SkipBootloader = *skipBootloader
if overlayFiles, err := loadOverlayFiles(); err != nil {
return err
} else { | random_line_split |
createVm.go | package main
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path/filepath"
"strings"
"time"
hyperclient "github.com/Cloud-Foundations/Dominator/hypervisor/client"
"github.com/Cloud-Foundations/Dominator/lib/filesystem/util"
"github.com/Cloud-Foundations/Dominator/lib/format"
"github.com/Cloud-Foundations/Dominator/lib/images/virtualbox"
"github.com/Cloud-Foundations/Dominator/lib/json"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/lib/tags"
fm_proto "github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
var sysfsDirectory = "/sys/block"
type volumeInitParams struct {
hyper_proto.VolumeInitialisationInfo
MountPoint string
}
type wrappedReadCloser struct {
real io.Closer
wrap io.Reader
}
func init() {
rand.Seed(time.Now().Unix() + time.Now().UnixNano())
}
func createVmSubcommand(args []string, logger log.DebugLogger) error {
if err := createVm(logger); err != nil {
return fmt.Errorf("error creating VM: %s", err)
}
return nil
}
func callCreateVm(client *srpc.Client, request hyper_proto.CreateVmRequest,
reply *hyper_proto.CreateVmResponse, imageReader, userDataReader io.Reader,
imageSize, userDataSize int64, logger log.DebugLogger) error {
conn, err := client.Call("Hypervisor.CreateVm")
if err != nil {
return fmt.Errorf("error calling Hypervisor.CreateVm: %s", err)
}
defer conn.Close()
if err := conn.Encode(request); err != nil {
return fmt.Errorf("error encoding request: %s", err)
}
// Stream any required data.
if imageReader != nil {
logger.Debugln(0, "uploading image")
startTime := time.Now()
if nCopied, err := io.CopyN(conn, imageReader, imageSize); err != nil {
return fmt.Errorf("error uploading image: %s got %d of %d bytes",
err, nCopied, imageSize)
} else {
duration := time.Since(startTime)
speed := uint64(float64(nCopied) / duration.Seconds())
logger.Debugf(0, "uploaded image in %s (%s/s)\n",
format.Duration(duration), format.FormatBytes(speed))
}
}
if userDataReader != nil {
logger.Debugln(0, "uploading user data")
nCopied, err := io.CopyN(conn, userDataReader, userDataSize)
if err != nil {
return fmt.Errorf(
"error uploading user data: %s got %d of %d bytes",
err, nCopied, userDataSize)
}
}
response, err := processCreateVmResponses(conn, logger)
*reply = response
return err
}
func createVm(logger log.DebugLogger) error {
if *vmHostname == "" {
if name := vmTags["Name"]; name == "" {
return errors.New("no hostname specified")
} else {
*vmHostname = name
}
} else {
if name := vmTags["Name"]; name == "" {
if vmTags == nil {
vmTags = make(tags.Tags)
}
vmTags["Name"] = *vmHostname
}
}
if hypervisor, err := getHypervisorAddress(); err != nil {
return err
} else {
logger.Debugf(0, "creating VM on %s\n", hypervisor)
return createVmOnHypervisor(hypervisor, logger)
}
}
func createVmInfoFromFlags() hyper_proto.VmInfo {
return hyper_proto.VmInfo{
ConsoleType: consoleType,
DestroyProtection: *destroyProtection,
DisableVirtIO: *disableVirtIO,
Hostname: *vmHostname,
MemoryInMiB: uint64(memory >> 20),
MilliCPUs: *milliCPUs,
OwnerGroups: ownerGroups,
OwnerUsers: ownerUsers,
Tags: vmTags,
SecondarySubnetIDs: secondarySubnetIDs,
SubnetId: *subnetId,
VirtualCPUs: *virtualCPUs,
}
}
func createVmOnHypervisor(hypervisor string, logger log.DebugLogger) error {
request := hyper_proto.CreateVmRequest{
DhcpTimeout: *dhcpTimeout,
EnableNetboot: *enableNetboot,
MinimumFreeBytes: uint64(minFreeBytes),
RoundupPower: *roundupPower,
SkipMemoryCheck: *skipMemoryCheck,
VmInfo: createVmInfoFromFlags(),
}
if request.VmInfo.MemoryInMiB < 1 {
request.VmInfo.MemoryInMiB = 1024
}
if request.VmInfo.MilliCPUs < 1 {
request.VmInfo.MilliCPUs = 250
}
minimumCPUs := request.VmInfo.MilliCPUs / 1000
if request.VmInfo.VirtualCPUs > 0 &&
request.VmInfo.VirtualCPUs < minimumCPUs {
return fmt.Errorf("vCPUs must be at least %d", minimumCPUs)
}
if len(requestIPs) > 0 && requestIPs[0] != "" {
ipAddr := net.ParseIP(requestIPs[0])
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.Address.IpAddress = ipAddr
}
if len(requestIPs) > 1 && len(secondarySubnetIDs) > 0 {
request.SecondaryAddresses = make([]hyper_proto.Address,
len(secondarySubnetIDs))
for index, addr := range requestIPs[1:] {
if addr == "" {
continue
}
ipAddr := net.ParseIP(addr)
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.SecondaryAddresses[index] = hyper_proto.Address{
IpAddress: ipAddr}
}
}
secondaryFstab := &bytes.Buffer{}
var vinitParams []volumeInitParams
if *secondaryVolumesInitParams == "" {
vinitParams = makeVolumeInitParams(uint(len(secondaryVolumeSizes)))
} else {
err := json.ReadFromFile(*secondaryVolumesInitParams, &vinitParams)
if err != nil {
return err
}
}
for index, size := range secondaryVolumeSizes {
request.SecondaryVolumes = append(request.SecondaryVolumes,
hyper_proto.Volume{Size: uint64(size)})
if *initialiseSecondaryVolumes &&
index < len(vinitParams) {
vinit := vinitParams[index]
if vinit.Label == "" {
return fmt.Errorf("VolumeInit[%d] missing Label", index)
}
if vinit.MountPoint == "" {
return fmt.Errorf("VolumeInit[%d] missing MountPoint", index)
}
request.OverlayDirectories = append(request.OverlayDirectories,
vinit.MountPoint)
request.SecondaryVolumesInit = append(request.SecondaryVolumesInit,
vinit.VolumeInitialisationInfo)
util.WriteFstabEntry(secondaryFstab, "LABEL="+vinit.Label,
vinit.MountPoint, "ext4", "discard", 0, 2)
}
}
var imageReader, userDataReader io.Reader
if *imageName != "" {
request.ImageName = *imageName
request.ImageTimeout = *imageTimeout
request.SkipBootloader = *skipBootloader
if overlayFiles, err := loadOverlayFiles(); err != nil {
return err
} else {
request.OverlayFiles = overlayFiles
}
secondaryFstab.Write(request.OverlayFiles["/etc/fstab"])
if secondaryFstab.Len() > 0 {
if request.OverlayFiles == nil {
request.OverlayFiles = make(map[string][]byte)
}
request.OverlayFiles["/etc/fstab"] = secondaryFstab.Bytes()
}
} else if *imageURL != "" {
request.ImageURL = *imageURL
} else if *imageFile != "" {
file, size, err := getReader(*imageFile)
if err != nil {
return err
} else {
defer file.Close()
request.ImageDataSize = uint64(size)
imageReader = file
}
} else {
return errors.New("no image specified")
}
if *userDataFile != "" {
file, size, err := getReader(*userDataFile)
if err != nil {
return err
} else {
defer file.Close()
request.UserDataSize = uint64(size)
userDataReader = file
}
}
client, err := dialHypervisor(hypervisor)
if err != nil {
return err
}
defer client.Close()
var reply hyper_proto.CreateVmResponse
err = callCreateVm(client, request, &reply, imageReader, userDataReader,
int64(request.ImageDataSize), int64(request.UserDataSize), logger)
if err != nil {
return err
}
if err := hyperclient.AcknowledgeVm(client, reply.IpAddress); err != nil {
return fmt.Errorf("error acknowledging VM: %s", err)
}
fmt.Println(reply.IpAddress)
if reply.DhcpTimedOut {
return errors.New("DHCP ACK timed out")
}
if *dhcpTimeout > 0 {
logger.Debugln(0, "Received DHCP ACK")
}
return maybeWatchVm(client, hypervisor, reply.IpAddress, logger)
}
func getHypervisorAddress() (string, error) {
if *hypervisorHostname != "" {
return fmt.Sprintf("%s:%d", *hypervisorHostname, *hypervisorPortNum),
nil
}
client, err := dialFleetManager(fmt.Sprintf("%s:%d",
*fleetManagerHostname, *fleetManagerPortNum))
if err != nil {
return "", err
}
defer client.Close()
if *adjacentVM != "" {
if adjacentVmIpAddr, err := lookupIP(*adjacentVM); err != nil {
return "", err
} else {
return findHypervisorClient(client, adjacentVmIpAddr)
}
}
request := fm_proto.ListHypervisorsInLocationRequest{
Location: *location,
SubnetId: *subnetId,
}
var reply fm_proto.ListHypervisorsInLocationResponse
err = client.RequestReply("FleetManager.ListHypervisorsInLocation",
request, &reply)
if err != nil {
return "", err
}
if reply.Error != "" {
return "", errors.New(reply.Error)
}
if numHyper := len(reply.HypervisorAddresses); numHyper < 1 {
return "", errors.New("no active Hypervisors in location")
} else if numHyper < 2 {
return reply.HypervisorAddresses[0], nil
} else {
return reply.HypervisorAddresses[rand.Intn(numHyper-1)], nil
}
}
func getReader(filename string) (io.ReadCloser, int64, error) {
if file, err := os.Open(filename); err != nil {
return nil, -1, err
} else if filepath.Ext(filename) == ".vdi" {
vdi, err := virtualbox.NewReader(file)
if err != nil {
file.Close()
return nil, -1, err
}
return &wrappedReadCloser{real: file, wrap: vdi}, int64(vdi.Size), nil
} else {
fi, err := file.Stat()
if err != nil {
file.Close()
return nil, -1, err
}
switch fi.Mode() & os.ModeType {
case 0:
return file, fi.Size(), nil
case os.ModeDevice:
if size, err := readBlockDeviceSize(filename); err != nil {
file.Close()
return nil, -1, err
} else {
return file, size, nil
}
default:
file.Close()
return nil, -1, errors.New("unsupported file type")
}
}
}
func loadOverlayFiles() (map[string][]byte, error) {
if *overlayDirectory == "" |
overlayFiles := make(map[string][]byte)
err := filepath.Walk(*overlayDirectory,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
overlayFiles[path[len(*overlayDirectory):]] = data
return nil
})
return overlayFiles, err
}
func makeVolumeInitParams(numVolumes uint) []volumeInitParams {
vinitParams := make([]volumeInitParams, numVolumes)
for index := 0; index < int(numVolumes); index++ {
label := fmt.Sprintf("/data/%d", index)
vinitParams[index].Label = label
vinitParams[index].MountPoint = label
}
return vinitParams
}
func processCreateVmResponses(conn *srpc.Conn,
logger log.DebugLogger) (hyper_proto.CreateVmResponse, error) {
var zeroResponse hyper_proto.CreateVmResponse
if err := conn.Flush(); err != nil {
return zeroResponse, fmt.Errorf("error flushing: %s", err)
}
for {
var response hyper_proto.CreateVmResponse
if err := conn.Decode(&response); err != nil {
return zeroResponse, fmt.Errorf("error decoding: %s", err)
}
if response.Error != "" {
return zeroResponse, errors.New(response.Error)
}
if response.ProgressMessage != "" {
logger.Debugln(0, response.ProgressMessage)
}
if response.Final {
return response, nil
}
}
}
func readBlockDeviceSize(filename string) (int64, error) {
if strings.HasPrefix(filename, "/dev/") {
filename = filename[5:]
}
deviceBlocks, err := readSysfsInt64(
filepath.Join(sysfsDirectory, filename, "size"))
if err != nil {
return 0, err
}
return deviceBlocks * 512, nil
}
func readSysfsInt64(filename string) (int64, error) {
file, err := os.Open(filename)
if err != nil {
return 0, err
}
defer file.Close()
var value int64
nScanned, err := fmt.Fscanf(file, "%d", &value)
if err != nil {
return 0, err
}
if nScanned < 1 {
return 0, fmt.Errorf("only read %d values from: %s", nScanned, filename)
}
return value, nil
}
func (r *wrappedReadCloser) Close() error {
return r.real.Close()
}
func (r *wrappedReadCloser) Read(p []byte) (n int, err error) {
return r.wrap.Read(p)
}
| {
return nil, nil
} | conditional_block |
createVm.go | package main
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path/filepath"
"strings"
"time"
hyperclient "github.com/Cloud-Foundations/Dominator/hypervisor/client"
"github.com/Cloud-Foundations/Dominator/lib/filesystem/util"
"github.com/Cloud-Foundations/Dominator/lib/format"
"github.com/Cloud-Foundations/Dominator/lib/images/virtualbox"
"github.com/Cloud-Foundations/Dominator/lib/json"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/lib/tags"
fm_proto "github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
var sysfsDirectory = "/sys/block"
type volumeInitParams struct {
hyper_proto.VolumeInitialisationInfo
MountPoint string
}
type wrappedReadCloser struct {
real io.Closer
wrap io.Reader
}
func init() {
rand.Seed(time.Now().Unix() + time.Now().UnixNano())
}
func createVmSubcommand(args []string, logger log.DebugLogger) error {
if err := createVm(logger); err != nil {
return fmt.Errorf("error creating VM: %s", err)
}
return nil
}
func callCreateVm(client *srpc.Client, request hyper_proto.CreateVmRequest,
reply *hyper_proto.CreateVmResponse, imageReader, userDataReader io.Reader,
imageSize, userDataSize int64, logger log.DebugLogger) error {
conn, err := client.Call("Hypervisor.CreateVm")
if err != nil {
return fmt.Errorf("error calling Hypervisor.CreateVm: %s", err)
}
defer conn.Close()
if err := conn.Encode(request); err != nil {
return fmt.Errorf("error encoding request: %s", err)
}
// Stream any required data.
if imageReader != nil {
logger.Debugln(0, "uploading image")
startTime := time.Now()
if nCopied, err := io.CopyN(conn, imageReader, imageSize); err != nil {
return fmt.Errorf("error uploading image: %s got %d of %d bytes",
err, nCopied, imageSize)
} else {
duration := time.Since(startTime)
speed := uint64(float64(nCopied) / duration.Seconds())
logger.Debugf(0, "uploaded image in %s (%s/s)\n",
format.Duration(duration), format.FormatBytes(speed))
}
}
if userDataReader != nil {
logger.Debugln(0, "uploading user data")
nCopied, err := io.CopyN(conn, userDataReader, userDataSize)
if err != nil {
return fmt.Errorf(
"error uploading user data: %s got %d of %d bytes",
err, nCopied, userDataSize)
}
}
response, err := processCreateVmResponses(conn, logger)
*reply = response
return err
}
func createVm(logger log.DebugLogger) error {
if *vmHostname == "" {
if name := vmTags["Name"]; name == "" {
return errors.New("no hostname specified")
} else {
*vmHostname = name
}
} else {
if name := vmTags["Name"]; name == "" {
if vmTags == nil {
vmTags = make(tags.Tags)
}
vmTags["Name"] = *vmHostname
}
}
if hypervisor, err := getHypervisorAddress(); err != nil {
return err
} else {
logger.Debugf(0, "creating VM on %s\n", hypervisor)
return createVmOnHypervisor(hypervisor, logger)
}
}
func createVmInfoFromFlags() hyper_proto.VmInfo {
return hyper_proto.VmInfo{
ConsoleType: consoleType,
DestroyProtection: *destroyProtection,
DisableVirtIO: *disableVirtIO,
Hostname: *vmHostname,
MemoryInMiB: uint64(memory >> 20),
MilliCPUs: *milliCPUs,
OwnerGroups: ownerGroups,
OwnerUsers: ownerUsers,
Tags: vmTags,
SecondarySubnetIDs: secondarySubnetIDs,
SubnetId: *subnetId,
VirtualCPUs: *virtualCPUs,
}
}
func createVmOnHypervisor(hypervisor string, logger log.DebugLogger) error {
request := hyper_proto.CreateVmRequest{
DhcpTimeout: *dhcpTimeout,
EnableNetboot: *enableNetboot,
MinimumFreeBytes: uint64(minFreeBytes),
RoundupPower: *roundupPower,
SkipMemoryCheck: *skipMemoryCheck,
VmInfo: createVmInfoFromFlags(),
}
if request.VmInfo.MemoryInMiB < 1 {
request.VmInfo.MemoryInMiB = 1024
}
if request.VmInfo.MilliCPUs < 1 {
request.VmInfo.MilliCPUs = 250
}
minimumCPUs := request.VmInfo.MilliCPUs / 1000
if request.VmInfo.VirtualCPUs > 0 &&
request.VmInfo.VirtualCPUs < minimumCPUs {
return fmt.Errorf("vCPUs must be at least %d", minimumCPUs)
}
if len(requestIPs) > 0 && requestIPs[0] != "" {
ipAddr := net.ParseIP(requestIPs[0])
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.Address.IpAddress = ipAddr
}
if len(requestIPs) > 1 && len(secondarySubnetIDs) > 0 {
request.SecondaryAddresses = make([]hyper_proto.Address,
len(secondarySubnetIDs))
for index, addr := range requestIPs[1:] {
if addr == "" {
continue
}
ipAddr := net.ParseIP(addr)
if ipAddr == nil {
return fmt.Errorf("invalid IP address: %s", requestIPs[0])
}
request.SecondaryAddresses[index] = hyper_proto.Address{
IpAddress: ipAddr}
}
}
secondaryFstab := &bytes.Buffer{}
var vinitParams []volumeInitParams
if *secondaryVolumesInitParams == "" {
vinitParams = makeVolumeInitParams(uint(len(secondaryVolumeSizes)))
} else {
err := json.ReadFromFile(*secondaryVolumesInitParams, &vinitParams)
if err != nil {
return err
}
}
for index, size := range secondaryVolumeSizes {
request.SecondaryVolumes = append(request.SecondaryVolumes,
hyper_proto.Volume{Size: uint64(size)})
if *initialiseSecondaryVolumes &&
index < len(vinitParams) {
vinit := vinitParams[index]
if vinit.Label == "" {
return fmt.Errorf("VolumeInit[%d] missing Label", index)
}
if vinit.MountPoint == "" {
return fmt.Errorf("VolumeInit[%d] missing MountPoint", index)
}
request.OverlayDirectories = append(request.OverlayDirectories,
vinit.MountPoint)
request.SecondaryVolumesInit = append(request.SecondaryVolumesInit,
vinit.VolumeInitialisationInfo)
util.WriteFstabEntry(secondaryFstab, "LABEL="+vinit.Label,
vinit.MountPoint, "ext4", "discard", 0, 2)
}
}
var imageReader, userDataReader io.Reader
if *imageName != "" {
request.ImageName = *imageName
request.ImageTimeout = *imageTimeout
request.SkipBootloader = *skipBootloader
if overlayFiles, err := loadOverlayFiles(); err != nil {
return err
} else {
request.OverlayFiles = overlayFiles
}
secondaryFstab.Write(request.OverlayFiles["/etc/fstab"])
if secondaryFstab.Len() > 0 {
if request.OverlayFiles == nil {
request.OverlayFiles = make(map[string][]byte)
}
request.OverlayFiles["/etc/fstab"] = secondaryFstab.Bytes()
}
} else if *imageURL != "" {
request.ImageURL = *imageURL
} else if *imageFile != "" {
file, size, err := getReader(*imageFile)
if err != nil {
return err
} else {
defer file.Close()
request.ImageDataSize = uint64(size)
imageReader = file
}
} else {
return errors.New("no image specified")
}
if *userDataFile != "" {
file, size, err := getReader(*userDataFile)
if err != nil {
return err
} else {
defer file.Close()
request.UserDataSize = uint64(size)
userDataReader = file
}
}
client, err := dialHypervisor(hypervisor)
if err != nil {
return err
}
defer client.Close()
var reply hyper_proto.CreateVmResponse
err = callCreateVm(client, request, &reply, imageReader, userDataReader,
int64(request.ImageDataSize), int64(request.UserDataSize), logger)
if err != nil {
return err
}
if err := hyperclient.AcknowledgeVm(client, reply.IpAddress); err != nil {
return fmt.Errorf("error acknowledging VM: %s", err)
}
fmt.Println(reply.IpAddress)
if reply.DhcpTimedOut {
return errors.New("DHCP ACK timed out")
}
if *dhcpTimeout > 0 {
logger.Debugln(0, "Received DHCP ACK")
}
return maybeWatchVm(client, hypervisor, reply.IpAddress, logger)
}
func getHypervisorAddress() (string, error) {
if *hypervisorHostname != "" {
return fmt.Sprintf("%s:%d", *hypervisorHostname, *hypervisorPortNum),
nil
}
client, err := dialFleetManager(fmt.Sprintf("%s:%d",
*fleetManagerHostname, *fleetManagerPortNum))
if err != nil {
return "", err
}
defer client.Close()
if *adjacentVM != "" {
if adjacentVmIpAddr, err := lookupIP(*adjacentVM); err != nil {
return "", err
} else {
return findHypervisorClient(client, adjacentVmIpAddr)
}
}
request := fm_proto.ListHypervisorsInLocationRequest{
Location: *location,
SubnetId: *subnetId,
}
var reply fm_proto.ListHypervisorsInLocationResponse
err = client.RequestReply("FleetManager.ListHypervisorsInLocation",
request, &reply)
if err != nil {
return "", err
}
if reply.Error != "" {
return "", errors.New(reply.Error)
}
if numHyper := len(reply.HypervisorAddresses); numHyper < 1 {
return "", errors.New("no active Hypervisors in location")
} else if numHyper < 2 {
return reply.HypervisorAddresses[0], nil
} else {
return reply.HypervisorAddresses[rand.Intn(numHyper-1)], nil
}
}
func getReader(filename string) (io.ReadCloser, int64, error) {
if file, err := os.Open(filename); err != nil {
return nil, -1, err
} else if filepath.Ext(filename) == ".vdi" {
vdi, err := virtualbox.NewReader(file)
if err != nil {
file.Close()
return nil, -1, err
}
return &wrappedReadCloser{real: file, wrap: vdi}, int64(vdi.Size), nil
} else {
fi, err := file.Stat()
if err != nil {
file.Close()
return nil, -1, err
}
switch fi.Mode() & os.ModeType {
case 0:
return file, fi.Size(), nil
case os.ModeDevice:
if size, err := readBlockDeviceSize(filename); err != nil {
file.Close()
return nil, -1, err
} else {
return file, size, nil
}
default:
file.Close()
return nil, -1, errors.New("unsupported file type")
}
}
}
func loadOverlayFiles() (map[string][]byte, error) {
if *overlayDirectory == "" {
return nil, nil
}
overlayFiles := make(map[string][]byte)
err := filepath.Walk(*overlayDirectory,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
overlayFiles[path[len(*overlayDirectory):]] = data
return nil
})
return overlayFiles, err
}
func makeVolumeInitParams(numVolumes uint) []volumeInitParams {
vinitParams := make([]volumeInitParams, numVolumes)
for index := 0; index < int(numVolumes); index++ {
label := fmt.Sprintf("/data/%d", index)
vinitParams[index].Label = label
vinitParams[index].MountPoint = label
}
return vinitParams
}
func | (conn *srpc.Conn,
logger log.DebugLogger) (hyper_proto.CreateVmResponse, error) {
var zeroResponse hyper_proto.CreateVmResponse
if err := conn.Flush(); err != nil {
return zeroResponse, fmt.Errorf("error flushing: %s", err)
}
for {
var response hyper_proto.CreateVmResponse
if err := conn.Decode(&response); err != nil {
return zeroResponse, fmt.Errorf("error decoding: %s", err)
}
if response.Error != "" {
return zeroResponse, errors.New(response.Error)
}
if response.ProgressMessage != "" {
logger.Debugln(0, response.ProgressMessage)
}
if response.Final {
return response, nil
}
}
}
func readBlockDeviceSize(filename string) (int64, error) {
if strings.HasPrefix(filename, "/dev/") {
filename = filename[5:]
}
deviceBlocks, err := readSysfsInt64(
filepath.Join(sysfsDirectory, filename, "size"))
if err != nil {
return 0, err
}
return deviceBlocks * 512, nil
}
func readSysfsInt64(filename string) (int64, error) {
file, err := os.Open(filename)
if err != nil {
return 0, err
}
defer file.Close()
var value int64
nScanned, err := fmt.Fscanf(file, "%d", &value)
if err != nil {
return 0, err
}
if nScanned < 1 {
return 0, fmt.Errorf("only read %d values from: %s", nScanned, filename)
}
return value, nil
}
func (r *wrappedReadCloser) Close() error {
return r.real.Close()
}
func (r *wrappedReadCloser) Read(p []byte) (n int, err error) {
return r.wrap.Read(p)
}
| processCreateVmResponses | identifier_name |
altConversion.ts | import {
AltBaseNodeMixin,
AltBlendMixin,
AltCornerMixin,
AltDefaultShapeMixin,
AltEllipseNode,
AltFrameMixin,
AltFrameNode,
AltGeometryMixin,
AltGroupNode,
AltLayoutMixin,
AltRectangleCornerMixin,
AltRectangleNode,
AltSceneNode,
AltTextNode,
} from "./altMixins";
import { convertNodesOnRectangle } from "./convertNodesOnRectangle";
import { convertToAutoLayout } from "./convertToAutoLayout";
export const convertSingleNodeToAlt = (
node: SceneNode,
parent: AltFrameNode | AltGroupNode | null = null
): AltSceneNode => {
return convertIntoAltNodes([node], parent)[0];
};
export const frameNodeToAlt = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): AltRectangleNode | AltFrameNode | AltGroupNode => {
if (node.children.length === 0) {
// if it has no children, convert frame to rectangle
return frameToRectangleNode(node, altParent);
}
const altNode = new AltFrameNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertFrame(altNode, node);
convertCorner(altNode, node);
convertRectangleCorner(altNode, node);
altNode.hasChildren = true;
if (hasSingleChildren) {
altNode.children = [];
return altNode;
}
altNode.children = convertIntoAltNodes(node.children, altNode);
return convertToAutoLayout(convertNodesOnRectangle(altNode));
};
// auto convert Frame to Rectangle when Frame has no Children
const frameToRectangleNode = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode => {
const newNode = new AltRectangleNode();
newNode.id = node.id;
newNode.name = node.name;
if (altParent) {
newNode.parent = altParent;
}
convertDefaultShape(newNode, node);
convertRectangleCorner(newNode, node);
convertCorner(newNode, node);
return newNode;
};
const prepareTag = (altNode: AltBaseNodeMixin, node: SceneNode) => {
const gnode: SceneNode & GeometryMixin = node as any;
if (Array.isArray(gnode.fills)) {
altNode.bgImage = {
hash: "",
size: "FILL",
};
for (let f of gnode.fills) {
if (f.type === "IMAGE" && f.imageHash) {
altNode.bgImage.hash = f.imageHash;
altNode.bgImage.size = f.scaleMode;
}
}
}
const tagName = node.getPluginData("tagName");
if (tagName) {
altNode.tagName = tagName;
} else {
altNode.tagName = "div";
}
altNode.raster = (node.getPluginData("raster") || "") as any;
altNode.css = {
add: node.getPluginData("addcss"),
rem: node.getPluginData("remcss"),
style: node.getPluginData("style"),
inherit: node.getPluginData("inherit") as any,
};
if (altNode.raster && altNode.css && !altNode.css.inherit) {
altNode.css.inherit = {
width: false,
height: false,
class: true,
style: true,
};
}
if (
altNode.css &&
altNode.css.inherit &&
typeof altNode.css.inherit === "string"
) {
altNode.css.inherit = JSON.parse(altNode.css.inherit);
}
altNode.wrapCode = node.getPluginData("wrapCode") || "<<component>>";
altNode.props = JSON.parse(node.getPluginData("props") || "{}");
altNode.renderChildren = node.getPluginData("renderChildren") || ("y" as any);
if (node.type === "INSTANCE") {
const target: string = node.mainComponent.getPluginData("target");
const acceptChildren: boolean =
node.mainComponent.getPluginData("acceptChildren") === "y";
if (target) {
altNode.isInstance = true;
altNode.tagName = JSON.parse(target).name;
if (acceptChildren) {
const children = node.findOne((e) => e.name === "children");
if (children && children.type === "TEXT" && !altNode.props.children) {
altNode.props.children = children.characters.trim();
}
altNode.hasChildren = true;
altNode.renderChildren = "y";
} else {
altNode.hasChildren = false;
altNode.renderChildren = "n";
}
} else {
altNode.isInstance = false;
}
}
return altNode;
};
export const convertIntoAltNodes = (
sceneNode: ReadonlyArray<SceneNode>,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): Array<AltSceneNode> => {
const mapped: Array<AltSceneNode | null> = sceneNode.map(
(node: SceneNode) => {
if (node.type === "RECTANGLE" || node.type === "ELLIPSE") {
let altNode;
if (node.type === "RECTANGLE") {
altNode = new AltRectangleNode();
convertRectangleCorner(altNode, node);
} else {
altNode = new AltEllipseNode();
}
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertCorner(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "LINE") {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Lines have a height of zero, but they must have a height, so add 1.
altNode.height = 1;
// Let them be CENTER, since on Lines this property is ignored.
altNode.strokeAlign = "CENTER";
// Remove 1 since it now has a height of 1. It won't be visually perfect, but will be almost.
altNode.strokeWeight = altNode.strokeWeight - 1;
return prepareTag(altNode, node);
} else if (
node.type === "FRAME" ||
node.type === "INSTANCE" ||
node.type === "COMPONENT"
) {
let altNode;
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
altNode = prepareTag(iconToRect, node);
} else {
altNode = prepareTag(
frameNodeToAlt(node, altParent, hasSingleChildren),
node
);
}
return altNode;
} else if (node.type === "GROUP") {
if (node.children.length === 1 && node.visible !== false) {
// if Group is visible and has only one child, Group should disappear.
// there will be a single value anyway.
return convertIntoAltNodes(node.children, altParent)[0];
}
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
return iconToRect;
}
const altNode = new AltGroupNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertLayout(altNode, node);
convertBlend(altNode, node);
altNode.children = convertIntoAltNodes(node.children, altNode);
// try to find big rect and regardless of that result, also try to convert to autolayout.
// There is a big chance this will be returned as a Frame
// also, Group will always have at least 2 children.
return prepareTag(convertNodesOnRectangle(altNode), node);
} else if (node.type === "TEXT") {
const altNode = new AltTextNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertIntoAltText(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "VECTOR") {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
if (altNode.fills === figma.mixed || altNode.fills.length === 0) {
// Use rose[400] from Tailwind 2 when Vector has no color.
altNode.fills = [
{
type: "SOLID",
color: {
r: 0.5,
g: 0.23,
b: 0.27,
},
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
}
return prepareTag(altNode, node);
}
return null;
}
);
return mapped.filter(notEmpty);
};
const iconToRectangle = (
node: FrameNode | InstanceNode | ComponentNode | GroupNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode | null => {
if (node.children.every((d) => d.type === "VECTOR")) {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertBlend(altNode, node);
// width, x, y
convertLayout(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
altNode.strokes = [];
altNode.strokeWeight = 0;
altNode.strokeMiterLimit = 0;
altNode.strokeAlign = "CENTER";
altNode.strokeCap = "NONE";
altNode.strokeJoin = "BEVEL";
altNode.dashPattern = [];
altNode.fillStyleId = "";
altNode.strokeStyleId = "";
altNode.fills = [
{
type: "IMAGE",
imageHash: "",
scaleMode: "FIT",
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
return altNode;
}
return null;
};
const convertLayout = (altNode: AltLayoutMixin, node: LayoutMixin) => {
// Get the correct X/Y position when rotation is applied.
// This won't guarantee a perfect position, since we would still
// need to calculate the offset based on node width/height to compensate,
// which we are not currently doing. However, this is a lot better than nothing and will help LineNode.
if (node.rotation !== undefined && Math.round(node.rotation) !== 0) {
const boundingRect = getBoundingRect(node);
altNode.x = boundingRect.x;
altNode.y = boundingRect.y;
} else {
altNode.x = node.x;
altNode.y = node.y;
}
altNode.width = node.width;
altNode.height = node.height;
altNode.rotation = node.rotation;
altNode.layoutAlign = node.layoutAlign;
altNode.layoutGrow = node.layoutGrow;
altNode.overflow = (node as any).overflowDirection || "NONE";
};
const convertFrame = (altNode: AltFrameMixin, node: DefaultFrameMixin) => {
altNode.layoutMode = node.layoutMode;
altNode.primaryAxisSizingMode = node.primaryAxisSizingMode;
altNode.counterAxisSizingMode = node.counterAxisSizingMode;
// Fix this: https://stackoverflow.com/questions/57859754/flexbox-space-between-but-center-if-one-element
// It affects HTML, Tailwind, Flutter and possibly SwiftUI. So, let's be consistent.
if (
node.primaryAxisAlignItems === "SPACE_BETWEEN" &&
node.children.length === 1
) {
altNode.primaryAxisAlignItems = "CENTER";
} else {
altNode.primaryAxisAlignItems = node.primaryAxisAlignItems;
}
altNode.counterAxisAlignItems = node.counterAxisAlignItems;
altNode.paddingLeft = node.paddingLeft;
altNode.paddingRight = node.paddingRight;
altNode.paddingTop = node.paddingTop;
altNode.paddingBottom = node.paddingBottom;
altNode.itemSpacing = node.itemSpacing;
altNode.layoutGrids = node.layoutGrids;
altNode.gridStyleId = node.gridStyleId;
altNode.clipsContent = node.clipsContent;
altNode.guides = node.guides;
};
const convertGeometry = (altNode: AltGeometryMixin, node: GeometryMixin) => {
altNode.fills = node.fills;
altNode.strokes = node.strokes;
altNode.strokeWeight = node.strokeWeight;
altNode.strokeMiterLimit = node.strokeMiterLimit;
altNode.strokeAlign = node.strokeAlign;
altNode.strokeCap = node.strokeCap;
altNode.strokeJoin = node.strokeJoin;
altNode.dashPattern = node.dashPattern;
altNode.fillStyleId = node.fillStyleId;
altNode.strokeStyleId = node.strokeStyleId;
};
const convertBlend = (
altNode: AltBlendMixin,
node: BlendMixin & SceneNodeMixin
) => {
altNode.opacity = node.opacity;
altNode.blendMode = node.blendMode;
altNode.isMask = node.isMask;
altNode.effects = node.effects;
altNode.effectStyleId = node.effectStyleId;
const wrap = (altNode as any).wrapCode;
if (wrap && wrap !== "<<component>>") {
altNode.visible = true;
} else {
altNode.visible = node.visible;
}
};
const convertDefaultShape = (
altNode: AltDefaultShapeMixin,
node: DefaultShapeMixin
) => {
// opacity, visible
convertBlend(altNode, node);
// fills, strokes
convertGeometry(altNode, node);
// width, x, y
convertLayout(altNode, node);
};
const convertCorner = (altNode: AltCornerMixin, node: CornerMixin) => {
altNode.cornerRadius = node.cornerRadius;
altNode.cornerSmoothing = node.cornerSmoothing;
};
const convertRectangleCorner = (
altNode: AltRectangleCornerMixin,
node: RectangleCornerMixin
) => {
altNode.topLeftRadius = node.topLeftRadius;
altNode.topRightRadius = node.topRightRadius;
altNode.bottomLeftRadius = node.bottomLeftRadius;
altNode.bottomRightRadius = node.bottomRightRadius;
};
const convertIntoAltText = (altNode: AltTextNode, node: TextNode) => {
altNode.textAlignHorizontal = node.textAlignHorizontal;
altNode.textAlignVertical = node.textAlignVertical;
altNode.paragraphIndent = node.paragraphIndent;
altNode.paragraphSpacing = node.paragraphSpacing;
altNode.fontSize = node.fontSize;
altNode.fontName = node.fontName;
altNode.textCase = node.textCase;
altNode.textDecoration = node.textDecoration;
altNode.letterSpacing = node.letterSpacing;
altNode.textAutoResize = node.textAutoResize;
altNode.characters = node.characters;
altNode.lineHeight = node.lineHeight;
};
export function | <TValue>(
value: TValue | null | undefined
): value is TValue {
return value !== null && value !== undefined;
}
const applyMatrixToPoint = (matrix: number[][], point: number[]): number[] => {
return [
point[0] * matrix[0][0] + point[1] * matrix[0][1] + matrix[0][2],
point[0] * matrix[1][0] + point[1] * matrix[1][1] + matrix[1][2],
];
};
/**
* this function return a bounding rect for an nodes
*/
// x/y absolute coordinates
// height/width
// x2/y2 bottom right coordinates
export const getBoundingRect = (
node: LayoutMixin
): {
x: number;
y: number;
// x2: number;
// y2: number;
// height: number;
// width: number;
} => {
const boundingRect = {
x: 0,
y: 0,
// x2: 0,
// y2: 0,
// height: 0,
// width: 0,
};
const halfHeight = node.height / 2;
const halfWidth = node.width / 2;
const [[c0, s0, x], [s1, c1, y]] = node.absoluteTransform;
const matrix = [
[c0, s0, x + halfWidth * c0 + halfHeight * s0],
[s1, c1, y + halfWidth * s1 + halfHeight * c1],
];
// the coordinates of the corners of the rectangle
const XY: {
x: number[];
y: number[];
} = {
x: [1, -1, 1, -1],
y: [1, -1, -1, 1],
};
// fill in
for (let i = 0; i <= 3; i++) {
const a = applyMatrixToPoint(matrix, [
XY.x[i] * halfWidth,
XY.y[i] * halfHeight,
]);
XY.x[i] = a[0];
XY.y[i] = a[1];
}
XY.x.sort((a, b) => a - b);
XY.y.sort((a, b) => a - b);
return {
x: XY.x[0],
y: XY.y[0],
};
return boundingRect;
};
| notEmpty | identifier_name |
altConversion.ts | import {
AltBaseNodeMixin,
AltBlendMixin,
AltCornerMixin,
AltDefaultShapeMixin,
AltEllipseNode,
AltFrameMixin,
AltFrameNode,
AltGeometryMixin,
AltGroupNode,
AltLayoutMixin,
AltRectangleCornerMixin,
AltRectangleNode,
AltSceneNode,
AltTextNode,
} from "./altMixins";
import { convertNodesOnRectangle } from "./convertNodesOnRectangle";
import { convertToAutoLayout } from "./convertToAutoLayout";
export const convertSingleNodeToAlt = (
node: SceneNode,
parent: AltFrameNode | AltGroupNode | null = null
): AltSceneNode => {
return convertIntoAltNodes([node], parent)[0];
};
export const frameNodeToAlt = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): AltRectangleNode | AltFrameNode | AltGroupNode => {
if (node.children.length === 0) {
// if it has no children, convert frame to rectangle
return frameToRectangleNode(node, altParent);
}
const altNode = new AltFrameNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertFrame(altNode, node);
convertCorner(altNode, node);
convertRectangleCorner(altNode, node);
altNode.hasChildren = true;
if (hasSingleChildren) {
altNode.children = [];
return altNode;
}
altNode.children = convertIntoAltNodes(node.children, altNode);
return convertToAutoLayout(convertNodesOnRectangle(altNode));
};
// auto convert Frame to Rectangle when Frame has no Children
const frameToRectangleNode = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode => {
const newNode = new AltRectangleNode();
newNode.id = node.id;
newNode.name = node.name;
if (altParent) {
newNode.parent = altParent;
}
convertDefaultShape(newNode, node);
convertRectangleCorner(newNode, node);
convertCorner(newNode, node);
return newNode;
};
const prepareTag = (altNode: AltBaseNodeMixin, node: SceneNode) => {
const gnode: SceneNode & GeometryMixin = node as any;
if (Array.isArray(gnode.fills)) {
altNode.bgImage = {
hash: "",
size: "FILL",
};
for (let f of gnode.fills) {
if (f.type === "IMAGE" && f.imageHash) {
altNode.bgImage.hash = f.imageHash;
altNode.bgImage.size = f.scaleMode;
}
}
}
const tagName = node.getPluginData("tagName");
if (tagName) {
altNode.tagName = tagName;
} else {
altNode.tagName = "div";
}
altNode.raster = (node.getPluginData("raster") || "") as any;
altNode.css = {
add: node.getPluginData("addcss"),
rem: node.getPluginData("remcss"),
style: node.getPluginData("style"),
inherit: node.getPluginData("inherit") as any,
};
if (altNode.raster && altNode.css && !altNode.css.inherit) {
altNode.css.inherit = {
width: false,
height: false,
class: true,
style: true,
};
}
if (
altNode.css &&
altNode.css.inherit &&
typeof altNode.css.inherit === "string"
) {
altNode.css.inherit = JSON.parse(altNode.css.inherit);
}
altNode.wrapCode = node.getPluginData("wrapCode") || "<<component>>";
altNode.props = JSON.parse(node.getPluginData("props") || "{}");
altNode.renderChildren = node.getPluginData("renderChildren") || ("y" as any);
if (node.type === "INSTANCE") {
const target: string = node.mainComponent.getPluginData("target");
const acceptChildren: boolean =
node.mainComponent.getPluginData("acceptChildren") === "y";
if (target) {
altNode.isInstance = true;
altNode.tagName = JSON.parse(target).name;
if (acceptChildren) {
const children = node.findOne((e) => e.name === "children");
if (children && children.type === "TEXT" && !altNode.props.children) { | } else {
altNode.hasChildren = false;
altNode.renderChildren = "n";
}
} else {
altNode.isInstance = false;
}
}
return altNode;
};
export const convertIntoAltNodes = (
sceneNode: ReadonlyArray<SceneNode>,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): Array<AltSceneNode> => {
const mapped: Array<AltSceneNode | null> = sceneNode.map(
(node: SceneNode) => {
if (node.type === "RECTANGLE" || node.type === "ELLIPSE") {
let altNode;
if (node.type === "RECTANGLE") {
altNode = new AltRectangleNode();
convertRectangleCorner(altNode, node);
} else {
altNode = new AltEllipseNode();
}
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertCorner(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "LINE") {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Lines have a height of zero, but they must have a height, so add 1.
altNode.height = 1;
// Let them be CENTER, since on Lines this property is ignored.
altNode.strokeAlign = "CENTER";
// Remove 1 since it now has a height of 1. It won't be visually perfect, but will be almost.
altNode.strokeWeight = altNode.strokeWeight - 1;
return prepareTag(altNode, node);
} else if (
node.type === "FRAME" ||
node.type === "INSTANCE" ||
node.type === "COMPONENT"
) {
let altNode;
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
altNode = prepareTag(iconToRect, node);
} else {
altNode = prepareTag(
frameNodeToAlt(node, altParent, hasSingleChildren),
node
);
}
return altNode;
} else if (node.type === "GROUP") {
if (node.children.length === 1 && node.visible !== false) {
// if Group is visible and has only one child, Group should disappear.
// there will be a single value anyway.
return convertIntoAltNodes(node.children, altParent)[0];
}
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
return iconToRect;
}
const altNode = new AltGroupNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertLayout(altNode, node);
convertBlend(altNode, node);
altNode.children = convertIntoAltNodes(node.children, altNode);
// try to find big rect and regardless of that result, also try to convert to autolayout.
// There is a big chance this will be returned as a Frame
// also, Group will always have at least 2 children.
return prepareTag(convertNodesOnRectangle(altNode), node);
} else if (node.type === "TEXT") {
const altNode = new AltTextNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertIntoAltText(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "VECTOR") {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
if (altNode.fills === figma.mixed || altNode.fills.length === 0) {
// Use rose[400] from Tailwind 2 when Vector has no color.
altNode.fills = [
{
type: "SOLID",
color: {
r: 0.5,
g: 0.23,
b: 0.27,
},
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
}
return prepareTag(altNode, node);
}
return null;
}
);
return mapped.filter(notEmpty);
};
const iconToRectangle = (
node: FrameNode | InstanceNode | ComponentNode | GroupNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode | null => {
if (node.children.every((d) => d.type === "VECTOR")) {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertBlend(altNode, node);
// width, x, y
convertLayout(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
altNode.strokes = [];
altNode.strokeWeight = 0;
altNode.strokeMiterLimit = 0;
altNode.strokeAlign = "CENTER";
altNode.strokeCap = "NONE";
altNode.strokeJoin = "BEVEL";
altNode.dashPattern = [];
altNode.fillStyleId = "";
altNode.strokeStyleId = "";
altNode.fills = [
{
type: "IMAGE",
imageHash: "",
scaleMode: "FIT",
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
return altNode;
}
return null;
};
const convertLayout = (altNode: AltLayoutMixin, node: LayoutMixin) => {
// Get the correct X/Y position when rotation is applied.
// This won't guarantee a perfect position, since we would still
// need to calculate the offset based on node width/height to compensate,
// which we are not currently doing. However, this is a lot better than nothing and will help LineNode.
if (node.rotation !== undefined && Math.round(node.rotation) !== 0) {
const boundingRect = getBoundingRect(node);
altNode.x = boundingRect.x;
altNode.y = boundingRect.y;
} else {
altNode.x = node.x;
altNode.y = node.y;
}
altNode.width = node.width;
altNode.height = node.height;
altNode.rotation = node.rotation;
altNode.layoutAlign = node.layoutAlign;
altNode.layoutGrow = node.layoutGrow;
altNode.overflow = (node as any).overflowDirection || "NONE";
};
const convertFrame = (altNode: AltFrameMixin, node: DefaultFrameMixin) => {
altNode.layoutMode = node.layoutMode;
altNode.primaryAxisSizingMode = node.primaryAxisSizingMode;
altNode.counterAxisSizingMode = node.counterAxisSizingMode;
// Fix this: https://stackoverflow.com/questions/57859754/flexbox-space-between-but-center-if-one-element
// It affects HTML, Tailwind, Flutter and possibly SwiftUI. So, let's be consistent.
if (
node.primaryAxisAlignItems === "SPACE_BETWEEN" &&
node.children.length === 1
) {
altNode.primaryAxisAlignItems = "CENTER";
} else {
altNode.primaryAxisAlignItems = node.primaryAxisAlignItems;
}
altNode.counterAxisAlignItems = node.counterAxisAlignItems;
altNode.paddingLeft = node.paddingLeft;
altNode.paddingRight = node.paddingRight;
altNode.paddingTop = node.paddingTop;
altNode.paddingBottom = node.paddingBottom;
altNode.itemSpacing = node.itemSpacing;
altNode.layoutGrids = node.layoutGrids;
altNode.gridStyleId = node.gridStyleId;
altNode.clipsContent = node.clipsContent;
altNode.guides = node.guides;
};
const convertGeometry = (altNode: AltGeometryMixin, node: GeometryMixin) => {
altNode.fills = node.fills;
altNode.strokes = node.strokes;
altNode.strokeWeight = node.strokeWeight;
altNode.strokeMiterLimit = node.strokeMiterLimit;
altNode.strokeAlign = node.strokeAlign;
altNode.strokeCap = node.strokeCap;
altNode.strokeJoin = node.strokeJoin;
altNode.dashPattern = node.dashPattern;
altNode.fillStyleId = node.fillStyleId;
altNode.strokeStyleId = node.strokeStyleId;
};
const convertBlend = (
altNode: AltBlendMixin,
node: BlendMixin & SceneNodeMixin
) => {
altNode.opacity = node.opacity;
altNode.blendMode = node.blendMode;
altNode.isMask = node.isMask;
altNode.effects = node.effects;
altNode.effectStyleId = node.effectStyleId;
const wrap = (altNode as any).wrapCode;
if (wrap && wrap !== "<<component>>") {
altNode.visible = true;
} else {
altNode.visible = node.visible;
}
};
const convertDefaultShape = (
altNode: AltDefaultShapeMixin,
node: DefaultShapeMixin
) => {
// opacity, visible
convertBlend(altNode, node);
// fills, strokes
convertGeometry(altNode, node);
// width, x, y
convertLayout(altNode, node);
};
const convertCorner = (altNode: AltCornerMixin, node: CornerMixin) => {
altNode.cornerRadius = node.cornerRadius;
altNode.cornerSmoothing = node.cornerSmoothing;
};
const convertRectangleCorner = (
altNode: AltRectangleCornerMixin,
node: RectangleCornerMixin
) => {
altNode.topLeftRadius = node.topLeftRadius;
altNode.topRightRadius = node.topRightRadius;
altNode.bottomLeftRadius = node.bottomLeftRadius;
altNode.bottomRightRadius = node.bottomRightRadius;
};
const convertIntoAltText = (altNode: AltTextNode, node: TextNode) => {
altNode.textAlignHorizontal = node.textAlignHorizontal;
altNode.textAlignVertical = node.textAlignVertical;
altNode.paragraphIndent = node.paragraphIndent;
altNode.paragraphSpacing = node.paragraphSpacing;
altNode.fontSize = node.fontSize;
altNode.fontName = node.fontName;
altNode.textCase = node.textCase;
altNode.textDecoration = node.textDecoration;
altNode.letterSpacing = node.letterSpacing;
altNode.textAutoResize = node.textAutoResize;
altNode.characters = node.characters;
altNode.lineHeight = node.lineHeight;
};
export function notEmpty<TValue>(
value: TValue | null | undefined
): value is TValue {
return value !== null && value !== undefined;
}
const applyMatrixToPoint = (matrix: number[][], point: number[]): number[] => {
return [
point[0] * matrix[0][0] + point[1] * matrix[0][1] + matrix[0][2],
point[0] * matrix[1][0] + point[1] * matrix[1][1] + matrix[1][2],
];
};
/**
* this function return a bounding rect for an nodes
*/
// x/y absolute coordinates
// height/width
// x2/y2 bottom right coordinates
export const getBoundingRect = (
node: LayoutMixin
): {
x: number;
y: number;
// x2: number;
// y2: number;
// height: number;
// width: number;
} => {
const boundingRect = {
x: 0,
y: 0,
// x2: 0,
// y2: 0,
// height: 0,
// width: 0,
};
const halfHeight = node.height / 2;
const halfWidth = node.width / 2;
const [[c0, s0, x], [s1, c1, y]] = node.absoluteTransform;
const matrix = [
[c0, s0, x + halfWidth * c0 + halfHeight * s0],
[s1, c1, y + halfWidth * s1 + halfHeight * c1],
];
// the coordinates of the corners of the rectangle
const XY: {
x: number[];
y: number[];
} = {
x: [1, -1, 1, -1],
y: [1, -1, -1, 1],
};
// fill in
for (let i = 0; i <= 3; i++) {
const a = applyMatrixToPoint(matrix, [
XY.x[i] * halfWidth,
XY.y[i] * halfHeight,
]);
XY.x[i] = a[0];
XY.y[i] = a[1];
}
XY.x.sort((a, b) => a - b);
XY.y.sort((a, b) => a - b);
return {
x: XY.x[0],
y: XY.y[0],
};
return boundingRect;
}; | altNode.props.children = children.characters.trim();
}
altNode.hasChildren = true;
altNode.renderChildren = "y"; | random_line_split |
altConversion.ts | import {
AltBaseNodeMixin,
AltBlendMixin,
AltCornerMixin,
AltDefaultShapeMixin,
AltEllipseNode,
AltFrameMixin,
AltFrameNode,
AltGeometryMixin,
AltGroupNode,
AltLayoutMixin,
AltRectangleCornerMixin,
AltRectangleNode,
AltSceneNode,
AltTextNode,
} from "./altMixins";
import { convertNodesOnRectangle } from "./convertNodesOnRectangle";
import { convertToAutoLayout } from "./convertToAutoLayout";
export const convertSingleNodeToAlt = (
node: SceneNode,
parent: AltFrameNode | AltGroupNode | null = null
): AltSceneNode => {
return convertIntoAltNodes([node], parent)[0];
};
export const frameNodeToAlt = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): AltRectangleNode | AltFrameNode | AltGroupNode => {
if (node.children.length === 0) {
// if it has no children, convert frame to rectangle
return frameToRectangleNode(node, altParent);
}
const altNode = new AltFrameNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertFrame(altNode, node);
convertCorner(altNode, node);
convertRectangleCorner(altNode, node);
altNode.hasChildren = true;
if (hasSingleChildren) {
altNode.children = [];
return altNode;
}
altNode.children = convertIntoAltNodes(node.children, altNode);
return convertToAutoLayout(convertNodesOnRectangle(altNode));
};
// auto convert Frame to Rectangle when Frame has no Children
const frameToRectangleNode = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode => {
const newNode = new AltRectangleNode();
newNode.id = node.id;
newNode.name = node.name;
if (altParent) {
newNode.parent = altParent;
}
convertDefaultShape(newNode, node);
convertRectangleCorner(newNode, node);
convertCorner(newNode, node);
return newNode;
};
const prepareTag = (altNode: AltBaseNodeMixin, node: SceneNode) => {
const gnode: SceneNode & GeometryMixin = node as any;
if (Array.isArray(gnode.fills)) {
altNode.bgImage = {
hash: "",
size: "FILL",
};
for (let f of gnode.fills) {
if (f.type === "IMAGE" && f.imageHash) {
altNode.bgImage.hash = f.imageHash;
altNode.bgImage.size = f.scaleMode;
}
}
}
const tagName = node.getPluginData("tagName");
if (tagName) {
altNode.tagName = tagName;
} else {
altNode.tagName = "div";
}
altNode.raster = (node.getPluginData("raster") || "") as any;
altNode.css = {
add: node.getPluginData("addcss"),
rem: node.getPluginData("remcss"),
style: node.getPluginData("style"),
inherit: node.getPluginData("inherit") as any,
};
if (altNode.raster && altNode.css && !altNode.css.inherit) {
altNode.css.inherit = {
width: false,
height: false,
class: true,
style: true,
};
}
if (
altNode.css &&
altNode.css.inherit &&
typeof altNode.css.inherit === "string"
) {
altNode.css.inherit = JSON.parse(altNode.css.inherit);
}
altNode.wrapCode = node.getPluginData("wrapCode") || "<<component>>";
altNode.props = JSON.parse(node.getPluginData("props") || "{}");
altNode.renderChildren = node.getPluginData("renderChildren") || ("y" as any);
if (node.type === "INSTANCE") {
const target: string = node.mainComponent.getPluginData("target");
const acceptChildren: boolean =
node.mainComponent.getPluginData("acceptChildren") === "y";
if (target) {
altNode.isInstance = true;
altNode.tagName = JSON.parse(target).name;
if (acceptChildren) {
const children = node.findOne((e) => e.name === "children");
if (children && children.type === "TEXT" && !altNode.props.children) {
altNode.props.children = children.characters.trim();
}
altNode.hasChildren = true;
altNode.renderChildren = "y";
} else {
altNode.hasChildren = false;
altNode.renderChildren = "n";
}
} else {
altNode.isInstance = false;
}
}
return altNode;
};
export const convertIntoAltNodes = (
sceneNode: ReadonlyArray<SceneNode>,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): Array<AltSceneNode> => {
const mapped: Array<AltSceneNode | null> = sceneNode.map(
(node: SceneNode) => {
if (node.type === "RECTANGLE" || node.type === "ELLIPSE") {
let altNode;
if (node.type === "RECTANGLE") {
altNode = new AltRectangleNode();
convertRectangleCorner(altNode, node);
} else {
altNode = new AltEllipseNode();
}
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertCorner(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "LINE") {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Lines have a height of zero, but they must have a height, so add 1.
altNode.height = 1;
// Let them be CENTER, since on Lines this property is ignored.
altNode.strokeAlign = "CENTER";
// Remove 1 since it now has a height of 1. It won't be visually perfect, but will be almost.
altNode.strokeWeight = altNode.strokeWeight - 1;
return prepareTag(altNode, node);
} else if (
node.type === "FRAME" ||
node.type === "INSTANCE" ||
node.type === "COMPONENT"
) {
let altNode;
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
altNode = prepareTag(iconToRect, node);
} else {
altNode = prepareTag(
frameNodeToAlt(node, altParent, hasSingleChildren),
node
);
}
return altNode;
} else if (node.type === "GROUP") {
if (node.children.length === 1 && node.visible !== false) {
// if Group is visible and has only one child, Group should disappear.
// there will be a single value anyway.
return convertIntoAltNodes(node.children, altParent)[0];
}
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
return iconToRect;
}
const altNode = new AltGroupNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertLayout(altNode, node);
convertBlend(altNode, node);
altNode.children = convertIntoAltNodes(node.children, altNode);
// try to find big rect and regardless of that result, also try to convert to autolayout.
// There is a big chance this will be returned as a Frame
// also, Group will always have at least 2 children.
return prepareTag(convertNodesOnRectangle(altNode), node);
} else if (node.type === "TEXT") {
const altNode = new AltTextNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertIntoAltText(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "VECTOR") {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
if (altNode.fills === figma.mixed || altNode.fills.length === 0) {
// Use rose[400] from Tailwind 2 when Vector has no color.
altNode.fills = [
{
type: "SOLID",
color: {
r: 0.5,
g: 0.23,
b: 0.27,
},
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
}
return prepareTag(altNode, node);
}
return null;
}
);
return mapped.filter(notEmpty);
};
// Collapses a node whose children are ALL vectors (i.e. an icon) into a
// single placeholder rectangle, since vector rendering is not supported.
// Returns null when the node has any non-vector child, so the caller falls
// back to the normal conversion path.
const iconToRectangle = (
node: FrameNode | InstanceNode | ComponentNode | GroupNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode | null => {
if (node.children.every((d) => d.type === "VECTOR")) {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertBlend(altNode, node);
// width, x, y
convertLayout(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
// Reset all stroke-related properties so the placeholder renders flat.
altNode.strokes = [];
altNode.strokeWeight = 0;
altNode.strokeMiterLimit = 0;
altNode.strokeAlign = "CENTER";
altNode.strokeCap = "NONE";
altNode.strokeJoin = "BEVEL";
altNode.dashPattern = [];
altNode.fillStyleId = "";
altNode.strokeStyleId = "";
// Placeholder image fill; the imageHash is intentionally empty.
altNode.fills = [
{
type: "IMAGE",
imageHash: "",
scaleMode: "FIT",
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
return altNode;
}
return null;
};
// Copies position, size, rotation and auto-layout participation properties
// from a Figma node onto the corresponding Alt node.
const convertLayout = (altNode: AltLayoutMixin, node: LayoutMixin) => {
// Get the correct X/Y position when rotation is applied.
// This won't guarantee a perfect position, since we would still
// need to calculate the offset based on node width/height to compensate,
// which we are not currently doing. However, this is a lot better than nothing and will help LineNode.
if (node.rotation !== undefined && Math.round(node.rotation) !== 0) {
const boundingRect = getBoundingRect(node);
altNode.x = boundingRect.x;
altNode.y = boundingRect.y;
} else {
altNode.x = node.x;
altNode.y = node.y;
}
altNode.width = node.width;
altNode.height = node.height;
altNode.rotation = node.rotation;
altNode.layoutAlign = node.layoutAlign;
altNode.layoutGrow = node.layoutGrow;
// overflowDirection only exists on frame-like nodes; default to "NONE".
altNode.overflow = (node as any).overflowDirection || "NONE";
};
// Copies auto-layout, padding, grid and clipping properties from a
// frame-like node onto the Alt frame.
const convertFrame = (altNode: AltFrameMixin, node: DefaultFrameMixin) => {
altNode.layoutMode = node.layoutMode;
altNode.primaryAxisSizingMode = node.primaryAxisSizingMode;
altNode.counterAxisSizingMode = node.counterAxisSizingMode;
// Fix this: https://stackoverflow.com/questions/57859754/flexbox-space-between-but-center-if-one-element
// It affects HTML, Tailwind, Flutter and possibly SwiftUI. So, let's be consistent.
// SPACE_BETWEEN with a single child renders like CENTER, so normalize it.
if (
node.primaryAxisAlignItems === "SPACE_BETWEEN" &&
node.children.length === 1
) {
altNode.primaryAxisAlignItems = "CENTER";
} else {
altNode.primaryAxisAlignItems = node.primaryAxisAlignItems;
}
altNode.counterAxisAlignItems = node.counterAxisAlignItems;
altNode.paddingLeft = node.paddingLeft;
altNode.paddingRight = node.paddingRight;
altNode.paddingTop = node.paddingTop;
altNode.paddingBottom = node.paddingBottom;
altNode.itemSpacing = node.itemSpacing;
altNode.layoutGrids = node.layoutGrids;
altNode.gridStyleId = node.gridStyleId;
altNode.clipsContent = node.clipsContent;
altNode.guides = node.guides;
};
// Mirror every fill- and stroke-related property from the source node onto
// the alt node, one property at a time.
const convertGeometry = (altNode: AltGeometryMixin, node: GeometryMixin) => {
  const geometryKeys = [
    "fills",
    "strokes",
    "strokeWeight",
    "strokeMiterLimit",
    "strokeAlign",
    "strokeCap",
    "strokeJoin",
    "dashPattern",
    "fillStyleId",
    "strokeStyleId",
  ] as const;
  for (const key of geometryKeys) {
    (altNode as any)[key] = (node as any)[key];
  }
};
// Copies blend and visibility properties onto the Alt node.
const convertBlend = (
altNode: AltBlendMixin,
node: BlendMixin & SceneNodeMixin
) => {
altNode.opacity = node.opacity;
altNode.blendMode = node.blendMode;
altNode.isMask = node.isMask;
altNode.effects = node.effects;
altNode.effectStyleId = node.effectStyleId;
// NOTE(review): wrapCode is read from the ALT node, not the Figma node;
// it is assigned by prepareTag, which normally runs after this function,
// so the forced-visible branch presumably only fires on re-processed
// nodes — confirm against the call order.
const wrap = (altNode as any).wrapCode;
if (wrap && wrap !== "<<component>>") {
altNode.visible = true;
} else {
altNode.visible = node.visible;
}
};
// Applies the three shared conversions (blend, geometry, layout) that every
// default shape needs.
const convertDefaultShape = (
altNode: AltDefaultShapeMixin,
node: DefaultShapeMixin
) => {
// opacity, visible
convertBlend(altNode, node);
// fills, strokes
convertGeometry(altNode, node);
// width, x, y
convertLayout(altNode, node);
};
// Copy the shared corner radius/smoothing pair across.
const convertCorner = (altNode: AltCornerMixin, node: CornerMixin) => {
  const { cornerRadius, cornerSmoothing } = node;
  altNode.cornerRadius = cornerRadius;
  altNode.cornerSmoothing = cornerSmoothing;
};
// Copy the four per-corner radii across.
const convertRectangleCorner = (
  altNode: AltRectangleCornerMixin,
  node: RectangleCornerMixin
) => {
  const { topLeftRadius, topRightRadius, bottomLeftRadius, bottomRightRadius } =
    node;
  altNode.topLeftRadius = topLeftRadius;
  altNode.topRightRadius = topRightRadius;
  altNode.bottomLeftRadius = bottomLeftRadius;
  altNode.bottomRightRadius = bottomRightRadius;
};
// Mirror every text-specific property from the Figma text node onto the
// alt text node.
const convertIntoAltText = (altNode: AltTextNode, node: TextNode) => {
  const textProps = [
    "textAlignHorizontal",
    "textAlignVertical",
    "paragraphIndent",
    "paragraphSpacing",
    "fontSize",
    "fontName",
    "textCase",
    "textDecoration",
    "letterSpacing",
    "textAutoResize",
    "characters",
    "lineHeight",
  ] as const;
  for (const prop of textProps) {
    (altNode as any)[prop] = (node as any)[prop];
  }
};
export function notEmpty<TValue>(
value: TValue | null | undefined
): value is TValue |
// Apply a 2x3 affine transform matrix to a 2D point and return the
// transformed [x, y] pair.
const applyMatrixToPoint = (matrix: number[][], point: number[]): number[] => {
  const [px, py] = point;
  const [[a, b, tx], [c, d, ty]] = matrix;
  return [a * px + b * py + tx, c * px + d * py + ty];
};
/**
 * Returns the top-left corner (absolute x/y) of the axis-aligned bounding
 * box of a (possibly rotated) node.
 *
 * The node's absoluteTransform maps local coordinates to absolute ones.
 * The transform is re-centered on the node's midpoint, all four corners
 * are transformed, and the minimum on each axis is returned.
 *
 * Fixes over the previous version: removes the unused `boundingRect`
 * local and the unreachable `return boundingRect;` after the first
 * return, and tracks a running minimum instead of mutating and sorting
 * the corner arrays.
 */
export const getBoundingRect = (
  node: LayoutMixin
): {
  x: number;
  y: number;
} => {
  const halfHeight = node.height / 2;
  const halfWidth = node.width / 2;
  const [[c0, s0, x], [s1, c1, y]] = node.absoluteTransform;
  // Shift the translation so the transform is applied relative to the
  // node's center rather than its top-left corner.
  const tx = x + halfWidth * c0 + halfHeight * s0;
  const ty = y + halfWidth * s1 + halfHeight * c1;
  // Sign pairs selecting the four corners around the center.
  const signX = [1, -1, 1, -1];
  const signY = [1, -1, -1, 1];
  let minX = Infinity;
  let minY = Infinity;
  for (let i = 0; i < 4; i++) {
    const localX = signX[i] * halfWidth;
    const localY = signY[i] * halfHeight;
    // Inline affine transform of the corner point.
    const cornerX = localX * c0 + localY * s0 + tx;
    const cornerY = localX * s1 + localY * c1 + ty;
    minX = Math.min(minX, cornerX);
    minY = Math.min(minY, cornerY);
  }
  return { x: minX, y: minY };
};
| {
return value !== null && value !== undefined;
} | identifier_body |
altConversion.ts | import {
AltBaseNodeMixin,
AltBlendMixin,
AltCornerMixin,
AltDefaultShapeMixin,
AltEllipseNode,
AltFrameMixin,
AltFrameNode,
AltGeometryMixin,
AltGroupNode,
AltLayoutMixin,
AltRectangleCornerMixin,
AltRectangleNode,
AltSceneNode,
AltTextNode,
} from "./altMixins";
import { convertNodesOnRectangle } from "./convertNodesOnRectangle";
import { convertToAutoLayout } from "./convertToAutoLayout";
// Convenience wrapper: converts a single Figma node (and its subtree) into
// an AltSceneNode by delegating to convertIntoAltNodes.
export const convertSingleNodeToAlt = (
node: SceneNode,
parent: AltFrameNode | AltGroupNode | null = null
): AltSceneNode => {
return convertIntoAltNodes([node], parent)[0];
};
// Converts a Frame/Instance/Component into an Alt node tree.
// Childless frames degrade to rectangles; otherwise children are converted
// recursively and the result may be promoted to auto-layout.
export const frameNodeToAlt = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): AltRectangleNode | AltFrameNode | AltGroupNode => {
if (node.children.length === 0) {
// if it has no children, convert frame to rectangle
return frameToRectangleNode(node, altParent);
}
const altNode = new AltFrameNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertFrame(altNode, node);
convertCorner(altNode, node);
convertRectangleCorner(altNode, node);
altNode.hasChildren = true;
// When the caller handles children itself, leave them empty here.
if (hasSingleChildren) {
altNode.children = [];
return altNode;
}
altNode.children = convertIntoAltNodes(node.children, altNode);
// Detect big background rectangles and try to infer auto-layout.
return convertToAutoLayout(convertNodesOnRectangle(altNode));
};
// auto convert Frame to Rectangle when Frame has no Children
// The resulting rectangle keeps the frame's blend/geometry/layout and
// corner properties, but loses frame-only features (auto-layout, grids).
const frameToRectangleNode = (
node: FrameNode | InstanceNode | ComponentNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode => {
const newNode = new AltRectangleNode();
newNode.id = node.id;
newNode.name = node.name;
if (altParent) {
newNode.parent = altParent;
}
convertDefaultShape(newNode, node);
convertRectangleCorner(newNode, node);
convertCorner(newNode, node);
return newNode;
};
// Attaches plugin-driven metadata (tag name, raster/CSS overrides, wrap
// code, props) read via getPluginData onto the alt node, and records the
// last IMAGE fill as bgImage. Returns the same alt node for chaining.
const prepareTag = (altNode: AltBaseNodeMixin, node: SceneNode) => {
const gnode: SceneNode & GeometryMixin = node as any;
if (Array.isArray(gnode.fills)) {
// Default background image info; overwritten below by any IMAGE fill.
altNode.bgImage = {
hash: "",
size: "FILL",
};
for (let f of gnode.fills) {
if (f.type === "IMAGE" && f.imageHash) {
altNode.bgImage.hash = f.imageHash;
altNode.bgImage.size = f.scaleMode;
}
}
}
// Tag name stored by the plugin UI; fall back to a plain div.
const tagName = node.getPluginData("tagName");
if (tagName) {
altNode.tagName = tagName;
} else {
altNode.tagName = "div";
}
altNode.raster = (node.getPluginData("raster") || "") as any;
altNode.css = {
add: node.getPluginData("addcss"),
rem: node.getPluginData("remcss"),
style: node.getPluginData("style"),
inherit: node.getPluginData("inherit") as any,
};
// Rasterized nodes default to inheriting class/style but not size.
if (altNode.raster && altNode.css && !altNode.css.inherit) {
altNode.css.inherit = {
width: false,
height: false,
class: true,
style: true,
};
}
// Plugin data is stored as strings; decode a JSON-encoded inherit object.
if (
altNode.css &&
altNode.css.inherit &&
typeof altNode.css.inherit === "string"
) {
altNode.css.inherit = JSON.parse(altNode.css.inherit);
}
altNode.wrapCode = node.getPluginData("wrapCode") || "<<component>>";
altNode.props = JSON.parse(node.getPluginData("props") || "{}");
altNode.renderChildren = node.getPluginData("renderChildren") || ("y" as any);
if (node.type === "INSTANCE") {
// NOTE(review): instances read their mapping target from the MAIN
// component's plugin data — presumably written by a companion UI; confirm.
const target: string = node.mainComponent.getPluginData("target");
const acceptChildren: boolean =
node.mainComponent.getPluginData("acceptChildren") === "y";
if (target) {
altNode.isInstance = true;
altNode.tagName = JSON.parse(target).name;
if (acceptChildren) {
// A TEXT child named "children" supplies default children props.
const children = node.findOne((e) => e.name === "children");
if (children && children.type === "TEXT" && !altNode.props.children) {
altNode.props.children = children.characters.trim();
}
altNode.hasChildren = true;
altNode.renderChildren = "y";
} else {
altNode.hasChildren = false;
altNode.renderChildren = "n";
}
} else {
altNode.isInstance = false;
}
}
return altNode;
};
// Converts an array of Figma SceneNodes into Alt nodes, dispatching on
// node.type. Unsupported node types map to null and are filtered out at
// the end. Children are converted recursively with the new alt node as
// their parent.
export const convertIntoAltNodes = (
sceneNode: ReadonlyArray<SceneNode>,
altParent: AltFrameNode | AltGroupNode | null = null,
hasSingleChildren = false
): Array<AltSceneNode> => {
const mapped: Array<AltSceneNode | null> = sceneNode.map(
(node: SceneNode) => {
if (node.type === "RECTANGLE" || node.type === "ELLIPSE") {
let altNode;
if (node.type === "RECTANGLE") {
altNode = new AltRectangleNode();
convertRectangleCorner(altNode, node);
} else {
altNode = new AltEllipseNode();
}
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertCorner(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "LINE") {
// Lines are modeled as 1px-tall rectangles.
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Lines have a height of zero, but they must have a height, so add 1.
altNode.height = 1;
// Let them be CENTER, since on Lines this property is ignored.
altNode.strokeAlign = "CENTER";
// Remove 1 since it now has a height of 1. It won't be visually perfect, but will be almost.
altNode.strokeWeight = altNode.strokeWeight - 1;
return prepareTag(altNode, node);
} else if (
node.type === "FRAME" ||
node.type === "INSTANCE" ||
node.type === "COMPONENT"
) {
let altNode;
// All-vector frames collapse to a placeholder rectangle (icon).
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
altNode = prepareTag(iconToRect, node);
} else {
altNode = prepareTag(
frameNodeToAlt(node, altParent, hasSingleChildren),
node
);
}
return altNode;
} else if (node.type === "GROUP") {
if (node.children.length === 1 && node.visible !== false) {
// if Group is visible and has only one child, Group should disappear.
// there will be a single value anyway.
return convertIntoAltNodes(node.children, altParent)[0];
}
const iconToRect = iconToRectangle(node, altParent);
if (iconToRect != null) {
return iconToRect;
}
const altNode = new AltGroupNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertLayout(altNode, node);
convertBlend(altNode, node);
altNode.children = convertIntoAltNodes(node.children, altNode);
// try to find big rect and regardless of that result, also try to convert to autolayout.
// There is a big chance this will be returned as a Frame
// also, Group will always have at least 2 children.
return prepareTag(convertNodesOnRectangle(altNode), node);
} else if (node.type === "TEXT") {
const altNode = new AltTextNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
convertIntoAltText(altNode, node);
return prepareTag(altNode, node);
} else if (node.type === "VECTOR") {
// Standalone vectors are replaced with a placeholder rectangle.
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) {
altNode.parent = altParent;
}
convertDefaultShape(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
if (altNode.fills === figma.mixed || altNode.fills.length === 0) {
// Use rose[400] from Tailwind 2 when Vector has no color.
altNode.fills = [
{
type: "SOLID",
color: {
r: 0.5,
g: 0.23,
b: 0.27,
},
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
}
return prepareTag(altNode, node);
}
// Unsupported node types (e.g. SLICE) are dropped.
return null;
}
);
return mapped.filter(notEmpty);
};
const iconToRectangle = (
node: FrameNode | InstanceNode | ComponentNode | GroupNode,
altParent: AltFrameNode | AltGroupNode | null
): AltRectangleNode | null => {
if (node.children.every((d) => d.type === "VECTOR")) {
const altNode = new AltRectangleNode();
altNode.id = node.id;
altNode.name = node.name;
if (altParent) |
convertBlend(altNode, node);
// width, x, y
convertLayout(altNode, node);
// Vector support is still missing. Meanwhile, add placeholder.
altNode.cornerRadius = 0;
altNode.strokes = [];
altNode.strokeWeight = 0;
altNode.strokeMiterLimit = 0;
altNode.strokeAlign = "CENTER";
altNode.strokeCap = "NONE";
altNode.strokeJoin = "BEVEL";
altNode.dashPattern = [];
altNode.fillStyleId = "";
altNode.strokeStyleId = "";
altNode.fills = [
{
type: "IMAGE",
imageHash: "",
scaleMode: "FIT",
visible: true,
opacity: 0.5,
blendMode: "NORMAL",
},
];
return altNode;
}
return null;
};
// Copies position, size, rotation and auto-layout participation properties
// from a Figma node onto the corresponding Alt node.
const convertLayout = (altNode: AltLayoutMixin, node: LayoutMixin) => {
// Get the correct X/Y position when rotation is applied.
// This won't guarantee a perfect position, since we would still
// need to calculate the offset based on node width/height to compensate,
// which we are not currently doing. However, this is a lot better than nothing and will help LineNode.
if (node.rotation !== undefined && Math.round(node.rotation) !== 0) {
const boundingRect = getBoundingRect(node);
altNode.x = boundingRect.x;
altNode.y = boundingRect.y;
} else {
altNode.x = node.x;
altNode.y = node.y;
}
altNode.width = node.width;
altNode.height = node.height;
altNode.rotation = node.rotation;
altNode.layoutAlign = node.layoutAlign;
altNode.layoutGrow = node.layoutGrow;
// overflowDirection only exists on frame-like nodes; default to "NONE".
altNode.overflow = (node as any).overflowDirection || "NONE";
};
// Copies auto-layout, padding, grid and clipping properties from a
// frame-like node onto the Alt frame.
const convertFrame = (altNode: AltFrameMixin, node: DefaultFrameMixin) => {
altNode.layoutMode = node.layoutMode;
altNode.primaryAxisSizingMode = node.primaryAxisSizingMode;
altNode.counterAxisSizingMode = node.counterAxisSizingMode;
// Fix this: https://stackoverflow.com/questions/57859754/flexbox-space-between-but-center-if-one-element
// It affects HTML, Tailwind, Flutter and possibly SwiftUI. So, let's be consistent.
// SPACE_BETWEEN with a single child renders like CENTER, so normalize it.
if (
node.primaryAxisAlignItems === "SPACE_BETWEEN" &&
node.children.length === 1
) {
altNode.primaryAxisAlignItems = "CENTER";
} else {
altNode.primaryAxisAlignItems = node.primaryAxisAlignItems;
}
altNode.counterAxisAlignItems = node.counterAxisAlignItems;
altNode.paddingLeft = node.paddingLeft;
altNode.paddingRight = node.paddingRight;
altNode.paddingTop = node.paddingTop;
altNode.paddingBottom = node.paddingBottom;
altNode.itemSpacing = node.itemSpacing;
altNode.layoutGrids = node.layoutGrids;
altNode.gridStyleId = node.gridStyleId;
altNode.clipsContent = node.clipsContent;
altNode.guides = node.guides;
};
// Mirror every fill- and stroke-related property from the source node onto
// the alt node, one property at a time.
const convertGeometry = (altNode: AltGeometryMixin, node: GeometryMixin) => {
  const geometryKeys = [
    "fills",
    "strokes",
    "strokeWeight",
    "strokeMiterLimit",
    "strokeAlign",
    "strokeCap",
    "strokeJoin",
    "dashPattern",
    "fillStyleId",
    "strokeStyleId",
  ] as const;
  for (const key of geometryKeys) {
    (altNode as any)[key] = (node as any)[key];
  }
};
// Copies blend and visibility properties onto the Alt node.
const convertBlend = (
altNode: AltBlendMixin,
node: BlendMixin & SceneNodeMixin
) => {
altNode.opacity = node.opacity;
altNode.blendMode = node.blendMode;
altNode.isMask = node.isMask;
altNode.effects = node.effects;
altNode.effectStyleId = node.effectStyleId;
// NOTE(review): wrapCode is read from the ALT node, not the Figma node;
// it is assigned by prepareTag, which normally runs after this function,
// so the forced-visible branch presumably only fires on re-processed
// nodes — confirm against the call order.
const wrap = (altNode as any).wrapCode;
if (wrap && wrap !== "<<component>>") {
altNode.visible = true;
} else {
altNode.visible = node.visible;
}
};
// Applies the three shared conversions (blend, geometry, layout) that every
// default shape needs.
const convertDefaultShape = (
altNode: AltDefaultShapeMixin,
node: DefaultShapeMixin
) => {
// opacity, visible
convertBlend(altNode, node);
// fills, strokes
convertGeometry(altNode, node);
// width, x, y
convertLayout(altNode, node);
};
// Copy the shared corner radius/smoothing pair across.
const convertCorner = (altNode: AltCornerMixin, node: CornerMixin) => {
  const { cornerRadius, cornerSmoothing } = node;
  altNode.cornerRadius = cornerRadius;
  altNode.cornerSmoothing = cornerSmoothing;
};
// Copy the four per-corner radii across.
const convertRectangleCorner = (
  altNode: AltRectangleCornerMixin,
  node: RectangleCornerMixin
) => {
  const { topLeftRadius, topRightRadius, bottomLeftRadius, bottomRightRadius } =
    node;
  altNode.topLeftRadius = topLeftRadius;
  altNode.topRightRadius = topRightRadius;
  altNode.bottomLeftRadius = bottomLeftRadius;
  altNode.bottomRightRadius = bottomRightRadius;
};
// Mirror every text-specific property from the Figma text node onto the
// alt text node.
const convertIntoAltText = (altNode: AltTextNode, node: TextNode) => {
  const textProps = [
    "textAlignHorizontal",
    "textAlignVertical",
    "paragraphIndent",
    "paragraphSpacing",
    "fontSize",
    "fontName",
    "textCase",
    "textDecoration",
    "letterSpacing",
    "textAutoResize",
    "characters",
    "lineHeight",
  ] as const;
  for (const prop of textProps) {
    (altNode as any)[prop] = (node as any)[prop];
  }
};
// Type guard that filters out null and undefined while narrowing TValue.
export function notEmpty<TValue>(
  value: TValue | null | undefined
): value is TValue {
  // Loose inequality against null matches both null and undefined.
  return value != null;
}
// Apply a 2x3 affine transform matrix to a 2D point and return the
// transformed [x, y] pair.
const applyMatrixToPoint = (matrix: number[][], point: number[]): number[] => {
  const [px, py] = point;
  const [[a, b, tx], [c, d, ty]] = matrix;
  return [a * px + b * py + tx, c * px + d * py + ty];
};
/**
 * Returns the top-left corner (absolute x/y) of the axis-aligned bounding
 * box of a (possibly rotated) node.
 *
 * The node's absoluteTransform maps local coordinates to absolute ones.
 * The transform is re-centered on the node's midpoint, all four corners
 * are transformed, and the minimum on each axis is returned.
 *
 * Fixes over the previous version: removes the unused `boundingRect`
 * local and the unreachable `return boundingRect;` after the first
 * return, and tracks a running minimum instead of mutating and sorting
 * the corner arrays.
 */
export const getBoundingRect = (
  node: LayoutMixin
): {
  x: number;
  y: number;
} => {
  const halfHeight = node.height / 2;
  const halfWidth = node.width / 2;
  const [[c0, s0, x], [s1, c1, y]] = node.absoluteTransform;
  // Shift the translation so the transform is applied relative to the
  // node's center rather than its top-left corner.
  const tx = x + halfWidth * c0 + halfHeight * s0;
  const ty = y + halfWidth * s1 + halfHeight * c1;
  // Sign pairs selecting the four corners around the center.
  const signX = [1, -1, 1, -1];
  const signY = [1, -1, -1, 1];
  let minX = Infinity;
  let minY = Infinity;
  for (let i = 0; i < 4; i++) {
    const localX = signX[i] * halfWidth;
    const localY = signY[i] * halfHeight;
    // Inline affine transform of the corner point.
    const cornerX = localX * c0 + localY * s0 + tx;
    const cornerY = localX * s1 + localY * c1 + ty;
    minX = Math.min(minX, cornerX);
    minY = Math.min(minY, cornerY);
  }
  return { x: minX, y: minY };
};
| {
altNode.parent = altParent;
} | conditional_block |
file_generator.py | #import glob
import json
import logging
import os
import pandas as pd
import numpy as np
import yaml
import lar_constraints
import lar_generator
from rules_engine import rules_engine
from test_file_generator import test_data
import utils
class FileGenerator(object):
def __init__(self, file_paths_config='configurations/test_filepaths.yaml', geo_config_file='configurations/geographic_data.yaml',
clean_file_config='configurations/clean_file_config.yaml', edit_report_config='configurations/edit_report_config.yaml'):
"""Load the YAML configurations, configure logging, load geographic and
schema data, and instantiate the generator/constraint/validator helpers.

All four arguments are paths to YAML configuration files relative to
the working directory.
"""
print("initializing file generator")
#Loads the filepath configuration.
print("opening config files")
with open(file_paths_config) as f:
#Uses safe_load instead of load.
self.filepaths = yaml.safe_load(f)
#Loads the geographic file configuration.
with open(geo_config_file) as f:
# Uses safe_load instead of load.
self.geographic = yaml.safe_load(f)
with open(self.geographic["geographic_data_file"]) as f:
self.geo_data = yaml.safe_load(f)
#Loads the clean file configuration.
with open(clean_file_config) as f:
# Uses safe_load instead of load.
self.clean_config = yaml.safe_load(f)
self.geo_config_file = geo_config_file #set geo config file as class variable to instantiate other classes
#Loads the edit report configuration.
with open(edit_report_config) as f:
# Uses safe_load instead of load.
self.edit_report_config = yaml.safe_load(f)
print("config files loaded")
#Sets the logging parameters.
#Uses a log file name and file writing mode from the
#test_filepaths yaml.
print("configuring logging in file Generator")
logging.basicConfig(filename=self.filepaths['log_filename'],
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filemode=self.filepaths['log_mode'],
level=logging.INFO)
print("logging configured")
#Loads geographic geographic data from filepaths named in the test_filepaths
#yaml file.
print("loading geo data to file generator")
self.geographic_data = pd.read_csv(self.geographic['geographic_data_file'], delimiter='|', header=None,
names=self.geographic['file_columns'], dtype=object)
#create 5 digit County Codes from 2 digit state and 3 digit county
self.geographic_data['county_fips'] = self.geographic_data.apply(lambda x: str(x.state_code) + str(x.county), axis=1)
#create 11 digit Census Tract codes from 5 digit county and 6 digit tract
self.geographic_data["tract_fips"] = self.geographic_data.apply(lambda x: str(x.county_fips) + str(x.tracts), axis=1)
#Counties flagged "S" are small counties; some edits treat them specially.
self.small_counties = list(self.geographic_data.county_fips[self.geographic_data.small_county=="S"])
print("geo data loaded to file generator")
#Loads schemas for LAR and TS.
#Schemas contain valid enumerations, including NA values, for each field in the dataset.
print("loading JSON schema files")
self.lar_schema_df = pd.DataFrame(json.load(open(self.filepaths['lar_schema_json'], "r")))
self.ts_schema_df = pd.DataFrame(json.load(open(self.filepaths['ts_schema_json'], "r")))
#Instantiates the other classes.
#lar_gen is responsible for generating data according to the values in the schema.
print("instantiating class objects")
print("lar gen loading")
self.lar_gen = lar_generator.lar_gen()
print("lar gen done")
#lar_constrains is responsible for modifying generated data so that
#the resulting file passes syntax and validity edits.
print("lar constraints loading")
self.lar_const = lar_constraints.lar_constraints()
print("lar constraints loaded")
#Collects the names of constraint methods: those starting with "s" or "v"
#followed by a three digit edit number.
self.constraints = []
for func in dir(self.lar_const):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
self.constraints.append(func)
#lar_validator checks a dataframe and returns a JSON with
#edit pass/fail results.
print("rules engine loading")
self.lar_validator = rules_engine(geo_config_file=self.geo_config_file)
#tracts=tracts, counties=counties, small_counties=small_counties)
print("rules engine loaded")
print("file generator initialization complete")
def validation(self, row, ts_row):
"""
Applies the syntax and validity rules engine logic
to the LAR row to create an edit report.

row: a dict representing a single LAR record.
ts_row: a dict representing the TS (transmittal sheet) record.
Returns the rules engine's results list of edit pass/fail entries.
"""
#Creates dataframes of LAR and TS data.
lar_data = pd.DataFrame(row, index=[1])
ts_data = pd.DataFrame(ts_row, index=[0])
#Instantiates a rules checker to check the row against edits in the rules engine.
#Loads LAR and TS data to the rules engine to validate LAR data and return an edit report
self.lar_validator.load_lar_data(lar_data)
self.lar_validator.load_ts_data(ts_data)
#Runs the edits against the LAR row and produces edit check results.
#Edit methods follow the naming convention s### / v###.
for func in dir(self.lar_validator):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(self.lar_validator, func)(row)
#Returns edit check results.
return self.lar_validator.results
def make_clean_lar_row(self, ts_row):
|
def create_files(self, kind):
"""Creates a clean file or a set of edit files specified by the
function call.

kind: either 'clean_file' (generate a syntactically clean submission
file) or 'error_files' (mutate an existing clean file to produce one
failing file per edit). Any other value is a no-op.
"""
#Creates TS row data.
self.ts_row = self.lar_gen.make_ts_row()
#Creates a TS row dataframe.
ts_df = pd.DataFrame(self.ts_row, index=[1])
#The following produces a clean file.
if kind == 'clean_file':
#Creates a first row of LAR to begin the dataframe.
#All other rows are concatenated to the dataframe until
#the length of the dataframe reaches the file length specified in the
#test filepaths yaml file.
for i in range(0, self.clean_config["file_length"]["value"] ):
print('Creating row {i}'.format(i=i))
if i==0:
first_row = self.make_clean_lar_row(ts_row=self.ts_row)
lar_frame = pd.DataFrame(first_row, index=[1])
else:
new_row = self.make_clean_lar_row(ts_row=self.ts_row)
new_row = pd.DataFrame(new_row, index=[1])
lar_frame = pd.concat([lar_frame, new_row], axis=0)
#Writes the file to a clean filepath specified in the test_filepaths
#configuration.
out_file_path = self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"])
out_file_bank_name = self.filepaths['clean_filename'].format(row_count=self.clean_config["file_length"]["value"] , bank_name=self.clean_config["name"]["value"])
utils.write_file(ts_input=ts_df, lar_input=lar_frame, path=out_file_path, name=out_file_bank_name)
#For error files.
if kind == 'error_files':
#Modifies clean data and outputs
#resulting files that fail specific edits.
#Instantiates the edit file maker.
file_maker = test_data(ts_schema=self.ts_schema_df,
lar_schema=self.lar_schema_df,
geographic_data=self.geographic_data)
#Pulls in the clean data filepath and name from the
#test filepaths yaml file.
ts_data, lar_data = utils.read_data_file(path=self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"]),
data_file=self.filepaths["clean_filename"].format(bank_name=self.clean_config["name"]["value"], row_count=self.clean_config["file_length"]["value"]))
#Passes clean file data to the file maker object.
file_maker.load_data_frames(ts_data, lar_data)
#Generates a file for each edit function in file maker.
edits = []
#Loops over all data modification functions.
for func in dir(file_maker):
#Checks if function is a numbered syntax or validity edit.
#(q-prefixed quality edits are included here as well.)
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
print("applying:", func)
#Applies data modification functions and produces files.
getattr(file_maker, func)()
def validate_quality_edit_file(self, quality_filename):
"""
The test file generator logic for creating quality edit test files
may cause rows to fail not only quality edits, but also
syntax or validity edits in the creation process.
This function takes a quality edit test file from the quality filepaths
directory in the test filepaths yaml, drops rows that have
syntax or validity edits, and duplicates the remaining clean rows to the
length of the original file.
The file is then saved in a new directory for quality edit test files
that also pass syntax and validity edits.
NOTE: This function works to allow LAR rows to pass syntax and validity edits and
does not validate the TS sheet.

quality_filename: name of the quality edit test file to process.
"""
try:
#Instantiates an edit checker object with rules_engine.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Reads the files and separates data into TS and LAR frames.
ts_df, lar_df = utils.read_data_file(
path=self.filepaths['quality_filepath'].format(bank_name=self.clean_config['name']['value']),
data_file=quality_filename)
#Stores the original length of the file.
original_length = len(lar_df.index)
#Loads data into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')]
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(
bank_name=self.clean_config['name']['value']),
ts_input=ts_df,
lar_input=lar_df,
name=quality_filename)
#The case if there are rows that failed syntax or validity edits.
else:
#Creates an empty list for storing row numbers
#where edits have failed.
uli_list = []
#Iterates through each row and appends the list of ULI's
#of rows where syntax or validity edits have failed.
for index, row in report_df.iterrows():
uli_list.append(row.row_ids)
#Drops not-a-numbers from the ULI list.
if np.nan in uli_list:
uli_list.remove(np.nan)
#Creates a new list to store the ULI's without nested brackets
#(row_ids entries are themselves lists, so flatten one level).
new_list = []
for i in range(len(uli_list)):
for n in range(len(uli_list[i])):
new_list.append(uli_list[i][n])
#Creates a list that removes ULI's that are repeated.
unique_uli_list = []
for i in new_list:
if i not in unique_uli_list:
unique_uli_list.append(i)
#Creates a list of row numbers corresponding to the
#ULI's that have failed syntax or validity edits.
bad_rows = []
for index, row in lar_df.iterrows():
if row['uli'] in unique_uli_list:
failed_uli = row['uli']
bad_rows.append(lar_df[lar_df['uli']==failed_uli].index.values.astype(int)[0])
#Drops all rows that failed syntax or validity edits
#from the original LAR dataframe.
lar_df = lar_df.drop(bad_rows)
#Creates new lar rows to the original length of the file
#using the utils new lar rows function.
ts_df, lar_df = utils.new_lar_rows(final_row_count=original_length,
lar_df=lar_df, ts_df=ts_df)
#Writes the file to the new path for quality test files
#that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value']),
ts_input=ts_df, lar_input=lar_df, name=quality_filename)
print("Adjusting {file} to pass syntax and validity edits.".format(file=quality_filename))
print("File saved in {path}".format(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value'])))
#The condition where there are no clean rows present in the file.
#NOTE(review): ZeroDivisionError presumably surfaces from
#utils.new_lar_rows when no rows remain — confirm.
except ZeroDivisionError as e:
#Prints a message to indicate that the file has not been validated.
print(e)
print("Sorry no clean file available for {file}.".format(file=quality_filename))
def edit_report(self):
"""
This function takes in a filepath and name, producing a report on
whether any rows of the data failed syntax, validity or quality edits.
The report contains among its fields the edit name, the status of the
edit, the number of rows failed by the edit, if any, and the ULI's or
NULIs (loan ID) of the rows that fail the edit.
The resulting report is saved as a csv file using configurations
from the test_filepaths.yaml file.
"""
#Instantiates the rules engine class as a checker object with a
#LAR schema, a TS schema, and geographic geographic data.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Seperates data from the filepath and filename into a TS dataframe
#and a LAR dataframe.
ts_df, lar_df = utils.read_data_file(path=self.edit_report_config['data_filepath'],
data_file=self.edit_report_config['data_filename'])
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Applies each function in the rules engine that checks for edits
#and creates a results list of edits failed or passed.
for func in dir(checker):
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a dataframe of results from the checker.
report_df = pd.DataFrame(checker.results)
#Writes the report to the filepath and name designated in
#the test_fielpaths yaml
edit_report_path = self.edit_report_config['edit_report_output_filepath']
if not os.path.exists(edit_report_path):
os.makedirs(edit_report_path)
report_df.to_csv(edit_report_path +self.edit_report_config['edit_report_output_filename'])
#Logs the result.
logging.info("Edit Report has been created in {filepath}".format(
filepath=edit_report_path))
def create_custom_row(self, dictionary, clean_filepath, clean_filename):
"""
Creates a custom clean LAR row by passing in a dictionary of columns
and new values to modify all the rows of an
existing clean file, filters the modified file for clean rows,
and then pulls the first row from the file.
Pulls rows from the clean file last generated. Suggestion that
the file pulled should be 1000 original rows or greater, to ensure
that modified clean rows can be found.
"""
#Creates a TS and LAR dataframe from the clean filepath and name
#specified.
ts_df, lar_df = utils.read_data_file(
path=clean_filepath,
data_file=clean_filename)
#Changes each column (key in the dictionary) to the new value in
# the dictionary.
for key, value in dictionary.items():
lar_df[key] = value
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df,
geographic_data=self.geographic_data)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')].copy()
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
#Takes the first row of data.
lar_row = lar_df[0:1]
#The case if there are rows that failed syntax or validity edits.
else:
#Creates a list of ULI's corresponding to rows where
#syntax or validity edits have failed.
#The resulting list is a list of lists, a list of ulis failed for each
#edit failed.
uli_list = list(report_df.row_ids)
#Converts the list of lists to a single list.
single_uli_list = []
for i in uli_list:
single_uli_list = single_uli_list + i
#Creates a list that removes ULI's that are repeated.
unique_uli_list = set(single_uli_list)
#Drops rows in the data containing syntax or validity edits.
lar_df = lar_df[lar_df.uli.isin(unique_uli_list)].copy()
#Only one row is needed for output.
#The following, takes the first row of data from the clean dataframe
lar_row = lar_df[0:1]
return(lar_row)
| """Uses the lar_gen object and a TS row to create a LAR row that
passes syntax and validity edits according to the FIG."""
#Stores the stop condition and the initial number of iterations.
#stop = False
iters = 1
#Makes a new row using the lar generator.
row = self.lar_gen.make_row(lei=self.clean_config["lei"]["value"])
#Begins a loop that creates the LAR row. The loop generates the row
#with the lar_generator and then validates the row
#against the rules engine for syntax or validity edits.
#If syntax or validity edits are present, the row modified using contraints until it conforms to FIG spec
stop = False
while stop == False:
#Copies row to enable diff logging.
#this helps troubleshoot where lar generation gets stuck
row_base = row.copy()
#Creates an edit report based on the validation.
res = pd.DataFrame(self.validation(row, ts_row))
#debugging print section
print("*"*25)
print("iterations:", iters)
#print(logging.info(res[res.status=="failed"]))
print(len(res[res.status=="failed"]), "row fails")
#print((res[res.status=="failed"]))
#print(res)
#If there are no syntax or validity edits present, the stop condition is invoked and the row is returned
#and added to the LAR dataset
if len(res[res.status=="failed"])<=0:
stop = True
else:
#Logs the results of edits that have failed.
logging.info(res[res.status=="failed"])
message = "\nstarting constraints iteration {iter}".format(iter=iters)
logging.info(message)
for constraint in self.constraints:
row = row = getattr(self.lar_const, constraint)(row)#self.apply_constraint(row=row, func=constraint)
logging.info("new row")
logging.info(row)
diff = set(row_base.items()) - set(row.items())
logging.info("row diff")
logging.info(diff)
#print(diff)
#If there are syntax or validity edits present, the constraints
#are applied until the LAR data pass the FIG edit checks
#else:
#row = self.constraints_loop(constraints=self.get_const_list(), row=row, row_base=row_base)
iters += 1
return row | identifier_body |
file_generator.py | #import glob
import json
import logging
import os
import pandas as pd
import numpy as np
import yaml
import lar_constraints
import lar_generator
from rules_engine import rules_engine
from test_file_generator import test_data
import utils
class FileGenerator(object):
def __init__(self, file_paths_config='configurations/test_filepaths.yaml', geo_config_file='configurations/geographic_data.yaml',
clean_file_config='configurations/clean_file_config.yaml', edit_report_config='configurations/edit_report_config.yaml'):
print("initializing file generator")
#Loads the filepath configuration.
print("opening config files")
with open(file_paths_config) as f:
#Uses safe_load instead of load.
self.filepaths = yaml.safe_load(f)
#Loads the geographic file configuration.
with open(geo_config_file) as f:
# Uses safe_load instead of load.
self.geographic = yaml.safe_load(f)
with open(self.geographic["geographic_data_file"]) as f:
self.geo_data = yaml.safe_load(f)
#Loads the clean file configuration.
with open(clean_file_config) as f:
# Uses safe_load instead of load.
self.clean_config = yaml.safe_load(f)
self.geo_config_file = geo_config_file #set geo config file as class variable to instantiate other classes
#Loads the edit report configuration.
with open(edit_report_config) as f:
# Uses safe_load instead of load.
self.edit_report_config = yaml.safe_load(f)
print("config files loaded")
#Sets the logging parameters.
#Uses a log file name and file writing mode from the
#test_filepaths yaml.
print("configuring logging in file Generator")
logging.basicConfig(filename=self.filepaths['log_filename'],
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filemode=self.filepaths['log_mode'],
level=logging.INFO)
print("logging configured")
#Loads geographic geographic data from filepaths named in the test_filepaths
#yaml file.
print("loading geo data to file generator")
self.geographic_data = pd.read_csv(self.geographic['geographic_data_file'], delimiter='|', header=None,
names=self.geographic['file_columns'], dtype=object)
#create 5 digit County Codes from 2 digit state and 3 digit county
self.geographic_data['county_fips'] = self.geographic_data.apply(lambda x: str(x.state_code) + str(x.county), axis=1)
#create 11 digit Census Tract codes from 5 digit county and 6 digit tract
self.geographic_data["tract_fips"] = self.geographic_data.apply(lambda x: str(x.county_fips) + str(x.tracts), axis=1)
self.small_counties = list(self.geographic_data.county_fips[self.geographic_data.small_county=="S"])
print("geo data loaded to file generator")
#Loads schemas for LAR and TS.
#Schemas contain valid enumerations, including NA values, for each field in the dataset.
print("loading JSON schema files")
self.lar_schema_df = pd.DataFrame(json.load(open(self.filepaths['lar_schema_json'], "r")))
self.ts_schema_df = pd.DataFrame(json.load(open(self.filepaths['ts_schema_json'], "r")))
#Instantiates the other classes.
#lar_gen is responsible for generating data according to the values in the schema.
print("instantiating class objects")
print("lar gen loading")
self.lar_gen = lar_generator.lar_gen()
print("lar gen done")
#lar_constrains is responsible for modifying generated data so that
#the resulting file passes syntax and validity edits.
print("lar constraints loading")
self.lar_const = lar_constraints.lar_constraints()
print("lar constraints loaded")
self.constraints = []
for func in dir(self.lar_const):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
self.constraints.append(func)
#lar_validator checks a dataframe and returns a JSON with
#edit pass/fail results.
print("rules engine loading")
self.lar_validator = rules_engine(geo_config_file=self.geo_config_file)
#tracts=tracts, counties=counties, small_counties=small_counties)
print("rules engine loaded")
print("file generator initialization complete")
def validation(self, row, ts_row):
"""
Applies the syntax and validity rules engine logic
to the LAR row to create an edit report.
"""
#Creates dataframes of LAR and TS data.
lar_data = pd.DataFrame(row, index=[1])
ts_data = pd.DataFrame(ts_row, index=[0])
#Instantiates a rules checker to check the row against edits in the rules engine.
#Loads LAR and TS data to the rules engine to validate LAR data and return an edit report
self.lar_validator.load_lar_data(lar_data)
self.lar_validator.load_ts_data(ts_data)
#Runs the edits against the LAR row and produces edit check results.
for func in dir(self.lar_validator):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(self.lar_validator, func)(row)
#Returns edit check results.
return self.lar_validator.results
def make_clean_lar_row(self, ts_row):
"""Uses the lar_gen object and a TS row to create a LAR row that
passes syntax and validity edits according to the FIG."""
#Stores the stop condition and the initial number of iterations.
#stop = False
iters = 1
#Makes a new row using the lar generator.
row = self.lar_gen.make_row(lei=self.clean_config["lei"]["value"])
#Begins a loop that creates the LAR row. The loop generates the row
#with the lar_generator and then validates the row
#against the rules engine for syntax or validity edits.
#If syntax or validity edits are present, the row modified using contraints until it conforms to FIG spec
stop = False
while stop == False:
#Copies row to enable diff logging.
#this helps troubleshoot where lar generation gets stuck
row_base = row.copy()
#Creates an edit report based on the validation.
res = pd.DataFrame(self.validation(row, ts_row))
#debugging print section
print("*"*25)
print("iterations:", iters)
#print(logging.info(res[res.status=="failed"]))
print(len(res[res.status=="failed"]), "row fails")
#print((res[res.status=="failed"]))
#print(res)
#If there are no syntax or validity edits present, the stop condition is invoked and the row is returned
#and added to the LAR dataset
if len(res[res.status=="failed"])<=0:
stop = True
else:
#Logs the results of edits that have failed.
logging.info(res[res.status=="failed"])
message = "\nstarting constraints iteration {iter}".format(iter=iters)
logging.info(message)
for constraint in self.constraints:
row = row = getattr(self.lar_const, constraint)(row)#self.apply_constraint(row=row, func=constraint)
logging.info("new row")
logging.info(row)
diff = set(row_base.items()) - set(row.items())
logging.info("row diff")
logging.info(diff)
#print(diff)
#If there are syntax or validity edits present, the constraints
#are applied until the LAR data pass the FIG edit checks
#else:
#row = self.constraints_loop(constraints=self.get_const_list(), row=row, row_base=row_base)
iters += 1
return row
def create_files(self, kind):
"""Creates a clean file or a set of edit files specified by the
function call"""
#Creates TS row data.
self.ts_row = self.lar_gen.make_ts_row()
#Creates a TS row dataframe.
ts_df = pd.DataFrame(self.ts_row, index=[1])
#The following produces a clean file.
if kind == 'clean_file':
#Creates a first row of LAR to begin the dataframe.
#All other rows are concatenated to the dataframe until
#the length of the dataframe reaches the file length specified in the
#test filepaths yaml file.
for i in range(0, self.clean_config["file_length"]["value"] ):
print('Creating row {i}'.format(i=i))
if i==0:
|
else:
new_row = self.make_clean_lar_row(ts_row=self.ts_row)
new_row = pd.DataFrame(new_row, index=[1])
lar_frame = pd.concat([lar_frame, new_row], axis=0)
#Writes the file to a clean filepath specified in the test_filepaths
#configuration.
out_file_path = self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"])
out_file_bank_name = self.filepaths['clean_filename'].format(row_count=self.clean_config["file_length"]["value"] , bank_name=self.clean_config["name"]["value"])
utils.write_file(ts_input=ts_df, lar_input=lar_frame, path=out_file_path, name=out_file_bank_name)
#For error files.
if kind == 'error_files':
#Modifies clean data and outputs
#resulting files that fail specific edits.
#Instantiates the edit file maker.
file_maker = test_data(ts_schema=self.ts_schema_df,
lar_schema=self.lar_schema_df,
geographic_data=self.geographic_data)
#Pulls in the clean data filepath and name from the
#test filepaths yaml file.
ts_data, lar_data = utils.read_data_file(path=self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"]),
data_file=self.filepaths["clean_filename"].format(bank_name=self.clean_config["name"]["value"], row_count=self.clean_config["file_length"]["value"]))
#Passes clean file data to the file maker object.
file_maker.load_data_frames(ts_data, lar_data)
#Generates a file for each edit function in file maker.
edits = []
#Loops over all data modification functions.
for func in dir(file_maker):
#Checks if function is a numbered syntax or validity edit.
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
print("applying:", func)
#Applies data modification functions and produces files.
getattr(file_maker, func)()
def validate_quality_edit_file(self, quality_filename):
"""
The test file generator logic for creating quality edit test files
may cause rows to fail not only quality edits, but also
syntax or validity edits in the creation process.
This function takes a quality edit test file from the quality filepaths
directory in the test filepaths yaml, drops rows that have
syntax or validity edits, and duplicates the remaining clean rows to the
length of the original file.
The file is then saved in a new directory for quality edit test files
that also pass syntax and validity edits.
NOTE: This function works to allow LAR rows to pass syntax and validity edits and
does not validate the TS sheet.
"""
try:
#Instantiates an edit checker object with rules_engine.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Reads the files and separates data into TS and LAR frames.
ts_df, lar_df = utils.read_data_file(
path=self.filepaths['quality_filepath'].format(bank_name=self.clean_config['name']['value']),
data_file=quality_filename)
#Stores the original length of the file.
original_length = len(lar_df.index)
#Loads data into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')]
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(
bank_name=self.clean_config['name']['value']),
ts_input=ts_df,
lar_input=lar_df,
name=quality_filename)
#The case if there are rows that failed syntax or validity edits.
else:
#Creates an empty list for storing row numbers
#where edits have failed.
uli_list = []
#Iterates through each row and appends the list of ULI's
#of rows where syntax or validity edits have failed.
for index, row in report_df.iterrows():
uli_list.append(row.row_ids)
#Drops not-a-numbers from the ULI list.
if np.nan in uli_list:
uli_list.remove(np.nan)
#Creates a new list to store the ULI's without nested brackets.
new_list = []
for i in range(len(uli_list)):
for n in range(len(uli_list[i])):
new_list.append(uli_list[i][n])
#Creates a list that removes ULI's that are repeated.
unique_uli_list = []
for i in new_list:
if i not in unique_uli_list:
unique_uli_list.append(i)
#Creates a list of row numbers corresponding to the
#ULI's that have failed syntax or validity edits.
bad_rows = []
for index, row in lar_df.iterrows():
if row['uli'] in unique_uli_list:
failed_uli = row['uli']
bad_rows.append(lar_df[lar_df['uli']==failed_uli].index.values.astype(int)[0])
#Drops all rows that failed syntax or validity edits
#from the original LAR dataframe.
lar_df = lar_df.drop(bad_rows)
#Creates new lar rows to the original length of the file
#using the utils new lar rows function.
ts_df, lar_df = utils.new_lar_rows(final_row_count=original_length,
lar_df=lar_df, ts_df=ts_df)
#Writes the file to the new path for quality test files
#that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value']),
ts_input=ts_df, lar_input=lar_df, name=quality_filename)
print("Adjusting {file} to pass syntax and validity edits.".format(file=quality_filename))
print("File saved in {path}".format(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value'])))
#The condition where there are no clean rows present in the file.
except ZeroDivisionError as e:
#Prints a message to indicate that the file has not been validated.
print(e)
print("Sorry no clean file available for {file}.".format(file=quality_filename))
def edit_report(self):
"""
This function takes in a filepath and name, producing a report on
whether any rows of the data failed syntax, validity or quality edits.
The report contains among its fields the edit name, the status of the
edit, the number of rows failed by the edit, if any, and the ULI's or
NULIs (loan ID) of the rows that fail the edit.
The resulting report is saved as a csv file using configurations
from the test_filepaths.yaml file.
"""
#Instantiates the rules engine class as a checker object with a
#LAR schema, a TS schema, and geographic geographic data.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Seperates data from the filepath and filename into a TS dataframe
#and a LAR dataframe.
ts_df, lar_df = utils.read_data_file(path=self.edit_report_config['data_filepath'],
data_file=self.edit_report_config['data_filename'])
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Applies each function in the rules engine that checks for edits
#and creates a results list of edits failed or passed.
for func in dir(checker):
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a dataframe of results from the checker.
report_df = pd.DataFrame(checker.results)
#Writes the report to the filepath and name designated in
#the test_fielpaths yaml
edit_report_path = self.edit_report_config['edit_report_output_filepath']
if not os.path.exists(edit_report_path):
os.makedirs(edit_report_path)
report_df.to_csv(edit_report_path +self.edit_report_config['edit_report_output_filename'])
#Logs the result.
logging.info("Edit Report has been created in {filepath}".format(
filepath=edit_report_path))
def create_custom_row(self, dictionary, clean_filepath, clean_filename):
"""
Creates a custom clean LAR row by passing in a dictionary of columns
and new values to modify all the rows of an
existing clean file, filters the modified file for clean rows,
and then pulls the first row from the file.
Pulls rows from the clean file last generated. Suggestion that
the file pulled should be 1000 original rows or greater, to ensure
that modified clean rows can be found.
"""
#Creates a TS and LAR dataframe from the clean filepath and name
#specified.
ts_df, lar_df = utils.read_data_file(
path=clean_filepath,
data_file=clean_filename)
#Changes each column (key in the dictionary) to the new value in
# the dictionary.
for key, value in dictionary.items():
lar_df[key] = value
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df,
geographic_data=self.geographic_data)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')].copy()
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
#Takes the first row of data.
lar_row = lar_df[0:1]
#The case if there are rows that failed syntax or validity edits.
else:
#Creates a list of ULI's corresponding to rows where
#syntax or validity edits have failed.
#The resulting list is a list of lists, a list of ulis failed for each
#edit failed.
uli_list = list(report_df.row_ids)
#Converts the list of lists to a single list.
single_uli_list = []
for i in uli_list:
single_uli_list = single_uli_list + i
#Creates a list that removes ULI's that are repeated.
unique_uli_list = set(single_uli_list)
#Drops rows in the data containing syntax or validity edits.
lar_df = lar_df[lar_df.uli.isin(unique_uli_list)].copy()
#Only one row is needed for output.
#The following, takes the first row of data from the clean dataframe
lar_row = lar_df[0:1]
return(lar_row)
| first_row = self.make_clean_lar_row(ts_row=self.ts_row)
lar_frame = pd.DataFrame(first_row, index=[1]) | conditional_block |
file_generator.py | #import glob
import json
import logging
import os
import pandas as pd
import numpy as np
import yaml
import lar_constraints
import lar_generator
from rules_engine import rules_engine
from test_file_generator import test_data
import utils
class FileGenerator(object):
def __init__(self, file_paths_config='configurations/test_filepaths.yaml', geo_config_file='configurations/geographic_data.yaml',
clean_file_config='configurations/clean_file_config.yaml', edit_report_config='configurations/edit_report_config.yaml'):
print("initializing file generator")
#Loads the filepath configuration.
print("opening config files")
with open(file_paths_config) as f:
#Uses safe_load instead of load.
self.filepaths = yaml.safe_load(f)
#Loads the geographic file configuration.
with open(geo_config_file) as f:
# Uses safe_load instead of load.
self.geographic = yaml.safe_load(f)
with open(self.geographic["geographic_data_file"]) as f:
self.geo_data = yaml.safe_load(f)
#Loads the clean file configuration.
with open(clean_file_config) as f:
# Uses safe_load instead of load.
self.clean_config = yaml.safe_load(f)
self.geo_config_file = geo_config_file #set geo config file as class variable to instantiate other classes
#Loads the edit report configuration.
with open(edit_report_config) as f:
# Uses safe_load instead of load.
self.edit_report_config = yaml.safe_load(f)
print("config files loaded")
#Sets the logging parameters.
#Uses a log file name and file writing mode from the
#test_filepaths yaml.
print("configuring logging in file Generator")
logging.basicConfig(filename=self.filepaths['log_filename'],
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filemode=self.filepaths['log_mode'],
level=logging.INFO)
print("logging configured")
#Loads geographic geographic data from filepaths named in the test_filepaths
#yaml file.
print("loading geo data to file generator")
self.geographic_data = pd.read_csv(self.geographic['geographic_data_file'], delimiter='|', header=None,
names=self.geographic['file_columns'], dtype=object)
#create 5 digit County Codes from 2 digit state and 3 digit county
self.geographic_data['county_fips'] = self.geographic_data.apply(lambda x: str(x.state_code) + str(x.county), axis=1)
#create 11 digit Census Tract codes from 5 digit county and 6 digit tract
self.geographic_data["tract_fips"] = self.geographic_data.apply(lambda x: str(x.county_fips) + str(x.tracts), axis=1)
self.small_counties = list(self.geographic_data.county_fips[self.geographic_data.small_county=="S"])
print("geo data loaded to file generator")
#Loads schemas for LAR and TS.
#Schemas contain valid enumerations, including NA values, for each field in the dataset.
print("loading JSON schema files")
self.lar_schema_df = pd.DataFrame(json.load(open(self.filepaths['lar_schema_json'], "r")))
self.ts_schema_df = pd.DataFrame(json.load(open(self.filepaths['ts_schema_json'], "r")))
#Instantiates the other classes.
#lar_gen is responsible for generating data according to the values in the schema.
print("instantiating class objects")
print("lar gen loading")
self.lar_gen = lar_generator.lar_gen()
print("lar gen done")
#lar_constrains is responsible for modifying generated data so that
#the resulting file passes syntax and validity edits.
print("lar constraints loading")
self.lar_const = lar_constraints.lar_constraints()
print("lar constraints loaded")
self.constraints = []
for func in dir(self.lar_const):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
self.constraints.append(func)
#lar_validator checks a dataframe and returns a JSON with
#edit pass/fail results.
print("rules engine loading")
self.lar_validator = rules_engine(geo_config_file=self.geo_config_file)
#tracts=tracts, counties=counties, small_counties=small_counties)
print("rules engine loaded")
print("file generator initialization complete")
def validation(self, row, ts_row):
"""
Applies the syntax and validity rules engine logic
to the LAR row to create an edit report.
"""
#Creates dataframes of LAR and TS data.
lar_data = pd.DataFrame(row, index=[1])
ts_data = pd.DataFrame(ts_row, index=[0])
#Instantiates a rules checker to check the row against edits in the rules engine.
#Loads LAR and TS data to the rules engine to validate LAR data and return an edit report
self.lar_validator.load_lar_data(lar_data)
self.lar_validator.load_ts_data(ts_data)
#Runs the edits against the LAR row and produces edit check results.
for func in dir(self.lar_validator):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(self.lar_validator, func)(row)
#Returns edit check results.
return self.lar_validator.results
def make_clean_lar_row(self, ts_row):
"""Uses the lar_gen object and a TS row to create a LAR row that
passes syntax and validity edits according to the FIG."""
#Stores the stop condition and the initial number of iterations.
#stop = False
iters = 1
#Makes a new row using the lar generator.
row = self.lar_gen.make_row(lei=self.clean_config["lei"]["value"])
#Begins a loop that creates the LAR row. The loop generates the row
#with the lar_generator and then validates the row
#against the rules engine for syntax or validity edits.
#If syntax or validity edits are present, the row modified using contraints until it conforms to FIG spec
stop = False
while stop == False:
#Copies row to enable diff logging.
#this helps troubleshoot where lar generation gets stuck
row_base = row.copy()
#Creates an edit report based on the validation.
res = pd.DataFrame(self.validation(row, ts_row))
#debugging print section
print("*"*25)
print("iterations:", iters)
#print(logging.info(res[res.status=="failed"]))
print(len(res[res.status=="failed"]), "row fails")
#print((res[res.status=="failed"]))
#print(res)
#If there are no syntax or validity edits present, the stop condition is invoked and the row is returned
#and added to the LAR dataset
if len(res[res.status=="failed"])<=0:
stop = True
else:
#Logs the results of edits that have failed.
logging.info(res[res.status=="failed"])
message = "\nstarting constraints iteration {iter}".format(iter=iters)
logging.info(message)
for constraint in self.constraints:
row = row = getattr(self.lar_const, constraint)(row)#self.apply_constraint(row=row, func=constraint)
logging.info("new row")
logging.info(row)
diff = set(row_base.items()) - set(row.items())
logging.info("row diff")
logging.info(diff)
#print(diff)
#If there are syntax or validity edits present, the constraints
#are applied until the LAR data pass the FIG edit checks
#else:
#row = self.constraints_loop(constraints=self.get_const_list(), row=row, row_base=row_base)
iters += 1
return row
def | (self, kind):
"""Creates a clean file or a set of edit files specified by the
function call"""
#Creates TS row data.
self.ts_row = self.lar_gen.make_ts_row()
#Creates a TS row dataframe.
ts_df = pd.DataFrame(self.ts_row, index=[1])
#The following produces a clean file.
if kind == 'clean_file':
#Creates a first row of LAR to begin the dataframe.
#All other rows are concatenated to the dataframe until
#the length of the dataframe reaches the file length specified in the
#test filepaths yaml file.
for i in range(0, self.clean_config["file_length"]["value"] ):
print('Creating row {i}'.format(i=i))
if i==0:
first_row = self.make_clean_lar_row(ts_row=self.ts_row)
lar_frame = pd.DataFrame(first_row, index=[1])
else:
new_row = self.make_clean_lar_row(ts_row=self.ts_row)
new_row = pd.DataFrame(new_row, index=[1])
lar_frame = pd.concat([lar_frame, new_row], axis=0)
#Writes the file to a clean filepath specified in the test_filepaths
#configuration.
out_file_path = self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"])
out_file_bank_name = self.filepaths['clean_filename'].format(row_count=self.clean_config["file_length"]["value"] , bank_name=self.clean_config["name"]["value"])
utils.write_file(ts_input=ts_df, lar_input=lar_frame, path=out_file_path, name=out_file_bank_name)
#For error files.
if kind == 'error_files':
#Modifies clean data and outputs
#resulting files that fail specific edits.
#Instantiates the edit file maker.
file_maker = test_data(ts_schema=self.ts_schema_df,
lar_schema=self.lar_schema_df,
geographic_data=self.geographic_data)
#Pulls in the clean data filepath and name from the
#test filepaths yaml file.
ts_data, lar_data = utils.read_data_file(path=self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"]),
data_file=self.filepaths["clean_filename"].format(bank_name=self.clean_config["name"]["value"], row_count=self.clean_config["file_length"]["value"]))
#Passes clean file data to the file maker object.
file_maker.load_data_frames(ts_data, lar_data)
#Generates a file for each edit function in file maker.
edits = []
#Loops over all data modification functions.
for func in dir(file_maker):
#Checks if function is a numbered syntax or validity edit.
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
print("applying:", func)
#Applies data modification functions and produces files.
getattr(file_maker, func)()
def validate_quality_edit_file(self, quality_filename):
"""
The test file generator logic for creating quality edit test files
may cause rows to fail not only quality edits, but also
syntax or validity edits in the creation process.
This function takes a quality edit test file from the quality filepaths
directory in the test filepaths yaml, drops rows that have
syntax or validity edits, and duplicates the remaining clean rows to the
length of the original file.
The file is then saved in a new directory for quality edit test files
that also pass syntax and validity edits.
NOTE: This function works to allow LAR rows to pass syntax and validity edits and
does not validate the TS sheet.
"""
try:
#Instantiates an edit checker object with rules_engine.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Reads the files and separates data into TS and LAR frames.
ts_df, lar_df = utils.read_data_file(
path=self.filepaths['quality_filepath'].format(bank_name=self.clean_config['name']['value']),
data_file=quality_filename)
#Stores the original length of the file.
original_length = len(lar_df.index)
#Loads data into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')]
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(
bank_name=self.clean_config['name']['value']),
ts_input=ts_df,
lar_input=lar_df,
name=quality_filename)
#The case if there are rows that failed syntax or validity edits.
else:
#Creates an empty list for storing row numbers
#where edits have failed.
uli_list = []
#Iterates through each row and appends the list of ULI's
#of rows where syntax or validity edits have failed.
for index, row in report_df.iterrows():
uli_list.append(row.row_ids)
#Drops not-a-numbers from the ULI list.
if np.nan in uli_list:
uli_list.remove(np.nan)
#Creates a new list to store the ULI's without nested brackets.
new_list = []
for i in range(len(uli_list)):
for n in range(len(uli_list[i])):
new_list.append(uli_list[i][n])
#Creates a list that removes ULI's that are repeated.
unique_uli_list = []
for i in new_list:
if i not in unique_uli_list:
unique_uli_list.append(i)
#Creates a list of row numbers corresponding to the
#ULI's that have failed syntax or validity edits.
bad_rows = []
for index, row in lar_df.iterrows():
if row['uli'] in unique_uli_list:
failed_uli = row['uli']
bad_rows.append(lar_df[lar_df['uli']==failed_uli].index.values.astype(int)[0])
#Drops all rows that failed syntax or validity edits
#from the original LAR dataframe.
lar_df = lar_df.drop(bad_rows)
#Creates new lar rows to the original length of the file
#using the utils new lar rows function.
ts_df, lar_df = utils.new_lar_rows(final_row_count=original_length,
lar_df=lar_df, ts_df=ts_df)
#Writes the file to the new path for quality test files
#that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value']),
ts_input=ts_df, lar_input=lar_df, name=quality_filename)
print("Adjusting {file} to pass syntax and validity edits.".format(file=quality_filename))
print("File saved in {path}".format(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value'])))
#The condition where there are no clean rows present in the file.
except ZeroDivisionError as e:
#Prints a message to indicate that the file has not been validated.
print(e)
print("Sorry no clean file available for {file}.".format(file=quality_filename))
def edit_report(self):
"""
This function takes in a filepath and name, producing a report on
whether any rows of the data failed syntax, validity or quality edits.
The report contains among its fields the edit name, the status of the
edit, the number of rows failed by the edit, if any, and the ULI's or
NULIs (loan ID) of the rows that fail the edit.
The resulting report is saved as a csv file using configurations
from the test_filepaths.yaml file.
"""
#Instantiates the rules engine class as a checker object with a
#LAR schema, a TS schema, and geographic geographic data.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Seperates data from the filepath and filename into a TS dataframe
#and a LAR dataframe.
ts_df, lar_df = utils.read_data_file(path=self.edit_report_config['data_filepath'],
data_file=self.edit_report_config['data_filename'])
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Applies each function in the rules engine that checks for edits
#and creates a results list of edits failed or passed.
for func in dir(checker):
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a dataframe of results from the checker.
report_df = pd.DataFrame(checker.results)
#Writes the report to the filepath and name designated in
#the test_fielpaths yaml
edit_report_path = self.edit_report_config['edit_report_output_filepath']
if not os.path.exists(edit_report_path):
os.makedirs(edit_report_path)
report_df.to_csv(edit_report_path +self.edit_report_config['edit_report_output_filename'])
#Logs the result.
logging.info("Edit Report has been created in {filepath}".format(
filepath=edit_report_path))
def create_custom_row(self, dictionary, clean_filepath, clean_filename):
"""
Creates a custom clean LAR row by passing in a dictionary of columns
and new values to modify all the rows of an
existing clean file, filters the modified file for clean rows,
and then pulls the first row from the file.
Pulls rows from the clean file last generated. Suggestion that
the file pulled should be 1000 original rows or greater, to ensure
that modified clean rows can be found.
"""
#Creates a TS and LAR dataframe from the clean filepath and name
#specified.
ts_df, lar_df = utils.read_data_file(
path=clean_filepath,
data_file=clean_filename)
#Changes each column (key in the dictionary) to the new value in
# the dictionary.
for key, value in dictionary.items():
lar_df[key] = value
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df,
geographic_data=self.geographic_data)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')].copy()
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
#Takes the first row of data.
lar_row = lar_df[0:1]
#The case if there are rows that failed syntax or validity edits.
else:
#Creates a list of ULI's corresponding to rows where
#syntax or validity edits have failed.
#The resulting list is a list of lists, a list of ulis failed for each
#edit failed.
uli_list = list(report_df.row_ids)
#Converts the list of lists to a single list.
single_uli_list = []
for i in uli_list:
single_uli_list = single_uli_list + i
#Creates a list that removes ULI's that are repeated.
unique_uli_list = set(single_uli_list)
#Drops rows in the data containing syntax or validity edits.
lar_df = lar_df[lar_df.uli.isin(unique_uli_list)].copy()
#Only one row is needed for output.
#The following, takes the first row of data from the clean dataframe
lar_row = lar_df[0:1]
return(lar_row)
| create_files | identifier_name |
file_generator.py | #import glob
import json
import logging
import os
import pandas as pd
import numpy as np
import yaml
import lar_constraints
import lar_generator
from rules_engine import rules_engine
from test_file_generator import test_data
import utils
class FileGenerator(object):
def __init__(self, file_paths_config='configurations/test_filepaths.yaml', geo_config_file='configurations/geographic_data.yaml',
clean_file_config='configurations/clean_file_config.yaml', edit_report_config='configurations/edit_report_config.yaml'):
print("initializing file generator")
#Loads the filepath configuration.
print("opening config files")
with open(file_paths_config) as f:
#Uses safe_load instead of load.
self.filepaths = yaml.safe_load(f)
#Loads the geographic file configuration.
with open(geo_config_file) as f:
# Uses safe_load instead of load.
self.geographic = yaml.safe_load(f)
with open(self.geographic["geographic_data_file"]) as f:
self.geo_data = yaml.safe_load(f)
#Loads the clean file configuration.
with open(clean_file_config) as f:
# Uses safe_load instead of load.
self.clean_config = yaml.safe_load(f)
self.geo_config_file = geo_config_file #set geo config file as class variable to instantiate other classes
#Loads the edit report configuration.
with open(edit_report_config) as f:
# Uses safe_load instead of load.
self.edit_report_config = yaml.safe_load(f)
print("config files loaded")
#Sets the logging parameters.
#Uses a log file name and file writing mode from the
#test_filepaths yaml.
print("configuring logging in file Generator")
logging.basicConfig(filename=self.filepaths['log_filename'],
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filemode=self.filepaths['log_mode'],
level=logging.INFO)
print("logging configured")
#Loads geographic geographic data from filepaths named in the test_filepaths
#yaml file.
print("loading geo data to file generator")
self.geographic_data = pd.read_csv(self.geographic['geographic_data_file'], delimiter='|', header=None,
names=self.geographic['file_columns'], dtype=object)
#create 5 digit County Codes from 2 digit state and 3 digit county
self.geographic_data['county_fips'] = self.geographic_data.apply(lambda x: str(x.state_code) + str(x.county), axis=1)
#create 11 digit Census Tract codes from 5 digit county and 6 digit tract
self.geographic_data["tract_fips"] = self.geographic_data.apply(lambda x: str(x.county_fips) + str(x.tracts), axis=1)
self.small_counties = list(self.geographic_data.county_fips[self.geographic_data.small_county=="S"])
print("geo data loaded to file generator")
#Loads schemas for LAR and TS.
#Schemas contain valid enumerations, including NA values, for each field in the dataset.
print("loading JSON schema files")
self.lar_schema_df = pd.DataFrame(json.load(open(self.filepaths['lar_schema_json'], "r")))
self.ts_schema_df = pd.DataFrame(json.load(open(self.filepaths['ts_schema_json'], "r")))
#Instantiates the other classes.
#lar_gen is responsible for generating data according to the values in the schema.
print("instantiating class objects")
print("lar gen loading")
self.lar_gen = lar_generator.lar_gen()
print("lar gen done")
#lar_constrains is responsible for modifying generated data so that
#the resulting file passes syntax and validity edits.
print("lar constraints loading")
self.lar_const = lar_constraints.lar_constraints()
print("lar constraints loaded")
self.constraints = []
for func in dir(self.lar_const):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
self.constraints.append(func)
#lar_validator checks a dataframe and returns a JSON with
#edit pass/fail results.
print("rules engine loading")
self.lar_validator = rules_engine(geo_config_file=self.geo_config_file)
#tracts=tracts, counties=counties, small_counties=small_counties)
print("rules engine loaded")
print("file generator initialization complete")
def validation(self, row, ts_row):
"""
Applies the syntax and validity rules engine logic
to the LAR row to create an edit report.
"""
#Creates dataframes of LAR and TS data.
lar_data = pd.DataFrame(row, index=[1])
ts_data = pd.DataFrame(ts_row, index=[0])
#Instantiates a rules checker to check the row against edits in the rules engine.
#Loads LAR and TS data to the rules engine to validate LAR data and return an edit report
self.lar_validator.load_lar_data(lar_data)
self.lar_validator.load_ts_data(ts_data)
#Runs the edits against the LAR row and produces edit check results.
for func in dir(self.lar_validator):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(self.lar_validator, func)(row)
#Returns edit check results.
return self.lar_validator.results
def make_clean_lar_row(self, ts_row):
"""Uses the lar_gen object and a TS row to create a LAR row that
passes syntax and validity edits according to the FIG."""
#Stores the stop condition and the initial number of iterations.
#stop = False
iters = 1
#Makes a new row using the lar generator.
row = self.lar_gen.make_row(lei=self.clean_config["lei"]["value"])
#Begins a loop that creates the LAR row. The loop generates the row
#with the lar_generator and then validates the row
#against the rules engine for syntax or validity edits.
#If syntax or validity edits are present, the row modified using contraints until it conforms to FIG spec
stop = False
while stop == False:
#Copies row to enable diff logging.
#this helps troubleshoot where lar generation gets stuck
row_base = row.copy()
#Creates an edit report based on the validation.
res = pd.DataFrame(self.validation(row, ts_row))
#debugging print section
print("*"*25)
print("iterations:", iters)
#print(logging.info(res[res.status=="failed"]))
print(len(res[res.status=="failed"]), "row fails")
#print((res[res.status=="failed"]))
#print(res)
#If there are no syntax or validity edits present, the stop condition is invoked and the row is returned
#and added to the LAR dataset
if len(res[res.status=="failed"])<=0:
stop = True
else:
#Logs the results of edits that have failed.
logging.info(res[res.status=="failed"])
message = "\nstarting constraints iteration {iter}".format(iter=iters)
logging.info(message)
for constraint in self.constraints:
row = row = getattr(self.lar_const, constraint)(row)#self.apply_constraint(row=row, func=constraint)
logging.info("new row")
logging.info(row)
diff = set(row_base.items()) - set(row.items())
logging.info("row diff")
logging.info(diff)
#print(diff)
#If there are syntax or validity edits present, the constraints
#are applied until the LAR data pass the FIG edit checks
#else:
#row = self.constraints_loop(constraints=self.get_const_list(), row=row, row_base=row_base)
iters += 1
return row
def create_files(self, kind):
"""Creates a clean file or a set of edit files specified by the
function call"""
#Creates TS row data.
self.ts_row = self.lar_gen.make_ts_row()
#Creates a TS row dataframe.
ts_df = pd.DataFrame(self.ts_row, index=[1])
#The following produces a clean file.
if kind == 'clean_file':
#Creates a first row of LAR to begin the dataframe.
#All other rows are concatenated to the dataframe until
#the length of the dataframe reaches the file length specified in the
#test filepaths yaml file.
for i in range(0, self.clean_config["file_length"]["value"] ):
print('Creating row {i}'.format(i=i))
if i==0:
first_row = self.make_clean_lar_row(ts_row=self.ts_row)
lar_frame = pd.DataFrame(first_row, index=[1])
else:
new_row = self.make_clean_lar_row(ts_row=self.ts_row)
new_row = pd.DataFrame(new_row, index=[1])
lar_frame = pd.concat([lar_frame, new_row], axis=0)
#Writes the file to a clean filepath specified in the test_filepaths
#configuration.
out_file_path = self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"])
out_file_bank_name = self.filepaths['clean_filename'].format(row_count=self.clean_config["file_length"]["value"] , bank_name=self.clean_config["name"]["value"])
utils.write_file(ts_input=ts_df, lar_input=lar_frame, path=out_file_path, name=out_file_bank_name)
#For error files.
if kind == 'error_files':
#Modifies clean data and outputs
#resulting files that fail specific edits.
#Instantiates the edit file maker.
file_maker = test_data(ts_schema=self.ts_schema_df,
lar_schema=self.lar_schema_df,
geographic_data=self.geographic_data)
#Pulls in the clean data filepath and name from the
#test filepaths yaml file.
ts_data, lar_data = utils.read_data_file(path=self.filepaths['clean_filepath'].format(bank_name=self.clean_config["name"]["value"]),
data_file=self.filepaths["clean_filename"].format(bank_name=self.clean_config["name"]["value"], row_count=self.clean_config["file_length"]["value"]))
#Passes clean file data to the file maker object.
file_maker.load_data_frames(ts_data, lar_data)
#Generates a file for each edit function in file maker.
edits = []
#Loops over all data modification functions.
for func in dir(file_maker):
#Checks if function is a numbered syntax or validity edit.
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
print("applying:", func)
#Applies data modification functions and produces files.
getattr(file_maker, func)()
def validate_quality_edit_file(self, quality_filename):
"""
The test file generator logic for creating quality edit test files
may cause rows to fail not only quality edits, but also
syntax or validity edits in the creation process.
This function takes a quality edit test file from the quality filepaths
directory in the test filepaths yaml, drops rows that have
syntax or validity edits, and duplicates the remaining clean rows to the
length of the original file.
The file is then saved in a new directory for quality edit test files
that also pass syntax and validity edits.
NOTE: This function works to allow LAR rows to pass syntax and validity edits and
does not validate the TS sheet.
"""
try:
#Instantiates an edit checker object with rules_engine.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Reads the files and separates data into TS and LAR frames.
ts_df, lar_df = utils.read_data_file(
path=self.filepaths['quality_filepath'].format(bank_name=self.clean_config['name']['value']),
data_file=quality_filename)
#Stores the original length of the file.
original_length = len(lar_df.index)
#Loads data into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')]
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(
bank_name=self.clean_config['name']['value']),
ts_input=ts_df,
lar_input=lar_df,
name=quality_filename)
#The case if there are rows that failed syntax or validity edits.
else:
#Creates an empty list for storing row numbers
#where edits have failed.
uli_list = []
#Iterates through each row and appends the list of ULI's
#of rows where syntax or validity edits have failed.
for index, row in report_df.iterrows():
uli_list.append(row.row_ids)
#Drops not-a-numbers from the ULI list.
if np.nan in uli_list:
uli_list.remove(np.nan) | for i in range(len(uli_list)):
for n in range(len(uli_list[i])):
new_list.append(uli_list[i][n])
#Creates a list that removes ULI's that are repeated.
unique_uli_list = []
for i in new_list:
if i not in unique_uli_list:
unique_uli_list.append(i)
#Creates a list of row numbers corresponding to the
#ULI's that have failed syntax or validity edits.
bad_rows = []
for index, row in lar_df.iterrows():
if row['uli'] in unique_uli_list:
failed_uli = row['uli']
bad_rows.append(lar_df[lar_df['uli']==failed_uli].index.values.astype(int)[0])
#Drops all rows that failed syntax or validity edits
#from the original LAR dataframe.
lar_df = lar_df.drop(bad_rows)
#Creates new lar rows to the original length of the file
#using the utils new lar rows function.
ts_df, lar_df = utils.new_lar_rows(final_row_count=original_length,
lar_df=lar_df, ts_df=ts_df)
#Writes the file to the new path for quality test files
#that pass syntax and validity edits.
utils.write_file(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value']),
ts_input=ts_df, lar_input=lar_df, name=quality_filename)
print("Adjusting {file} to pass syntax and validity edits.".format(file=quality_filename))
print("File saved in {path}".format(path=self.filepaths['quality_pass_s_v_filepath'].format(bank_name=self.clean_config['name']['value'])))
#The condition where there are no clean rows present in the file.
except ZeroDivisionError as e:
#Prints a message to indicate that the file has not been validated.
print(e)
print("Sorry no clean file available for {file}.".format(file=quality_filename))
def edit_report(self):
"""
This function takes in a filepath and name, producing a report on
whether any rows of the data failed syntax, validity or quality edits.
The report contains among its fields the edit name, the status of the
edit, the number of rows failed by the edit, if any, and the ULI's or
NULIs (loan ID) of the rows that fail the edit.
The resulting report is saved as a csv file using configurations
from the test_filepaths.yaml file.
"""
#Instantiates the rules engine class as a checker object with a
#LAR schema, a TS schema, and geographic geographic data.
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df, geographic_data=self.geographic_data)
#Seperates data from the filepath and filename into a TS dataframe
#and a LAR dataframe.
ts_df, lar_df = utils.read_data_file(path=self.edit_report_config['data_filepath'],
data_file=self.edit_report_config['data_filename'])
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
#Applies each function in the rules engine that checks for edits
#and creates a results list of edits failed or passed.
for func in dir(checker):
if func[:1] in ("s", "v", "q") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a dataframe of results from the checker.
report_df = pd.DataFrame(checker.results)
#Writes the report to the filepath and name designated in
#the test_fielpaths yaml
edit_report_path = self.edit_report_config['edit_report_output_filepath']
if not os.path.exists(edit_report_path):
os.makedirs(edit_report_path)
report_df.to_csv(edit_report_path +self.edit_report_config['edit_report_output_filename'])
#Logs the result.
logging.info("Edit Report has been created in {filepath}".format(
filepath=edit_report_path))
def create_custom_row(self, dictionary, clean_filepath, clean_filename):
"""
Creates a custom clean LAR row by passing in a dictionary of columns
and new values to modify all the rows of an
existing clean file, filters the modified file for clean rows,
and then pulls the first row from the file.
Pulls rows from the clean file last generated. Suggestion that
the file pulled should be 1000 original rows or greater, to ensure
that modified clean rows can be found.
"""
#Creates a TS and LAR dataframe from the clean filepath and name
#specified.
ts_df, lar_df = utils.read_data_file(
path=clean_filepath,
data_file=clean_filename)
#Changes each column (key in the dictionary) to the new value in
# the dictionary.
for key, value in dictionary.items():
lar_df[key] = value
checker = rules_engine(lar_schema=self.lar_schema_df,
ts_schema=self.ts_schema_df,
geographic_data=self.geographic_data)
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
#Loads the TS and LAR dataframes into the checker object.
checker.load_data_frames(ts_df, lar_df)
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Produces a report as to which syntax or validity
#edits have passed or failed based on logic in the rules_engine.
for func in dir(checker):
if func[:1] in ("s", "v") and func[1:4].isdigit()==True:
getattr(checker, func)()
#Creates a results dataframe and keeps the results that
#have failed.
report_df = pd.DataFrame(checker.results)
report_df = report_df[(report_df['status']=='failed')].copy()
# The function ignores TS edits and drops results related
# to edit fails from the TS.
report_df = report_df[report_df['row_ids'] != 'TS']
if len(report_df) == 0:
#If there are no syntax or validity edits
#the data is written to a new directory for quality
#test files that pass syntax and validity edits.
#Takes the first row of data.
lar_row = lar_df[0:1]
#The case if there are rows that failed syntax or validity edits.
else:
#Creates a list of ULI's corresponding to rows where
#syntax or validity edits have failed.
#The resulting list is a list of lists, a list of ulis failed for each
#edit failed.
uli_list = list(report_df.row_ids)
#Converts the list of lists to a single list.
single_uli_list = []
for i in uli_list:
single_uli_list = single_uli_list + i
#Creates a list that removes ULI's that are repeated.
unique_uli_list = set(single_uli_list)
#Drops rows in the data containing syntax or validity edits.
lar_df = lar_df[lar_df.uli.isin(unique_uli_list)].copy()
#Only one row is needed for output.
#The following, takes the first row of data from the clean dataframe
lar_row = lar_df[0:1]
return(lar_row) |
#Creates a new list to store the ULI's without nested brackets.
new_list = [] | random_line_split |
main.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import os
import jinja2
import json
import random
import string
import logging
import hashlib
import datetime
import time
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
# autoescape escapes html from user text automatically
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class Vacation(db.Model):
confirm = db.StringProperty(required=True, indexed=True)
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True, indexed=True)
date = db.DateProperty(required=True)
nights = db.IntegerProperty(required=True, default=7)
num_people = db.IntegerProperty(required=True, default=2)
price = db.FloatProperty(required=True)
name = db.StringProperty(required=True)
flights = db.StringListProperty()
class Flight(db.Model):
name = db.StringProperty(required=True, choices=set(["outgoing", "returning", "layover"]))
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True)
date = db.DateProperty(required=True)
time = db.TimeProperty(required=True)
flight_number = db.StringProperty(required=True, indexed=True)
confirm = db.StringProperty(required=True, indexed=True)
boarding_pass = db.TextProperty(required=False)
# parent = vacation
# confirm from vacation
# global variables
tours = {"london": datetime.date(11,1,1),
"tokyo": datetime.date(6,1,1),
"barcelona": datetime.date(5,1,1)}
destinations = {"rome": "Rome, Italy",
"rio": "Rio De Janiero, Brazil",
"paris": "Paris, France",
"london": "London, England",
"tokyo": "Tokyo, Japan",
"barcelona": "Barcelona, Spain"}
weather = {"rome": "70 ° F, Sunny",
"rio": "70 ° F, Sunny",
"paris": "70 ° F, Sunny",
"london": "50 ° F, Rainy",
"tokyo": "70 ° F, Sunny",
"barcelona": "70 ° F, Sunny"}
price_per_person = {"rome": 1200,
"rio": 1000,
"paris": 1500,
"london": 2500,
"tokyo": 3000,
"barcelona": 2000}
# utility methods
def log_record(text):
return logging.error(text.upper())
def sleep(n):
return time.sleep(abs(float(n)))
# vacation methods
def get_vacation(confirmation):
return Vacation.all().filter("confirm =", confirmation).get()
def generate_confirmation():
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
while get_vacation(confirmation):
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
return confirmation
def generate_new_vacation_and_flights(depart=None, arrive=None, date=None, nights=None, num_people=None, price=None, name=None):
new_vacation = Vacation(confirm=generate_confirmation(), depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
new_vacation.put()
sleep(0.5)
flights = generate_flights(new_vacation.confirm)
sleep(0.5)
for flight in flights:
flight.boarding_pass = generate_ticket_html(flight.flight_number)
flight.put()
sleep(0.5)
return new_vacation
# flight methods
def get_flight(flight_number):
return Flight.all().filter("flight_number =", flight_number).get()
def generate_flight_number():
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
while get_flight(flight_number):
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
return flight_number
def random_time(after_time=False):
if after_time:
minutes = random.choice(range(480,660))
dt = datetime.datetime.combine(datetime.date.today(), after_time) + datetime.timedelta(minutes=minutes)
return dt.time()
else:
return datetime.time(random.choice(range(0,12)), random.choice(range(60)), random.choice(range(60)))
def return_date(date, nights=7):
return date + datetime.timedelta(days=nights)
def generate_layover():
return random.choice(['Seattle, WA', 'Dallas, TX', 'Portland, OR', 'Los Angeles, CA', 'St. Louis, MO', 'Cedar Rapids, IA', 'Fargo, ND', 'New Orleans, LA', 'Cleveland, OH'])
def generate_flights(confirmation):
vacation = get_vacation(confirmation)
layover_dest = generate_layover()
departing = Flight(parent=vacation,
name="outgoing",
depart=vacation.depart,
arrive=layover_dest,
date=vacation.date,
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
departing.put()
returning = Flight(parent=vacation,
name="returning",
depart=vacation.arrive,
arrive=vacation.depart,
date=return_date(vacation.date, vacation.nights),
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
returning.put()
layover = Flight(parent=vacation,
name="layover",
depart=layover_dest,
arrive=vacation.arrive,
date=vacation.date,
time=random_time(departing.time),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
layover.put()
vacation.flights = [departing.flight_number, layover.flight_number, returning.flight_number]
vacation.put()
return [departing, layover, returning]
def generate_ticket_html(flight_number):
flight = get_flight(flight_number)
html = "<div class='ticket'><p><span>Flight Number: %s</span><span>Confirmation Number: %s</span></p><img src='images/jquery_QR.png' alt='QR_Code' /></div>" % (flight.flight_number, flight.confirm)
return html
def get_flights_from_confirmation(confirmation):
vacation = get_vacation(confirmation)
if vacation:
log_record(str(vacation))
flights = vacation.flights
if flights:
flights = map(get_flight, flights)
return flights
# class methods
def parse_date(unicode_date):
date_list = unicode_date.split("-")
date_list = map(int, date_list)
return datetime.date(date_list[0], date_list[1], date_list[2])
class HomeHandler(Handler):
def get(self):
self.render("home.html")
class FlightsHandler(Handler):
def get(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
self.render("flights.html", confirmation=confirmation, flights=flights)
class DestinationHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render("destination.html", destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('destination.html')
def post(self):
dest = self.request.get("arrive")
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num-people")
date = self.request.get("date")
nights = self.request.get("nights")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, str(parse_date(date)), nights, price]:
log_record(item)
if dest and first_name and last_name and depart and arrive and num_people and date and nights and price:
name = first_name + " " + last_name
date = parse_date(date)
nights = int(nights)
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class TourHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render('tour.html', destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('tour.html')
def post(self):
dest = self.request.get("arrive")
log_record(dest)
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num_people")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, price]:
log_record(item)
if first_name and last_name and depart and arrive and num_people:
name = first_name + " " + last_name
date = tours[dest]
nights = 7
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class ConfirmationHandler(Handler):
def get(self):
self.render("check_confirm.html")
class CheckFlightsHandler(Handler):
|
app = webapp2.WSGIApplication([
('/', HomeHandler),
('/flights', FlightsHandler),
('/destination', DestinationHandler),
('/tour', TourHandler),
('/confirmation', ConfirmationHandler),
('/check-flights', CheckFlightsHandler)
], debug=True)
| def post(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
if flights:
self.render("check_flights.html", confirmation=confirmation, flights=flights)
else:
self.render("check_flights.html", error=True, confirmation=confirmation) | identifier_body |
main.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import os | import json
import random
import string
import logging
import hashlib
import datetime
import time
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
# autoescape escapes html from user text automatically
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class Vacation(db.Model):
confirm = db.StringProperty(required=True, indexed=True)
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True, indexed=True)
date = db.DateProperty(required=True)
nights = db.IntegerProperty(required=True, default=7)
num_people = db.IntegerProperty(required=True, default=2)
price = db.FloatProperty(required=True)
name = db.StringProperty(required=True)
flights = db.StringListProperty()
class Flight(db.Model):
name = db.StringProperty(required=True, choices=set(["outgoing", "returning", "layover"]))
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True)
date = db.DateProperty(required=True)
time = db.TimeProperty(required=True)
flight_number = db.StringProperty(required=True, indexed=True)
confirm = db.StringProperty(required=True, indexed=True)
boarding_pass = db.TextProperty(required=False)
# parent = vacation
# confirm from vacation
# global variables
tours = {"london": datetime.date(11,1,1),
"tokyo": datetime.date(6,1,1),
"barcelona": datetime.date(5,1,1)}
destinations = {"rome": "Rome, Italy",
"rio": "Rio De Janiero, Brazil",
"paris": "Paris, France",
"london": "London, England",
"tokyo": "Tokyo, Japan",
"barcelona": "Barcelona, Spain"}
weather = {"rome": "70 ° F, Sunny",
"rio": "70 ° F, Sunny",
"paris": "70 ° F, Sunny",
"london": "50 ° F, Rainy",
"tokyo": "70 ° F, Sunny",
"barcelona": "70 ° F, Sunny"}
price_per_person = {"rome": 1200,
"rio": 1000,
"paris": 1500,
"london": 2500,
"tokyo": 3000,
"barcelona": 2000}
# utility methods
def log_record(text):
return logging.error(text.upper())
def sleep(n):
return time.sleep(abs(float(n)))
# vacation methods
def get_vacation(confirmation):
return Vacation.all().filter("confirm =", confirmation).get()
def generate_confirmation():
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
while get_vacation(confirmation):
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
return confirmation
def generate_new_vacation_and_flights(depart=None, arrive=None, date=None, nights=None, num_people=None, price=None, name=None):
new_vacation = Vacation(confirm=generate_confirmation(), depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
new_vacation.put()
sleep(0.5)
flights = generate_flights(new_vacation.confirm)
sleep(0.5)
for flight in flights:
flight.boarding_pass = generate_ticket_html(flight.flight_number)
flight.put()
sleep(0.5)
return new_vacation
# flight methods
def get_flight(flight_number):
return Flight.all().filter("flight_number =", flight_number).get()
def generate_flight_number():
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
while get_flight(flight_number):
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
return flight_number
def random_time(after_time=False):
if after_time:
minutes = random.choice(range(480,660))
dt = datetime.datetime.combine(datetime.date.today(), after_time) + datetime.timedelta(minutes=minutes)
return dt.time()
else:
return datetime.time(random.choice(range(0,12)), random.choice(range(60)), random.choice(range(60)))
def return_date(date, nights=7):
return date + datetime.timedelta(days=nights)
def generate_layover():
return random.choice(['Seattle, WA', 'Dallas, TX', 'Portland, OR', 'Los Angeles, CA', 'St. Louis, MO', 'Cedar Rapids, IA', 'Fargo, ND', 'New Orleans, LA', 'Cleveland, OH'])
def generate_flights(confirmation):
vacation = get_vacation(confirmation)
layover_dest = generate_layover()
departing = Flight(parent=vacation,
name="outgoing",
depart=vacation.depart,
arrive=layover_dest,
date=vacation.date,
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
departing.put()
returning = Flight(parent=vacation,
name="returning",
depart=vacation.arrive,
arrive=vacation.depart,
date=return_date(vacation.date, vacation.nights),
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
returning.put()
layover = Flight(parent=vacation,
name="layover",
depart=layover_dest,
arrive=vacation.arrive,
date=vacation.date,
time=random_time(departing.time),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
layover.put()
vacation.flights = [departing.flight_number, layover.flight_number, returning.flight_number]
vacation.put()
return [departing, layover, returning]
def generate_ticket_html(flight_number):
flight = get_flight(flight_number)
html = "<div class='ticket'><p><span>Flight Number: %s</span><span>Confirmation Number: %s</span></p><img src='images/jquery_QR.png' alt='QR_Code' /></div>" % (flight.flight_number, flight.confirm)
return html
def get_flights_from_confirmation(confirmation):
vacation = get_vacation(confirmation)
if vacation:
log_record(str(vacation))
flights = vacation.flights
if flights:
flights = map(get_flight, flights)
return flights
# class methods
def parse_date(unicode_date):
date_list = unicode_date.split("-")
date_list = map(int, date_list)
return datetime.date(date_list[0], date_list[1], date_list[2])
class HomeHandler(Handler):
def get(self):
self.render("home.html")
class FlightsHandler(Handler):
def get(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
self.render("flights.html", confirmation=confirmation, flights=flights)
class DestinationHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render("destination.html", destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('destination.html')
def post(self):
dest = self.request.get("arrive")
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num-people")
date = self.request.get("date")
nights = self.request.get("nights")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, str(parse_date(date)), nights, price]:
log_record(item)
if dest and first_name and last_name and depart and arrive and num_people and date and nights and price:
name = first_name + " " + last_name
date = parse_date(date)
nights = int(nights)
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class TourHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render('tour.html', destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('tour.html')
def post(self):
dest = self.request.get("arrive")
log_record(dest)
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num_people")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, price]:
log_record(item)
if first_name and last_name and depart and arrive and num_people:
name = first_name + " " + last_name
date = tours[dest]
nights = 7
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class ConfirmationHandler(Handler):
def get(self):
self.render("check_confirm.html")
class CheckFlightsHandler(Handler):
def post(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
if flights:
self.render("check_flights.html", confirmation=confirmation, flights=flights)
else:
self.render("check_flights.html", error=True, confirmation=confirmation)
app = webapp2.WSGIApplication([
('/', HomeHandler),
('/flights', FlightsHandler),
('/destination', DestinationHandler),
('/tour', TourHandler),
('/confirmation', ConfirmationHandler),
('/check-flights', CheckFlightsHandler)
], debug=True) | import jinja2
| random_line_split |
main.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import os
import jinja2
import json
import random
import string
import logging
import hashlib
import datetime
import time
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
# autoescape escapes html from user text automatically
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class Vacation(db.Model):
confirm = db.StringProperty(required=True, indexed=True)
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True, indexed=True)
date = db.DateProperty(required=True)
nights = db.IntegerProperty(required=True, default=7)
num_people = db.IntegerProperty(required=True, default=2)
price = db.FloatProperty(required=True)
name = db.StringProperty(required=True)
flights = db.StringListProperty()
class Flight(db.Model):
name = db.StringProperty(required=True, choices=set(["outgoing", "returning", "layover"]))
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True)
date = db.DateProperty(required=True)
time = db.TimeProperty(required=True)
flight_number = db.StringProperty(required=True, indexed=True)
confirm = db.StringProperty(required=True, indexed=True)
boarding_pass = db.TextProperty(required=False)
# parent = vacation
# confirm from vacation
# global variables
tours = {"london": datetime.date(11,1,1),
"tokyo": datetime.date(6,1,1),
"barcelona": datetime.date(5,1,1)}
destinations = {"rome": "Rome, Italy",
"rio": "Rio De Janiero, Brazil",
"paris": "Paris, France",
"london": "London, England",
"tokyo": "Tokyo, Japan",
"barcelona": "Barcelona, Spain"}
weather = {"rome": "70 ° F, Sunny",
"rio": "70 ° F, Sunny",
"paris": "70 ° F, Sunny",
"london": "50 ° F, Rainy",
"tokyo": "70 ° F, Sunny",
"barcelona": "70 ° F, Sunny"}
price_per_person = {"rome": 1200,
"rio": 1000,
"paris": 1500,
"london": 2500,
"tokyo": 3000,
"barcelona": 2000}
# utility methods
def log_record(text):
return logging.error(text.upper())
def sleep(n):
return time.sleep(abs(float(n)))
# vacation methods
def get_vacation(confirmation):
return Vacation.all().filter("confirm =", confirmation).get()
def generate_confirmation():
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
while get_vacation(confirmation):
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
return confirmation
def generate_new_vacation_and_flights(depart=None, arrive=None, date=None, nights=None, num_people=None, price=None, name=None):
new_vacation = Vacation(confirm=generate_confirmation(), depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
new_vacation.put()
sleep(0.5)
flights = generate_flights(new_vacation.confirm)
sleep(0.5)
for flight in flights:
flight.boarding_pass = generate_ticket_html(flight.flight_number)
flight.put()
sleep(0.5)
return new_vacation
# flight methods
def get_flight(flight_number):
return Flight.all().filter("flight_number =", flight_number).get()
def generate_flight_number():
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
while get_flight(flight_number):
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
return flight_number
def random_time(after_time=False):
if after_time:
minutes = random.choice(range(480,660))
dt = datetime.datetime.combine(datetime.date.today(), after_time) + datetime.timedelta(minutes=minutes)
return dt.time()
else:
return datetime.time(random.choice(range(0,12)), random.choice(range(60)), random.choice(range(60)))
def return_date(date, nights=7):
return date + datetime.timedelta(days=nights)
def generate_layover():
return random.choice(['Seattle, WA', 'Dallas, TX', 'Portland, OR', 'Los Angeles, CA', 'St. Louis, MO', 'Cedar Rapids, IA', 'Fargo, ND', 'New Orleans, LA', 'Cleveland, OH'])
def generate_flights(confirmation):
vacation = get_vacation(confirmation)
layover_dest = generate_layover()
departing = Flight(parent=vacation,
name="outgoing",
depart=vacation.depart,
arrive=layover_dest,
date=vacation.date,
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
departing.put()
returning = Flight(parent=vacation,
name="returning",
depart=vacation.arrive,
arrive=vacation.depart,
date=return_date(vacation.date, vacation.nights),
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
returning.put()
layover = Flight(parent=vacation,
name="layover",
depart=layover_dest,
arrive=vacation.arrive,
date=vacation.date,
time=random_time(departing.time),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
layover.put()
vacation.flights = [departing.flight_number, layover.flight_number, returning.flight_number]
vacation.put()
return [departing, layover, returning]
def generate_ticket_html(flight_number):
flight = get_flight(flight_number)
html = "<div class='ticket'><p><span>Flight Number: %s</span><span>Confirmation Number: %s</span></p><img src='images/jquery_QR.png' alt='QR_Code' /></div>" % (flight.flight_number, flight.confirm)
return html
def get_flights_from_confirmation(confirmation):
vacation = get_vacation(confirmation)
if vacation:
log_record(str(vacation))
flights = vacation.flights
if flights:
flights = map(get_flight, flights)
return flights
# class methods
def parse_date(unicode_date):
date_list = unicode_date.split("-")
date_list = map(int, date_list)
return datetime.date(date_list[0], date_list[1], date_list[2])
class HomeHandler(Handler):
def get(self):
self.render("home.html")
class FlightsHandler(Handler):
def get(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
self.render("flights.html", confirmation=confirmation, flights=flights)
class DestinationHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render("destination.html", destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('destination.html')
def post(self):
dest = self.request.get("arrive")
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num-people")
date = self.request.get("date")
nights = self.request.get("nights")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, str(parse_date(date)), nights, price]:
log_record(item)
if dest and first_name and last_name and depart and arrive and num_people and date and nights and price:
name = first_name + " " + last_name
date = parse_date(date)
nights = int(nights)
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class TourHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render('tour.html', destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('tour.html')
def post(self):
dest = self.request.get("arrive")
log_record(dest)
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num_people")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, price]:
log_record(item)
if first_name and last_name and depart and arrive and num_people:
name = first_name + " " + last_name
date = tours[dest]
nights = 7
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class ConfirmationHandler(Handler):
def get(self):
self.render("check_confirm.html")
class CheckFlightsHandler(Handler):
def post(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
if flights:
|
else:
self.render("check_flights.html", error=True, confirmation=confirmation)
app = webapp2.WSGIApplication([
('/', HomeHandler),
('/flights', FlightsHandler),
('/destination', DestinationHandler),
('/tour', TourHandler),
('/confirmation', ConfirmationHandler),
('/check-flights', CheckFlightsHandler)
], debug=True)
| self.render("check_flights.html", confirmation=confirmation, flights=flights) | conditional_block |
main.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import os
import jinja2
import json
import random
import string
import logging
import hashlib
import datetime
import time
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
# autoescape escapes html from user text automatically
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class Vacation(db.Model):
confirm = db.StringProperty(required=True, indexed=True)
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True, indexed=True)
date = db.DateProperty(required=True)
nights = db.IntegerProperty(required=True, default=7)
num_people = db.IntegerProperty(required=True, default=2)
price = db.FloatProperty(required=True)
name = db.StringProperty(required=True)
flights = db.StringListProperty()
class Flight(db.Model):
name = db.StringProperty(required=True, choices=set(["outgoing", "returning", "layover"]))
depart = db.StringProperty(required=True, default="San Francisco, CA")
arrive = db.StringProperty(required=True)
date = db.DateProperty(required=True)
time = db.TimeProperty(required=True)
flight_number = db.StringProperty(required=True, indexed=True)
confirm = db.StringProperty(required=True, indexed=True)
boarding_pass = db.TextProperty(required=False)
# parent = vacation
# confirm from vacation
# global variables
tours = {"london": datetime.date(11,1,1),
"tokyo": datetime.date(6,1,1),
"barcelona": datetime.date(5,1,1)}
destinations = {"rome": "Rome, Italy",
"rio": "Rio De Janiero, Brazil",
"paris": "Paris, France",
"london": "London, England",
"tokyo": "Tokyo, Japan",
"barcelona": "Barcelona, Spain"}
weather = {"rome": "70 ° F, Sunny",
"rio": "70 ° F, Sunny",
"paris": "70 ° F, Sunny",
"london": "50 ° F, Rainy",
"tokyo": "70 ° F, Sunny",
"barcelona": "70 ° F, Sunny"}
price_per_person = {"rome": 1200,
"rio": 1000,
"paris": 1500,
"london": 2500,
"tokyo": 3000,
"barcelona": 2000}
# utility methods
def log_record(text):
return logging.error(text.upper())
def sleep(n):
return time.sleep(abs(float(n)))
# vacation methods
def get_vacation(confirmation):
return Vacation.all().filter("confirm =", confirmation).get()
def generate_confirmation():
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
while get_vacation(confirmation):
confirmation = ''.join(random.choice(string.hexdigits.upper()) for x in xrange(6))
return confirmation
def generate_new_vacation_and_flights(depart=None, arrive=None, date=None, nights=None, num_people=None, price=None, name=None):
new_vacation = Vacation(confirm=generate_confirmation(), depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
new_vacation.put()
sleep(0.5)
flights = generate_flights(new_vacation.confirm)
sleep(0.5)
for flight in flights:
flight.boarding_pass = generate_ticket_html(flight.flight_number)
flight.put()
sleep(0.5)
return new_vacation
# flight methods
def get_flight(flight_number):
return Flight.all().filter("flight_number =", flight_number).get()
def generate_flight_number():
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
while get_flight(flight_number):
flight_number = ''.join(random.choice(string.letters.upper()) for x in xrange(2)) + ''.join(random.choice(string.digits) for x in xrange(6))
return flight_number
def random_time(after_time=False):
if after_time:
minutes = random.choice(range(480,660))
dt = datetime.datetime.combine(datetime.date.today(), after_time) + datetime.timedelta(minutes=minutes)
return dt.time()
else:
return datetime.time(random.choice(range(0,12)), random.choice(range(60)), random.choice(range(60)))
def return_date(date, nights=7):
return date + datetime.timedelta(days=nights)
def generate_layover():
return random.choice(['Seattle, WA', 'Dallas, TX', 'Portland, OR', 'Los Angeles, CA', 'St. Louis, MO', 'Cedar Rapids, IA', 'Fargo, ND', 'New Orleans, LA', 'Cleveland, OH'])
def generate_flights(confirmation):
vacation = get_vacation(confirmation)
layover_dest = generate_layover()
departing = Flight(parent=vacation,
name="outgoing",
depart=vacation.depart,
arrive=layover_dest,
date=vacation.date,
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
departing.put()
returning = Flight(parent=vacation,
name="returning",
depart=vacation.arrive,
arrive=vacation.depart,
date=return_date(vacation.date, vacation.nights),
time=random_time(),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
returning.put()
layover = Flight(parent=vacation,
name="layover",
depart=layover_dest,
arrive=vacation.arrive,
date=vacation.date,
time=random_time(departing.time),
flight_number=generate_flight_number(),
confirm=vacation.confirm)
layover.put()
vacation.flights = [departing.flight_number, layover.flight_number, returning.flight_number]
vacation.put()
return [departing, layover, returning]
def | (flight_number):
flight = get_flight(flight_number)
html = "<div class='ticket'><p><span>Flight Number: %s</span><span>Confirmation Number: %s</span></p><img src='images/jquery_QR.png' alt='QR_Code' /></div>" % (flight.flight_number, flight.confirm)
return html
def get_flights_from_confirmation(confirmation):
vacation = get_vacation(confirmation)
if vacation:
log_record(str(vacation))
flights = vacation.flights
if flights:
flights = map(get_flight, flights)
return flights
# class methods
def parse_date(unicode_date):
date_list = unicode_date.split("-")
date_list = map(int, date_list)
return datetime.date(date_list[0], date_list[1], date_list[2])
class HomeHandler(Handler):
def get(self):
self.render("home.html")
class FlightsHandler(Handler):
def get(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
self.render("flights.html", confirmation=confirmation, flights=flights)
class DestinationHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render("destination.html", destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('destination.html')
def post(self):
dest = self.request.get("arrive")
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num-people")
date = self.request.get("date")
nights = self.request.get("nights")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, str(parse_date(date)), nights, price]:
log_record(item)
if dest and first_name and last_name and depart and arrive and num_people and date and nights and price:
name = first_name + " " + last_name
date = parse_date(date)
nights = int(nights)
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class TourHandler(Handler):
def get(self):
dest = self.request.get("dest")
if dest:
self.render('tour.html', destination=destinations[dest], dest=dest, price=price_per_person[dest])
else:
self.render('tour.html')
def post(self):
dest = self.request.get("arrive")
log_record(dest)
first_name = self.request.get("first-name")
last_name = self.request.get("last-name")
depart = self.request.get("depart")
arrive = destinations[dest]
num_people = self.request.get("num_people")
price = self.request.get("price")
for item in [dest, first_name, last_name, depart, arrive, num_people, price]:
log_record(item)
if first_name and last_name and depart and arrive and num_people:
name = first_name + " " + last_name
date = tours[dest]
nights = 7
price = float(price)
num_people = int(num_people)
vacation = generate_new_vacation_and_flights(depart=depart, arrive=arrive, date=date, nights=nights, num_people=num_people, price=price, name=name)
self.redirect("/flights?confirmation="+vacation.confirm)
else:
self.render('tour.html', destination=destinations[dest], dest=dest, error="One of the boxes isn't filled in!")
class ConfirmationHandler(Handler):
def get(self):
self.render("check_confirm.html")
class CheckFlightsHandler(Handler):
def post(self):
confirmation = self.request.get('confirmation')
flights = get_flights_from_confirmation(confirmation)
if flights:
self.render("check_flights.html", confirmation=confirmation, flights=flights)
else:
self.render("check_flights.html", error=True, confirmation=confirmation)
app = webapp2.WSGIApplication([
('/', HomeHandler),
('/flights', FlightsHandler),
('/destination', DestinationHandler),
('/tour', TourHandler),
('/confirmation', ConfirmationHandler),
('/check-flights', CheckFlightsHandler)
], debug=True)
| generate_ticket_html | identifier_name |
binarize.py | import argparse
import math
import sys
import cv2
import os
import tensorflow as tf
import numpy as np
from fcnvgg import FCNVGG
from utils import *
from glob import glob
from tqdm import tqdm
from relative_darkness import relative_darkness
from thickness_score import thickness_score
from tensorflow.python import pywrap_tensorflow
import matplotlib as plt
#-------------------------------------------------------------------------------
PATCH_SIZE = 224
def | (img):
# 边缘用平均值填充,而不是用255填充
subwindows = []
positions = []
height = img.shape[0]
width = img.shape[1]
if not img.shape[0] % PATCH_SIZE == 0:
height = img.shape[0] - img.shape[0] % PATCH_SIZE + PATCH_SIZE
if not img.shape[1] % PATCH_SIZE == 0:
width = img.shape[1] - img.shape[1] % PATCH_SIZE + PATCH_SIZE
expanded_img = np.zeros((height, width, 3))
for row in range(height):
for col in range(width):
if row >= img.shape[0] or col >= img.shape[1]:
if col == img.shape[1] and row < img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(col-5, col):
average[c] += img[row][i][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif col > img.shape[1] and row < img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[row][img.shape[1]][c]
if row == img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(row-5, row):
average[c] += expanded_img[i][col][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif row > img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[img.shape[0]][col][c]
else:
for c in range(3):
expanded_img[row][col][c] = img[row][col][c]
# cv2.imwrite('expanded.png', expanded_img)
for y in range(int(height/PATCH_SIZE)):
for x in range(int(width/PATCH_SIZE)):
pos = (x, y)
sub_img = expanded_img[y*PATCH_SIZE:(y+1)*PATCH_SIZE, x*PATCH_SIZE:(x+1)*PATCH_SIZE]
subwindows.append(sub_img)
positions.append(pos)
return len(subwindows), zip(subwindows, positions)
def sample_generator(samples, image_size, batch_size, auto_scale=False, split=False):
# for offset in range(0, len(samples), batch_size):
# files = samples[offset:offset+batch_size]
image_file = samples[0] # 一次只处理一张图片
image = cv2.imread(image_file)
_, subwindows = get_subwindows(image)
batch_count = 0
for each in subwindows:
window = each[0]
pos = each[1]
patchs = [] # 每个batch只有一张图片
patch_names = []
batch_count += 1
if auto_scale:
score = thickness_score(window, name=str(batch_count)+'_'+os.path.basename(image_file))
if score == -1:
pass
# 划分阈值
if score < 1:
window = cv2.resize(window, (896, 896))
elif score >= 1 and score < 1.5:
window = cv2.resize(window, (448, 448))
elif score >= 1.5 and score < 2:
window = cv2.resize(window, (256, 256))
print('shape:', window.shape)
else:
pass
patchs.append(window.astype(np.float32))
patch_names.append(str(pos[0])+'-'+str(pos[1])+'@'+os.path.basename(image_file))
yield np.array(patchs), patch_names
#-------------------------------------------------------------------------------
# Parse commandline
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Generate data based on a model')
parser.add_argument('--name', default='aws1',
help='project name')
parser.add_argument('--checkpoint', type=int, default=-1,
help='checkpoint to restore; -1 is the most recent')
parser.add_argument('--samples-dir', default='aws1',
help='directory containing samples to analyse')
parser.add_argument('--output-dir', default='test-output',
help='directory for the resulting images')
parser.add_argument('--batch-size', type=int, default=1,
help='batch size')
parser.add_argument('--data-source', default='dibco',
help='data source')
parser.add_argument('--autoscale', default=False,
help='autoscale')
args = parser.parse_args()
#-------------------------------------------------------------------------------
# Check if we can get the checkpoint
#-------------------------------------------------------------------------------
state = tf.train.get_checkpoint_state(args.name)
if state is None:
print('[!] No network state found in ' + args.name)
sys.exit(1)
try:
checkpoint_file = state.all_model_checkpoint_paths[args.checkpoint]
except IndexError:
print('[!] Cannot find checkpoint ' + str(args.checkpoint_file))
sys.exit(1)
metagraph_file = checkpoint_file + '.meta'
if not os.path.exists(metagraph_file):
print('[!] Cannot find metagraph ' + metagraph_file)
sys.exit(1)
#-------------------------------------------------------------------------------
# Load the data source
#-------------------------------------------------------------------------------
try:
source = load_data_source(args.data_source)
label_colors = source.label_colors
except (ImportError, AttributeError, RuntimeError) as e:
print('[!] Unable to load data source:', str(e))
sys.exit(1)
#-------------------------------------------------------------------------------
# Create a list of files to analyse and make sure that the output directory
# exists
#-------------------------------------------------------------------------------
samples = glob(args.samples_dir + '/*.png')
if len(samples) == 0:
print('[!] No input samples found in', args.samples_dir)
sys.exit(1)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
#-------------------------------------------------------------------------------
# Print parameters
#-------------------------------------------------------------------------------
print('[i] Project name: ', args.name)
print('[i] Network checkpoint:', checkpoint_file)
print('[i] Metagraph file: ', metagraph_file)
print('[i] Number of samples: ', len(samples))
print('[i] Output directory: ', args.output_dir)
print('[i] Image size: ', source.image_size)
print('[i] # classes: ', source.num_classes)
print('[i] Batch size: ', args.batch_size)
#-------------------------------------------------------------------------------
# Create the network
#-------------------------------------------------------------------------------
with tf.Session() as sess:
print('[i] Creating the model...')
rd_feature = tf.placeholder(
tf.float32, shape=[None, None, None, 3], name='rd_feature_infer')
net = FCNVGG(sess, rd_feature)
net.build_from_metagraph(metagraph_file, checkpoint_file)
#---------------------------------------------------------------------------
# Process the images
#---------------------------------------------------------------------------
generator = sample_generator(samples, source.image_size, args.batch_size, auto_scale=args.autoscale, split=True)
# n_sample_batches = int(math.ceil(len(samples)/args.batch_size))
n_sample_batches, _ = get_subwindows(cv2.imread(samples[0]))
description = '[i] Processing samples'
whole_img = np.zeros((cv2.imread(samples[0]).shape[0]*2, cv2.imread(samples[0]).shape[1]*2, 3))
for x, names in tqdm(generator, total=n_sample_batches,
desc=description, unit='batches'):
vgg_layer1 = sess.graph.get_tensor_by_name('pool3:0')
feed = {net.image_input: x,
net.keep_prob: 1}
layer1, img_labels = sess.run([vgg_layer1, net.classes], feed_dict=feed)
imgs = draw_labels_batch(x, img_labels, label_colors, False)
#---------------------------------------------------------------------------
# 输出特征映射
#---------------------------------------------------------------------------
# chs = 256
# range_stop = chs // 3
# size_splits = [3 for i in range(0, range_stop)]
# if len(size_splits) * 3 < chs:
# size_splits.append(chs % 3)
# layer1_split = tf.split(layer1, num_or_size_splits=size_splits, axis=3) # conv1.shape = [128,24,24,64]
# layer1_concats_1 = []
# concat_step = len(layer1_split) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_split[i], layer1_split[i + 1]], axis=1)
# layer1_concats_1.append(concat)
# layer1_concats_2 = []
# concat_step = len(layer1_concats_1) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_concats_1[i], layer1_concats_1[i + 1]], axis=2)
# layer1_concats_2.append(concat)
# layer1_concats = tf.concat(layer1_concats_2, axis=0)
# print(layer1_concats.shape)
# layer1_np = layer1_concats.eval()
# # print(layer1_np[0])
# cv2.imwrite('featuremap' + names[0] + '.png', layer1_np[0])
# print("visualize finish.")
pos = names[0].split('@')[0]
pos_x = int(pos.split('-')[0])
pos_y = int(pos.split('-')[1])
cv2.imwrite(args.output_dir + '/' + names[0], cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE)))
whole_img[pos_y*PATCH_SIZE:(pos_y+1)*PATCH_SIZE, pos_x*PATCH_SIZE:(pos_x+1)*PATCH_SIZE, :] = \
cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE))
whole_img = whole_img[0:cv2.imread(samples[0]).shape[0], 0:cv2.imread(samples[0]).shape[1]]
cv2.imwrite(args.output_dir + '/' + 'whole_' + names[0], whole_img)
print('[i] All done.')
| get_subwindows | identifier_name |
binarize.py | import argparse
import math
import sys
import cv2
import os
import tensorflow as tf
import numpy as np
from fcnvgg import FCNVGG
from utils import *
from glob import glob
from tqdm import tqdm
from relative_darkness import relative_darkness
from thickness_score import thickness_score
from tensorflow.python import pywrap_tensorflow
import matplotlib as plt
#-------------------------------------------------------------------------------
PATCH_SIZE = 224
def get_subwindows(img):
# 边缘用平均值填充,而不是用255填充
subwindows = []
positions = []
height = img.shape[0]
width = img.shape[1]
if not img.shape[0] % PATCH_SIZE == 0:
height = img.shape[0] - img.shape[0] % PATCH_SIZE + PATCH_SIZE | for row in range(height):
for col in range(width):
if row >= img.shape[0] or col >= img.shape[1]:
if col == img.shape[1] and row < img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(col-5, col):
average[c] += img[row][i][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif col > img.shape[1] and row < img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[row][img.shape[1]][c]
if row == img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(row-5, row):
average[c] += expanded_img[i][col][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif row > img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[img.shape[0]][col][c]
else:
for c in range(3):
expanded_img[row][col][c] = img[row][col][c]
# cv2.imwrite('expanded.png', expanded_img)
for y in range(int(height/PATCH_SIZE)):
for x in range(int(width/PATCH_SIZE)):
pos = (x, y)
sub_img = expanded_img[y*PATCH_SIZE:(y+1)*PATCH_SIZE, x*PATCH_SIZE:(x+1)*PATCH_SIZE]
subwindows.append(sub_img)
positions.append(pos)
return len(subwindows), zip(subwindows, positions)
def sample_generator(samples, image_size, batch_size, auto_scale=False, split=False):
# for offset in range(0, len(samples), batch_size):
# files = samples[offset:offset+batch_size]
image_file = samples[0] # 一次只处理一张图片
image = cv2.imread(image_file)
_, subwindows = get_subwindows(image)
batch_count = 0
for each in subwindows:
window = each[0]
pos = each[1]
patchs = [] # 每个batch只有一张图片
patch_names = []
batch_count += 1
if auto_scale:
score = thickness_score(window, name=str(batch_count)+'_'+os.path.basename(image_file))
if score == -1:
pass
# 划分阈值
if score < 1:
window = cv2.resize(window, (896, 896))
elif score >= 1 and score < 1.5:
window = cv2.resize(window, (448, 448))
elif score >= 1.5 and score < 2:
window = cv2.resize(window, (256, 256))
print('shape:', window.shape)
else:
pass
patchs.append(window.astype(np.float32))
patch_names.append(str(pos[0])+'-'+str(pos[1])+'@'+os.path.basename(image_file))
yield np.array(patchs), patch_names
#-------------------------------------------------------------------------------
# Parse commandline
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Generate data based on a model')
parser.add_argument('--name', default='aws1',
help='project name')
parser.add_argument('--checkpoint', type=int, default=-1,
help='checkpoint to restore; -1 is the most recent')
parser.add_argument('--samples-dir', default='aws1',
help='directory containing samples to analyse')
parser.add_argument('--output-dir', default='test-output',
help='directory for the resulting images')
parser.add_argument('--batch-size', type=int, default=1,
help='batch size')
parser.add_argument('--data-source', default='dibco',
help='data source')
parser.add_argument('--autoscale', default=False,
help='autoscale')
args = parser.parse_args()
#-------------------------------------------------------------------------------
# Check if we can get the checkpoint
#-------------------------------------------------------------------------------
state = tf.train.get_checkpoint_state(args.name)
if state is None:
print('[!] No network state found in ' + args.name)
sys.exit(1)
try:
checkpoint_file = state.all_model_checkpoint_paths[args.checkpoint]
except IndexError:
print('[!] Cannot find checkpoint ' + str(args.checkpoint_file))
sys.exit(1)
metagraph_file = checkpoint_file + '.meta'
if not os.path.exists(metagraph_file):
print('[!] Cannot find metagraph ' + metagraph_file)
sys.exit(1)
#-------------------------------------------------------------------------------
# Load the data source
#-------------------------------------------------------------------------------
try:
source = load_data_source(args.data_source)
label_colors = source.label_colors
except (ImportError, AttributeError, RuntimeError) as e:
print('[!] Unable to load data source:', str(e))
sys.exit(1)
#-------------------------------------------------------------------------------
# Create a list of files to analyse and make sure that the output directory
# exists
#-------------------------------------------------------------------------------
samples = glob(args.samples_dir + '/*.png')
if len(samples) == 0:
print('[!] No input samples found in', args.samples_dir)
sys.exit(1)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
#-------------------------------------------------------------------------------
# Print parameters
#-------------------------------------------------------------------------------
print('[i] Project name: ', args.name)
print('[i] Network checkpoint:', checkpoint_file)
print('[i] Metagraph file: ', metagraph_file)
print('[i] Number of samples: ', len(samples))
print('[i] Output directory: ', args.output_dir)
print('[i] Image size: ', source.image_size)
print('[i] # classes: ', source.num_classes)
print('[i] Batch size: ', args.batch_size)
#-------------------------------------------------------------------------------
# Create the network
#-------------------------------------------------------------------------------
with tf.Session() as sess:
print('[i] Creating the model...')
rd_feature = tf.placeholder(
tf.float32, shape=[None, None, None, 3], name='rd_feature_infer')
net = FCNVGG(sess, rd_feature)
net.build_from_metagraph(metagraph_file, checkpoint_file)
#---------------------------------------------------------------------------
# Process the images
#---------------------------------------------------------------------------
generator = sample_generator(samples, source.image_size, args.batch_size, auto_scale=args.autoscale, split=True)
# n_sample_batches = int(math.ceil(len(samples)/args.batch_size))
n_sample_batches, _ = get_subwindows(cv2.imread(samples[0]))
description = '[i] Processing samples'
whole_img = np.zeros((cv2.imread(samples[0]).shape[0]*2, cv2.imread(samples[0]).shape[1]*2, 3))
for x, names in tqdm(generator, total=n_sample_batches,
desc=description, unit='batches'):
vgg_layer1 = sess.graph.get_tensor_by_name('pool3:0')
feed = {net.image_input: x,
net.keep_prob: 1}
layer1, img_labels = sess.run([vgg_layer1, net.classes], feed_dict=feed)
imgs = draw_labels_batch(x, img_labels, label_colors, False)
#---------------------------------------------------------------------------
# 输出特征映射
#---------------------------------------------------------------------------
# chs = 256
# range_stop = chs // 3
# size_splits = [3 for i in range(0, range_stop)]
# if len(size_splits) * 3 < chs:
# size_splits.append(chs % 3)
# layer1_split = tf.split(layer1, num_or_size_splits=size_splits, axis=3) # conv1.shape = [128,24,24,64]
# layer1_concats_1 = []
# concat_step = len(layer1_split) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_split[i], layer1_split[i + 1]], axis=1)
# layer1_concats_1.append(concat)
# layer1_concats_2 = []
# concat_step = len(layer1_concats_1) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_concats_1[i], layer1_concats_1[i + 1]], axis=2)
# layer1_concats_2.append(concat)
# layer1_concats = tf.concat(layer1_concats_2, axis=0)
# print(layer1_concats.shape)
# layer1_np = layer1_concats.eval()
# # print(layer1_np[0])
# cv2.imwrite('featuremap' + names[0] + '.png', layer1_np[0])
# print("visualize finish.")
pos = names[0].split('@')[0]
pos_x = int(pos.split('-')[0])
pos_y = int(pos.split('-')[1])
cv2.imwrite(args.output_dir + '/' + names[0], cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE)))
whole_img[pos_y*PATCH_SIZE:(pos_y+1)*PATCH_SIZE, pos_x*PATCH_SIZE:(pos_x+1)*PATCH_SIZE, :] = \
cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE))
whole_img = whole_img[0:cv2.imread(samples[0]).shape[0], 0:cv2.imread(samples[0]).shape[1]]
cv2.imwrite(args.output_dir + '/' + 'whole_' + names[0], whole_img)
print('[i] All done.') | if not img.shape[1] % PATCH_SIZE == 0:
width = img.shape[1] - img.shape[1] % PATCH_SIZE + PATCH_SIZE
expanded_img = np.zeros((height, width, 3)) | random_line_split |
binarize.py | import argparse
import math
import sys
import cv2
import os
import tensorflow as tf
import numpy as np
from fcnvgg import FCNVGG
from utils import *
from glob import glob
from tqdm import tqdm
from relative_darkness import relative_darkness
from thickness_score import thickness_score
from tensorflow.python import pywrap_tensorflow
import matplotlib as plt
#-------------------------------------------------------------------------------
PATCH_SIZE = 224
def get_subwindows(img):
# 边缘用平均值填充,而不是用255填充
subwindows = []
positions = []
height = img.shape[0]
width = img.shape[1]
if not img.shape[0] % PATCH_SIZE == 0:
height = img.shape[0] - img.shape[0] % PATCH_SIZE + PATCH_SIZE
if not img.shape[1] % PATCH_SIZE == 0:
width = img.shape[1] - img.shape[1] % PATCH_SIZE + PATCH_SIZE
expanded_img = np.zeros((height, width, 3))
for row in range(height):
for col in range(width):
if row >= img.shape[0] or col >= img.shape[1]:
if col == img.shape[1] and row < img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(col-5, col):
average[c] += img[row][i][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif col > img.shape[1] and row < img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[row][img.shape[1]][c]
if row == img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(row-5, row):
average[c] += expanded_img[i][col][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif row > img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[img.shape[0]][col][c]
else:
for c in range(3):
expanded_img[row][col][c] = img[row][col][c]
# cv2.imwrite('expanded.png', expanded_img)
for y in range(int(height/PATCH_SIZE)):
for x in range(int(width/PATCH_SIZE)):
pos = (x, y)
sub_img = expanded_img[y*PATCH_SIZE:(y+1)*PATCH_SIZE, x*PATCH_SIZE:(x+1)*PATCH_SIZE]
subwindows.append(sub_img)
positions.append(pos)
return len(subwindows), zip(subwindows, positions)
def sample_generator(samples, image_size, batch_size, auto_scale=False, split=False):
# for offset in range(0, len(samples), batch_size):
# files = samples[offset:offset+batch_size]
image_file = samples[0] # 一次只处理一张图片
image = cv2.im | arse commandline
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Generate data based on a model')
parser.add_argument('--name', default='aws1',
help='project name')
parser.add_argument('--checkpoint', type=int, default=-1,
help='checkpoint to restore; -1 is the most recent')
parser.add_argument('--samples-dir', default='aws1',
help='directory containing samples to analyse')
parser.add_argument('--output-dir', default='test-output',
help='directory for the resulting images')
parser.add_argument('--batch-size', type=int, default=1,
help='batch size')
parser.add_argument('--data-source', default='dibco',
help='data source')
parser.add_argument('--autoscale', default=False,
help='autoscale')
args = parser.parse_args()
#-------------------------------------------------------------------------------
# Check if we can get the checkpoint
#-------------------------------------------------------------------------------
state = tf.train.get_checkpoint_state(args.name)
if state is None:
print('[!] No network state found in ' + args.name)
sys.exit(1)
try:
checkpoint_file = state.all_model_checkpoint_paths[args.checkpoint]
except IndexError:
print('[!] Cannot find checkpoint ' + str(args.checkpoint_file))
sys.exit(1)
metagraph_file = checkpoint_file + '.meta'
if not os.path.exists(metagraph_file):
print('[!] Cannot find metagraph ' + metagraph_file)
sys.exit(1)
#-------------------------------------------------------------------------------
# Load the data source
#-------------------------------------------------------------------------------
try:
source = load_data_source(args.data_source)
label_colors = source.label_colors
except (ImportError, AttributeError, RuntimeError) as e:
print('[!] Unable to load data source:', str(e))
sys.exit(1)
#-------------------------------------------------------------------------------
# Create a list of files to analyse and make sure that the output directory
# exists
#-------------------------------------------------------------------------------
samples = glob(args.samples_dir + '/*.png')
if len(samples) == 0:
print('[!] No input samples found in', args.samples_dir)
sys.exit(1)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
#-------------------------------------------------------------------------------
# Print parameters
#-------------------------------------------------------------------------------
print('[i] Project name: ', args.name)
print('[i] Network checkpoint:', checkpoint_file)
print('[i] Metagraph file: ', metagraph_file)
print('[i] Number of samples: ', len(samples))
print('[i] Output directory: ', args.output_dir)
print('[i] Image size: ', source.image_size)
print('[i] # classes: ', source.num_classes)
print('[i] Batch size: ', args.batch_size)
#-------------------------------------------------------------------------------
# Create the network
#-------------------------------------------------------------------------------
with tf.Session() as sess:
print('[i] Creating the model...')
rd_feature = tf.placeholder(
tf.float32, shape=[None, None, None, 3], name='rd_feature_infer')
net = FCNVGG(sess, rd_feature)
net.build_from_metagraph(metagraph_file, checkpoint_file)
#---------------------------------------------------------------------------
# Process the images
#---------------------------------------------------------------------------
generator = sample_generator(samples, source.image_size, args.batch_size, auto_scale=args.autoscale, split=True)
# n_sample_batches = int(math.ceil(len(samples)/args.batch_size))
n_sample_batches, _ = get_subwindows(cv2.imread(samples[0]))
description = '[i] Processing samples'
whole_img = np.zeros((cv2.imread(samples[0]).shape[0]*2, cv2.imread(samples[0]).shape[1]*2, 3))
for x, names in tqdm(generator, total=n_sample_batches,
desc=description, unit='batches'):
vgg_layer1 = sess.graph.get_tensor_by_name('pool3:0')
feed = {net.image_input: x,
net.keep_prob: 1}
layer1, img_labels = sess.run([vgg_layer1, net.classes], feed_dict=feed)
imgs = draw_labels_batch(x, img_labels, label_colors, False)
#---------------------------------------------------------------------------
# 输出特征映射
#---------------------------------------------------------------------------
# chs = 256
# range_stop = chs // 3
# size_splits = [3 for i in range(0, range_stop)]
# if len(size_splits) * 3 < chs:
# size_splits.append(chs % 3)
# layer1_split = tf.split(layer1, num_or_size_splits=size_splits, axis=3) # conv1.shape = [128,24,24,64]
# layer1_concats_1 = []
# concat_step = len(layer1_split) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_split[i], layer1_split[i + 1]], axis=1)
# layer1_concats_1.append(concat)
# layer1_concats_2 = []
# concat_step = len(layer1_concats_1) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_concats_1[i], layer1_concats_1[i + 1]], axis=2)
# layer1_concats_2.append(concat)
# layer1_concats = tf.concat(layer1_concats_2, axis=0)
# print(layer1_concats.shape)
# layer1_np = layer1_concats.eval()
# # print(layer1_np[0])
# cv2.imwrite('featuremap' + names[0] + '.png', layer1_np[0])
# print("visualize finish.")
pos = names[0].split('@')[0]
pos_x = int(pos.split('-')[0])
pos_y = int(pos.split('-')[1])
cv2.imwrite(args.output_dir + '/' + names[0], cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE)))
whole_img[pos_y*PATCH_SIZE:(pos_y+1)*PATCH_SIZE, pos_x*PATCH_SIZE:(pos_x+1)*PATCH_SIZE, :] = \
cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE))
whole_img = whole_img[0:cv2.imread(samples[0]).shape[0], 0:cv2.imread(samples[0]).shape[1]]
cv2.imwrite(args.output_dir + '/' + 'whole_' + names[0], whole_img)
print('[i] All done.')
| read(image_file)
_, subwindows = get_subwindows(image)
batch_count = 0
for each in subwindows:
window = each[0]
pos = each[1]
patchs = [] # 每个batch只有一张图片
patch_names = []
batch_count += 1
if auto_scale:
score = thickness_score(window, name=str(batch_count)+'_'+os.path.basename(image_file))
if score == -1:
pass
# 划分阈值
if score < 1:
window = cv2.resize(window, (896, 896))
elif score >= 1 and score < 1.5:
window = cv2.resize(window, (448, 448))
elif score >= 1.5 and score < 2:
window = cv2.resize(window, (256, 256))
print('shape:', window.shape)
else:
pass
patchs.append(window.astype(np.float32))
patch_names.append(str(pos[0])+'-'+str(pos[1])+'@'+os.path.basename(image_file))
yield np.array(patchs), patch_names
#-------------------------------------------------------------------------------
# P | identifier_body |
binarize.py | import argparse
import math
import sys
import cv2
import os
import tensorflow as tf
import numpy as np
from fcnvgg import FCNVGG
from utils import *
from glob import glob
from tqdm import tqdm
from relative_darkness import relative_darkness
from thickness_score import thickness_score
from tensorflow.python import pywrap_tensorflow
import matplotlib as plt
#-------------------------------------------------------------------------------
PATCH_SIZE = 224
def get_subwindows(img):
# 边缘用平均值填充,而不是用255填充
subwindows = []
positions = []
height = img.shape[0]
width = img.shape[1]
if not img.shape[0] % PATCH_SIZE == 0:
height = img.shape[0] - img.sh | CH_SIZE == 0:
width = img.shape[1] - img.shape[1] % PATCH_SIZE + PATCH_SIZE
expanded_img = np.zeros((height, width, 3))
for row in range(height):
for col in range(width):
if row >= img.shape[0] or col >= img.shape[1]:
if col == img.shape[1] and row < img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(col-5, col):
average[c] += img[row][i][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif col > img.shape[1] and row < img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[row][img.shape[1]][c]
if row == img.shape[0]:
average = [0, 0, 0]
for c in range(3):
for i in range(row-5, row):
average[c] += expanded_img[i][col][c]
average[c] /= 5.0
expanded_img[row][col][c] = average[c] # 使用均值填充
elif row > img.shape[0]:
for c in range(3):
expanded_img[row][col][c] = expanded_img[img.shape[0]][col][c]
else:
for c in range(3):
expanded_img[row][col][c] = img[row][col][c]
# cv2.imwrite('expanded.png', expanded_img)
for y in range(int(height/PATCH_SIZE)):
for x in range(int(width/PATCH_SIZE)):
pos = (x, y)
sub_img = expanded_img[y*PATCH_SIZE:(y+1)*PATCH_SIZE, x*PATCH_SIZE:(x+1)*PATCH_SIZE]
subwindows.append(sub_img)
positions.append(pos)
return len(subwindows), zip(subwindows, positions)
def sample_generator(samples, image_size, batch_size, auto_scale=False, split=False):
# for offset in range(0, len(samples), batch_size):
# files = samples[offset:offset+batch_size]
image_file = samples[0] # 一次只处理一张图片
image = cv2.imread(image_file)
_, subwindows = get_subwindows(image)
batch_count = 0
for each in subwindows:
window = each[0]
pos = each[1]
patchs = [] # 每个batch只有一张图片
patch_names = []
batch_count += 1
if auto_scale:
score = thickness_score(window, name=str(batch_count)+'_'+os.path.basename(image_file))
if score == -1:
pass
# 划分阈值
if score < 1:
window = cv2.resize(window, (896, 896))
elif score >= 1 and score < 1.5:
window = cv2.resize(window, (448, 448))
elif score >= 1.5 and score < 2:
window = cv2.resize(window, (256, 256))
print('shape:', window.shape)
else:
pass
patchs.append(window.astype(np.float32))
patch_names.append(str(pos[0])+'-'+str(pos[1])+'@'+os.path.basename(image_file))
yield np.array(patchs), patch_names
#-------------------------------------------------------------------------------
# Parse commandline
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Generate data based on a model')
parser.add_argument('--name', default='aws1',
help='project name')
parser.add_argument('--checkpoint', type=int, default=-1,
help='checkpoint to restore; -1 is the most recent')
parser.add_argument('--samples-dir', default='aws1',
help='directory containing samples to analyse')
parser.add_argument('--output-dir', default='test-output',
help='directory for the resulting images')
parser.add_argument('--batch-size', type=int, default=1,
help='batch size')
parser.add_argument('--data-source', default='dibco',
help='data source')
parser.add_argument('--autoscale', default=False,
help='autoscale')
args = parser.parse_args()
#-------------------------------------------------------------------------------
# Check if we can get the checkpoint
#-------------------------------------------------------------------------------
state = tf.train.get_checkpoint_state(args.name)
if state is None:
print('[!] No network state found in ' + args.name)
sys.exit(1)
try:
checkpoint_file = state.all_model_checkpoint_paths[args.checkpoint]
except IndexError:
print('[!] Cannot find checkpoint ' + str(args.checkpoint_file))
sys.exit(1)
metagraph_file = checkpoint_file + '.meta'
if not os.path.exists(metagraph_file):
print('[!] Cannot find metagraph ' + metagraph_file)
sys.exit(1)
#-------------------------------------------------------------------------------
# Load the data source
#-------------------------------------------------------------------------------
try:
source = load_data_source(args.data_source)
label_colors = source.label_colors
except (ImportError, AttributeError, RuntimeError) as e:
print('[!] Unable to load data source:', str(e))
sys.exit(1)
#-------------------------------------------------------------------------------
# Create a list of files to analyse and make sure that the output directory
# exists
#-------------------------------------------------------------------------------
samples = glob(args.samples_dir + '/*.png')
if len(samples) == 0:
print('[!] No input samples found in', args.samples_dir)
sys.exit(1)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
#-------------------------------------------------------------------------------
# Print parameters
#-------------------------------------------------------------------------------
print('[i] Project name: ', args.name)
print('[i] Network checkpoint:', checkpoint_file)
print('[i] Metagraph file: ', metagraph_file)
print('[i] Number of samples: ', len(samples))
print('[i] Output directory: ', args.output_dir)
print('[i] Image size: ', source.image_size)
print('[i] # classes: ', source.num_classes)
print('[i] Batch size: ', args.batch_size)
#-------------------------------------------------------------------------------
# Create the network
#-------------------------------------------------------------------------------
with tf.Session() as sess:
print('[i] Creating the model...')
rd_feature = tf.placeholder(
tf.float32, shape=[None, None, None, 3], name='rd_feature_infer')
net = FCNVGG(sess, rd_feature)
net.build_from_metagraph(metagraph_file, checkpoint_file)
#---------------------------------------------------------------------------
# Process the images
#---------------------------------------------------------------------------
generator = sample_generator(samples, source.image_size, args.batch_size, auto_scale=args.autoscale, split=True)
# n_sample_batches = int(math.ceil(len(samples)/args.batch_size))
n_sample_batches, _ = get_subwindows(cv2.imread(samples[0]))
description = '[i] Processing samples'
whole_img = np.zeros((cv2.imread(samples[0]).shape[0]*2, cv2.imread(samples[0]).shape[1]*2, 3))
for x, names in tqdm(generator, total=n_sample_batches,
desc=description, unit='batches'):
vgg_layer1 = sess.graph.get_tensor_by_name('pool3:0')
feed = {net.image_input: x,
net.keep_prob: 1}
layer1, img_labels = sess.run([vgg_layer1, net.classes], feed_dict=feed)
imgs = draw_labels_batch(x, img_labels, label_colors, False)
#---------------------------------------------------------------------------
# 输出特征映射
#---------------------------------------------------------------------------
# chs = 256
# range_stop = chs // 3
# size_splits = [3 for i in range(0, range_stop)]
# if len(size_splits) * 3 < chs:
# size_splits.append(chs % 3)
# layer1_split = tf.split(layer1, num_or_size_splits=size_splits, axis=3) # conv1.shape = [128,24,24,64]
# layer1_concats_1 = []
# concat_step = len(layer1_split) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_split[i], layer1_split[i + 1]], axis=1)
# layer1_concats_1.append(concat)
# layer1_concats_2 = []
# concat_step = len(layer1_concats_1) // 2
# for i in range(0, concat_step, 2):
# concat = tf.concat([layer1_concats_1[i], layer1_concats_1[i + 1]], axis=2)
# layer1_concats_2.append(concat)
# layer1_concats = tf.concat(layer1_concats_2, axis=0)
# print(layer1_concats.shape)
# layer1_np = layer1_concats.eval()
# # print(layer1_np[0])
# cv2.imwrite('featuremap' + names[0] + '.png', layer1_np[0])
# print("visualize finish.")
pos = names[0].split('@')[0]
pos_x = int(pos.split('-')[0])
pos_y = int(pos.split('-')[1])
cv2.imwrite(args.output_dir + '/' + names[0], cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE)))
whole_img[pos_y*PATCH_SIZE:(pos_y+1)*PATCH_SIZE, pos_x*PATCH_SIZE:(pos_x+1)*PATCH_SIZE, :] = \
cv2.resize(imgs[0, :, :, :], (PATCH_SIZE, PATCH_SIZE))
whole_img = whole_img[0:cv2.imread(samples[0]).shape[0], 0:cv2.imread(samples[0]).shape[1]]
cv2.imwrite(args.output_dir + '/' + 'whole_' + names[0], whole_img)
print('[i] All done.')
| ape[0] % PATCH_SIZE + PATCH_SIZE
if not img.shape[1] % PAT | conditional_block |
proxy.go | /*
Copyright 2015 Ian Bishop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"log"
"net"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcap"
"github.com/vishvananda/netlink"
)
type listener struct {
sync.RWMutex
ifname string
extChan, intChan chan ndp
errChan chan error
ruleNet *net.IPNet
started, finished bool
}
type sessionStatus int
// sessions track clients who've previously sent neighbor solicits
type session struct {
upstream *listener
srcIP, dstIP, target net.IP
status sessionStatus
expiry time.Time
}
type ndp struct {
payload gopacket.Payload
icmp layers.ICMPv6
ip6 layers.IPv6
eth layers.Ethernet
}
const (
waiting sessionStatus = iota
valid
invalid
timeout = time.Duration(500 * time.Millisecond)
ttl = time.Duration(30 * time.Second)
routeCheckInterval = 30
// snaplen should be large enough to capture the layers we're interested in
snaplen = 100
)
var IPV6SolicitedNode = net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff, 0, 0, 0}
func Proxy(wg *sync.WaitGroup, ifname string, rules []string) {
defer wg.Done()
var err error
upstreams := make(map[string]*listener)
// shared channels upstreams send to
errChan := make(chan error)
intChan := make(chan ndp)
mainExtChan := make(chan ndp)
tickRouteChan := time.NewTicker(time.Second * routeCheckInterval).C
tickSessChan := time.NewTicker(time.Millisecond * 100).C
defer func() {
for _, upstream := range upstreams {
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}()
var sessions []session
// launch handler for main interface 'ifname'
l := &listener{ifname: ifname, intChan: intChan, extChan: mainExtChan, errChan: errChan}
go l.handler()
err = refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
for {
select {
case err = <-errChan:
fmt.Printf("%s\n", err)
return
case n := <-intChan:
sessions = proxyPacket(n, mainExtChan, upstreams, sessions)
case <-tickSessChan:
sessions = updateSessions(sessions)
case <-tickRouteChan:
err := refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
}
}
}
func proxyPacket(n ndp, extChan chan ndp, upstreams map[string]*listener, sessions []session) []session {
var target net.IP
// IPv6 bounds check
if len(n.payload) >= 16 {
target = net.IP(n.payload[:16])
} else {
return sessions
}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborAdvertisement:
for i, s := range sessions {
if s.target.Equal(target) && sessions[i].status == waiting {
vlog.Printf("advert, using existing session for target %s\n", target)
sessions[i].status = valid
sessions[i].expiry = time.Now().Add(ttl)
n.ip6.DstIP = s.srcIP
extChan <- n
return sessions
}
}
case layers.ICMPv6TypeNeighborSolicitation:
if !n.ip6.DstIP.IsMulticast() {
return sessions
}
for _, s := range sessions {
if s.target.Equal(target) {
switch s.status {
case waiting, invalid:
break
case valid:
// swap solicit for advert and send back out main interface
vlog.Printf("solicit, using existing session for target %s\n", target)
n.icmp.TypeCode = layers.CreateICMPv6TypeCode(layers.ICMPv6TypeNeighborAdvertisement, 0)
n.ip6.DstIP = n.ip6.SrcIP
n.ip6.SrcIP = nil
extChan <- n
}
return sessions
}
}
var s *session
// if msg arrived from the main interface, then send to matching upstreams
for _, upstream := range upstreams {
if upstream.ruleNet.Contains(target) {
vlog.Printf("session not found when handling solicit for target %s. Creating new session...\n", net.IP(n.payload[:16]))
s = &session{
upstream: upstream,
srcIP: n.ip6.SrcIP,
dstIP: n.ip6.DstIP,
target: target,
status: waiting,
expiry: time.Now().Add(timeout),
}
}
}
if s != nil {
if !s.upstream.started {
// launch upstream handler
go s.upstream.handler()
}
sessions = append(sessions, *s)
s.upstream.extChan <- n
}
}
return sessions
}
func updateSessions(sessions []session) []session {
for i := len(sessions) - 1; i >= 0; i-- {
if sessions[i].expiry.After(time.Now()) {
continue
}
switch sessions[i].status {
case waiting:
vlog.Printf("set waiting session %d to invalid, target %s", i, sessions[i].target)
sessions[i].status = invalid
sessions[i].expiry = time.Now().Add(ttl)
default:
vlog.Printf("remove session %d, target %s", i, sessions[i].target)
sessions = append(sessions[:i], sessions[i+1:]...)
}
}
return sessions
}
func refreshRoutes(rules []string, intChan chan ndp, errChan chan error, upstreams map[string]*listener) error {
vlog.Println("refreshing routes...")
for _, rule := range rules {
_, ruleNet, err := net.ParseCIDR(rule)
if err != nil {
return fmt.Errorf("invalid rule '%s', %s", rule, err)
}
routes, err := netlink.RouteList(nil, netlink.FAMILY_V6)
if err != nil {
return fmt.Errorf("error enumerating routes, %s", err)
}
var route *netlink.Route
for _, r := range routes {
if r.Dst != nil && r.Dst.Contains(ruleNet.IP) {
route = &r
break
}
}
if route == nil {
// cancel any proxies for removed routes
for _, upstream := range upstreams {
if upstream.ruleNet.IP.Equal(ruleNet.IP) {
log.Printf("route for upstream if %s went away. Removing listener...\n", upstream.ifname)
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}
// route not found, skip
continue
}
links, err := netlink.LinkList()
if err != nil {
return fmt.Errorf("error enumerating links, %s", err)
}
for _, link := range links {
if link.Attrs().Index == route.LinkIndex {
if _, ok := upstreams[link.Attrs().Name]; !ok {
log.Printf("new upstream for link '%s', rule '%s', route '%s'\n", link.Attrs().Name, rule, route.Dst)
upstreams[link.Attrs().Name] = &listener{
ifname: link.Attrs().Name,
extChan: make(chan ndp),
intChan: intChan,
errChan: errChan,
ruleNet: ruleNet,
}
}
}
}
}
for name, listener := range upstreams {
listener.RLock()
if listener.finished {
delete(upstreams, name)
}
listener.RUnlock()
}
return nil
}
func (l *listener) handler() {
var err error
var handle *pcap.Handle
log.Printf("spawning listener for if %s\n", l.ifname)
l.Lock()
l.started = true
l.Unlock()
defer func() {
if err != nil {
l.errChan <- err
}
l.Lock()
l.finished = true
l.Unlock()
log.Printf("exiting listener for if %s\n", l.ifname)
}()
// open interface in promiscuous mode in order to pickup solicited-node multicasts
handle, err = pcap.OpenLive(l.ifname, snaplen, true, pcap.BlockForever)
if err != nil {
err = fmt.Errorf("pcap open error: %s", err)
return
}
defer handle.Close()
// limit captured packets to icmp6
err = handle.SetBPFFilter("icmp6")
if err != nil {
return
}
var iface *net.Interface
iface, err = net.InterfaceByName(l.ifname)
if err != nil {
return
}
var addrs []net.Addr
var linklocal net.IP
addrs, err = iface.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
if v.IP.IsLinkLocalUnicast() {
linklocal = v.IP
break
}
}
}
if linklocal.IsUnspecified() {
err = fmt.Errorf("error finding link local unicast address for if %s", l.ifname)
return
}
var eth layers.Ethernet
var ip6 layers.IPv6
var ip6extensions layers.IPv6ExtensionSkipper
var icmp layers.ICMPv6
var payload gopacket.Payload
decoded := []gopacket.LayerType{}
parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip6, &ip6extensions, &icmp, &payload)
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
packetsChan := packetSource.Packets()
for {
select {
case packet := <-packetsChan:
parser.DecodeLayers(packet.Data(), &decoded)
for _, layerType := range decoded {
switch layerType {
case layers.LayerTypeICMPv6:
var target net.IP
// IPv6 bounds check
if len(payload) >= 16 {
target = net.IP(payload[:16])
} else {
continue
}
switch icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation, layers.ICMPv6TypeNeighborAdvertisement:
n := ndp{eth: eth, ip6: ip6, icmp: icmp, payload: payload}
vlog.Printf("%s\tread\t%s\tmac_src %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, icmp.TypeCode, eth.SrcMAC, ip6.SrcIP, ip6.DstIP, target)
l.intChan <- n
}
}
}
case n, ok := <-l.extChan:
if !ok {
// channel was closed
return
}
n.eth.DstMAC = nil
if n.ip6.DstIP.IsLinkLocalMulticast() {
// Ethernet MAC is derived by the four low-order octets of IPv6 address
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
} else {
var neighbors []netlink.Neigh
neighbors, err = netlink.NeighList(iface.Index, netlink.FAMILY_V6)
if err != nil {
return
}
for _, neighbor := range neighbors {
if neighbor.IP.Equal(n.ip6.DstIP) {
n.eth.DstMAC = neighbor.HardwareAddr
break
}
}
}
if n.eth.DstMAC == nil {
vlog.Printf("%s: could not find destination MAC address. %s mac_src %s ip6_dst %s ip6_src %s target %s", l.ifname, n.icmp.TypeCode, n.eth.SrcMAC, n.ip6.DstIP, n.ip6.SrcIP, net.IP(n.payload[:16]))
// Try Solicited-Node multicast address
// dst IP is derived by the first 13 octets of multicast address +
// last 3 octets of dst IP
n.ip6.DstIP = append(IPV6SolicitedNode[:13], n.ip6.DstIP[13:]...)
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
}
n.eth.SrcMAC = iface.HardwareAddr
n.ip6.SrcIP = linklocal
buf := gopacket.NewSerializeBuffer()
n.icmp.SetNetworkLayerForChecksum(&n.ip6)
opts := gopacket.SerializeOptions{ComputeChecksums: true}
switch n.icmp.TypeCode.Type() { | case layers.ICMPv6TypeNeighborAdvertisement:
// target link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x02, 0x01)
n.icmp.TypeBytes[0] = 0xc0 // router,solicit,override flags
}
n.payload = append(n.payload[:18], iface.HardwareAddr...)
err = gopacket.SerializeLayers(buf, opts, &n.eth, &n.ip6, &n.icmp, &n.payload)
if err != nil {
err = fmt.Errorf("serialize layers error: %s", err)
return
}
err = handle.WritePacketData(buf.Bytes())
if err != nil {
err = fmt.Errorf("pcap write error: %s", err)
return
}
vlog.Printf("%s\twrite\t%s\tmac_dst %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, n.icmp.TypeCode, n.eth.DstMAC, n.ip6.SrcIP, n.ip6.DstIP, net.IP(n.payload[:16]))
}
}
} | case layers.ICMPv6TypeNeighborSolicitation:
// source link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x01, 0x01) | random_line_split |
proxy.go | /*
Copyright 2015 Ian Bishop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"log"
"net"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcap"
"github.com/vishvananda/netlink"
)
type listener struct {
sync.RWMutex
ifname string
extChan, intChan chan ndp
errChan chan error
ruleNet *net.IPNet
started, finished bool
}
type sessionStatus int
// sessions track clients who've previously sent neighbor solicits
type session struct {
upstream *listener
srcIP, dstIP, target net.IP
status sessionStatus
expiry time.Time
}
type ndp struct {
payload gopacket.Payload
icmp layers.ICMPv6
ip6 layers.IPv6
eth layers.Ethernet
}
const (
waiting sessionStatus = iota
valid
invalid
timeout = time.Duration(500 * time.Millisecond)
ttl = time.Duration(30 * time.Second)
routeCheckInterval = 30
// snaplen should be large enough to capture the layers we're interested in
snaplen = 100
)
var IPV6SolicitedNode = net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff, 0, 0, 0}
func Proxy(wg *sync.WaitGroup, ifname string, rules []string) {
defer wg.Done()
var err error
upstreams := make(map[string]*listener)
// shared channels upstreams send to
errChan := make(chan error)
intChan := make(chan ndp)
mainExtChan := make(chan ndp)
tickRouteChan := time.NewTicker(time.Second * routeCheckInterval).C
tickSessChan := time.NewTicker(time.Millisecond * 100).C
defer func() {
for _, upstream := range upstreams {
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}()
var sessions []session
// launch handler for main interface 'ifname'
l := &listener{ifname: ifname, intChan: intChan, extChan: mainExtChan, errChan: errChan}
go l.handler()
err = refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
for {
select {
case err = <-errChan:
fmt.Printf("%s\n", err)
return
case n := <-intChan:
sessions = proxyPacket(n, mainExtChan, upstreams, sessions)
case <-tickSessChan:
sessions = updateSessions(sessions)
case <-tickRouteChan:
err := refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
}
}
}
func proxyPacket(n ndp, extChan chan ndp, upstreams map[string]*listener, sessions []session) []session {
var target net.IP
// IPv6 bounds check
if len(n.payload) >= 16 {
target = net.IP(n.payload[:16])
} else {
return sessions
}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborAdvertisement:
for i, s := range sessions {
if s.target.Equal(target) && sessions[i].status == waiting {
vlog.Printf("advert, using existing session for target %s\n", target)
sessions[i].status = valid
sessions[i].expiry = time.Now().Add(ttl)
n.ip6.DstIP = s.srcIP
extChan <- n
return sessions
}
}
case layers.ICMPv6TypeNeighborSolicitation:
if !n.ip6.DstIP.IsMulticast() {
return sessions
}
for _, s := range sessions {
if s.target.Equal(target) {
switch s.status {
case waiting, invalid:
break
case valid:
// swap solicit for advert and send back out main interface
vlog.Printf("solicit, using existing session for target %s\n", target)
n.icmp.TypeCode = layers.CreateICMPv6TypeCode(layers.ICMPv6TypeNeighborAdvertisement, 0)
n.ip6.DstIP = n.ip6.SrcIP
n.ip6.SrcIP = nil
extChan <- n
}
return sessions
}
}
var s *session
// if msg arrived from the main interface, then send to matching upstreams
for _, upstream := range upstreams {
if upstream.ruleNet.Contains(target) {
vlog.Printf("session not found when handling solicit for target %s. Creating new session...\n", net.IP(n.payload[:16]))
s = &session{
upstream: upstream,
srcIP: n.ip6.SrcIP,
dstIP: n.ip6.DstIP,
target: target,
status: waiting,
expiry: time.Now().Add(timeout),
}
}
}
if s != nil {
if !s.upstream.started {
// launch upstream handler
go s.upstream.handler()
}
sessions = append(sessions, *s)
s.upstream.extChan <- n
}
}
return sessions
}
func updateSessions(sessions []session) []session |
func refreshRoutes(rules []string, intChan chan ndp, errChan chan error, upstreams map[string]*listener) error {
vlog.Println("refreshing routes...")
for _, rule := range rules {
_, ruleNet, err := net.ParseCIDR(rule)
if err != nil {
return fmt.Errorf("invalid rule '%s', %s", rule, err)
}
routes, err := netlink.RouteList(nil, netlink.FAMILY_V6)
if err != nil {
return fmt.Errorf("error enumerating routes, %s", err)
}
var route *netlink.Route
for _, r := range routes {
if r.Dst != nil && r.Dst.Contains(ruleNet.IP) {
route = &r
break
}
}
if route == nil {
// cancel any proxies for removed routes
for _, upstream := range upstreams {
if upstream.ruleNet.IP.Equal(ruleNet.IP) {
log.Printf("route for upstream if %s went away. Removing listener...\n", upstream.ifname)
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}
// route not found, skip
continue
}
links, err := netlink.LinkList()
if err != nil {
return fmt.Errorf("error enumerating links, %s", err)
}
for _, link := range links {
if link.Attrs().Index == route.LinkIndex {
if _, ok := upstreams[link.Attrs().Name]; !ok {
log.Printf("new upstream for link '%s', rule '%s', route '%s'\n", link.Attrs().Name, rule, route.Dst)
upstreams[link.Attrs().Name] = &listener{
ifname: link.Attrs().Name,
extChan: make(chan ndp),
intChan: intChan,
errChan: errChan,
ruleNet: ruleNet,
}
}
}
}
}
for name, listener := range upstreams {
listener.RLock()
if listener.finished {
delete(upstreams, name)
}
listener.RUnlock()
}
return nil
}
func (l *listener) handler() {
var err error
var handle *pcap.Handle
log.Printf("spawning listener for if %s\n", l.ifname)
l.Lock()
l.started = true
l.Unlock()
defer func() {
if err != nil {
l.errChan <- err
}
l.Lock()
l.finished = true
l.Unlock()
log.Printf("exiting listener for if %s\n", l.ifname)
}()
// open interface in promiscuous mode in order to pickup solicited-node multicasts
handle, err = pcap.OpenLive(l.ifname, snaplen, true, pcap.BlockForever)
if err != nil {
err = fmt.Errorf("pcap open error: %s", err)
return
}
defer handle.Close()
// limit captured packets to icmp6
err = handle.SetBPFFilter("icmp6")
if err != nil {
return
}
var iface *net.Interface
iface, err = net.InterfaceByName(l.ifname)
if err != nil {
return
}
var addrs []net.Addr
var linklocal net.IP
addrs, err = iface.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
if v.IP.IsLinkLocalUnicast() {
linklocal = v.IP
break
}
}
}
if linklocal.IsUnspecified() {
err = fmt.Errorf("error finding link local unicast address for if %s", l.ifname)
return
}
var eth layers.Ethernet
var ip6 layers.IPv6
var ip6extensions layers.IPv6ExtensionSkipper
var icmp layers.ICMPv6
var payload gopacket.Payload
decoded := []gopacket.LayerType{}
parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip6, &ip6extensions, &icmp, &payload)
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
packetsChan := packetSource.Packets()
for {
select {
case packet := <-packetsChan:
parser.DecodeLayers(packet.Data(), &decoded)
for _, layerType := range decoded {
switch layerType {
case layers.LayerTypeICMPv6:
var target net.IP
// IPv6 bounds check
if len(payload) >= 16 {
target = net.IP(payload[:16])
} else {
continue
}
switch icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation, layers.ICMPv6TypeNeighborAdvertisement:
n := ndp{eth: eth, ip6: ip6, icmp: icmp, payload: payload}
vlog.Printf("%s\tread\t%s\tmac_src %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, icmp.TypeCode, eth.SrcMAC, ip6.SrcIP, ip6.DstIP, target)
l.intChan <- n
}
}
}
case n, ok := <-l.extChan:
if !ok {
// channel was closed
return
}
n.eth.DstMAC = nil
if n.ip6.DstIP.IsLinkLocalMulticast() {
// Ethernet MAC is derived by the four low-order octets of IPv6 address
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
} else {
var neighbors []netlink.Neigh
neighbors, err = netlink.NeighList(iface.Index, netlink.FAMILY_V6)
if err != nil {
return
}
for _, neighbor := range neighbors {
if neighbor.IP.Equal(n.ip6.DstIP) {
n.eth.DstMAC = neighbor.HardwareAddr
break
}
}
}
if n.eth.DstMAC == nil {
vlog.Printf("%s: could not find destination MAC address. %s mac_src %s ip6_dst %s ip6_src %s target %s", l.ifname, n.icmp.TypeCode, n.eth.SrcMAC, n.ip6.DstIP, n.ip6.SrcIP, net.IP(n.payload[:16]))
// Try Solicited-Node multicast address
// dst IP is derived by the first 13 octets of multicast address +
// last 3 octets of dst IP
n.ip6.DstIP = append(IPV6SolicitedNode[:13], n.ip6.DstIP[13:]...)
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
}
n.eth.SrcMAC = iface.HardwareAddr
n.ip6.SrcIP = linklocal
buf := gopacket.NewSerializeBuffer()
n.icmp.SetNetworkLayerForChecksum(&n.ip6)
opts := gopacket.SerializeOptions{ComputeChecksums: true}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation:
// source link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x01, 0x01)
case layers.ICMPv6TypeNeighborAdvertisement:
// target link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x02, 0x01)
n.icmp.TypeBytes[0] = 0xc0 // router,solicit,override flags
}
n.payload = append(n.payload[:18], iface.HardwareAddr...)
err = gopacket.SerializeLayers(buf, opts, &n.eth, &n.ip6, &n.icmp, &n.payload)
if err != nil {
err = fmt.Errorf("serialize layers error: %s", err)
return
}
err = handle.WritePacketData(buf.Bytes())
if err != nil {
err = fmt.Errorf("pcap write error: %s", err)
return
}
vlog.Printf("%s\twrite\t%s\tmac_dst %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, n.icmp.TypeCode, n.eth.DstMAC, n.ip6.SrcIP, n.ip6.DstIP, net.IP(n.payload[:16]))
}
}
}
| {
for i := len(sessions) - 1; i >= 0; i-- {
if sessions[i].expiry.After(time.Now()) {
continue
}
switch sessions[i].status {
case waiting:
vlog.Printf("set waiting session %d to invalid, target %s", i, sessions[i].target)
sessions[i].status = invalid
sessions[i].expiry = time.Now().Add(ttl)
default:
vlog.Printf("remove session %d, target %s", i, sessions[i].target)
sessions = append(sessions[:i], sessions[i+1:]...)
}
}
return sessions
} | identifier_body |
proxy.go | /*
Copyright 2015 Ian Bishop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"log"
"net"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcap"
"github.com/vishvananda/netlink"
)
type listener struct {
sync.RWMutex
ifname string
extChan, intChan chan ndp
errChan chan error
ruleNet *net.IPNet
started, finished bool
}
type sessionStatus int
// sessions track clients who've previously sent neighbor solicits
type session struct {
upstream *listener
srcIP, dstIP, target net.IP
status sessionStatus
expiry time.Time
}
type ndp struct {
payload gopacket.Payload
icmp layers.ICMPv6
ip6 layers.IPv6
eth layers.Ethernet
}
const (
waiting sessionStatus = iota
valid
invalid
timeout = time.Duration(500 * time.Millisecond)
ttl = time.Duration(30 * time.Second)
routeCheckInterval = 30
// snaplen should be large enough to capture the layers we're interested in
snaplen = 100
)
var IPV6SolicitedNode = net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff, 0, 0, 0}
func Proxy(wg *sync.WaitGroup, ifname string, rules []string) {
defer wg.Done()
var err error
upstreams := make(map[string]*listener)
// shared channels upstreams send to
errChan := make(chan error)
intChan := make(chan ndp)
mainExtChan := make(chan ndp)
tickRouteChan := time.NewTicker(time.Second * routeCheckInterval).C
tickSessChan := time.NewTicker(time.Millisecond * 100).C
defer func() {
for _, upstream := range upstreams {
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}()
var sessions []session
// launch handler for main interface 'ifname'
l := &listener{ifname: ifname, intChan: intChan, extChan: mainExtChan, errChan: errChan}
go l.handler()
err = refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
for |
}
func proxyPacket(n ndp, extChan chan ndp, upstreams map[string]*listener, sessions []session) []session {
var target net.IP
// IPv6 bounds check
if len(n.payload) >= 16 {
target = net.IP(n.payload[:16])
} else {
return sessions
}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborAdvertisement:
for i, s := range sessions {
if s.target.Equal(target) && sessions[i].status == waiting {
vlog.Printf("advert, using existing session for target %s\n", target)
sessions[i].status = valid
sessions[i].expiry = time.Now().Add(ttl)
n.ip6.DstIP = s.srcIP
extChan <- n
return sessions
}
}
case layers.ICMPv6TypeNeighborSolicitation:
if !n.ip6.DstIP.IsMulticast() {
return sessions
}
for _, s := range sessions {
if s.target.Equal(target) {
switch s.status {
case waiting, invalid:
break
case valid:
// swap solicit for advert and send back out main interface
vlog.Printf("solicit, using existing session for target %s\n", target)
n.icmp.TypeCode = layers.CreateICMPv6TypeCode(layers.ICMPv6TypeNeighborAdvertisement, 0)
n.ip6.DstIP = n.ip6.SrcIP
n.ip6.SrcIP = nil
extChan <- n
}
return sessions
}
}
var s *session
// if msg arrived from the main interface, then send to matching upstreams
for _, upstream := range upstreams {
if upstream.ruleNet.Contains(target) {
vlog.Printf("session not found when handling solicit for target %s. Creating new session...\n", net.IP(n.payload[:16]))
s = &session{
upstream: upstream,
srcIP: n.ip6.SrcIP,
dstIP: n.ip6.DstIP,
target: target,
status: waiting,
expiry: time.Now().Add(timeout),
}
}
}
if s != nil {
if !s.upstream.started {
// launch upstream handler
go s.upstream.handler()
}
sessions = append(sessions, *s)
s.upstream.extChan <- n
}
}
return sessions
}
func updateSessions(sessions []session) []session {
for i := len(sessions) - 1; i >= 0; i-- {
if sessions[i].expiry.After(time.Now()) {
continue
}
switch sessions[i].status {
case waiting:
vlog.Printf("set waiting session %d to invalid, target %s", i, sessions[i].target)
sessions[i].status = invalid
sessions[i].expiry = time.Now().Add(ttl)
default:
vlog.Printf("remove session %d, target %s", i, sessions[i].target)
sessions = append(sessions[:i], sessions[i+1:]...)
}
}
return sessions
}
func refreshRoutes(rules []string, intChan chan ndp, errChan chan error, upstreams map[string]*listener) error {
vlog.Println("refreshing routes...")
for _, rule := range rules {
_, ruleNet, err := net.ParseCIDR(rule)
if err != nil {
return fmt.Errorf("invalid rule '%s', %s", rule, err)
}
routes, err := netlink.RouteList(nil, netlink.FAMILY_V6)
if err != nil {
return fmt.Errorf("error enumerating routes, %s", err)
}
var route *netlink.Route
for _, r := range routes {
if r.Dst != nil && r.Dst.Contains(ruleNet.IP) {
route = &r
break
}
}
if route == nil {
// cancel any proxies for removed routes
for _, upstream := range upstreams {
if upstream.ruleNet.IP.Equal(ruleNet.IP) {
log.Printf("route for upstream if %s went away. Removing listener...\n", upstream.ifname)
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}
// route not found, skip
continue
}
links, err := netlink.LinkList()
if err != nil {
return fmt.Errorf("error enumerating links, %s", err)
}
for _, link := range links {
if link.Attrs().Index == route.LinkIndex {
if _, ok := upstreams[link.Attrs().Name]; !ok {
log.Printf("new upstream for link '%s', rule '%s', route '%s'\n", link.Attrs().Name, rule, route.Dst)
upstreams[link.Attrs().Name] = &listener{
ifname: link.Attrs().Name,
extChan: make(chan ndp),
intChan: intChan,
errChan: errChan,
ruleNet: ruleNet,
}
}
}
}
}
for name, listener := range upstreams {
listener.RLock()
if listener.finished {
delete(upstreams, name)
}
listener.RUnlock()
}
return nil
}
func (l *listener) handler() {
var err error
var handle *pcap.Handle
log.Printf("spawning listener for if %s\n", l.ifname)
l.Lock()
l.started = true
l.Unlock()
defer func() {
if err != nil {
l.errChan <- err
}
l.Lock()
l.finished = true
l.Unlock()
log.Printf("exiting listener for if %s\n", l.ifname)
}()
// open interface in promiscuous mode in order to pickup solicited-node multicasts
handle, err = pcap.OpenLive(l.ifname, snaplen, true, pcap.BlockForever)
if err != nil {
err = fmt.Errorf("pcap open error: %s", err)
return
}
defer handle.Close()
// limit captured packets to icmp6
err = handle.SetBPFFilter("icmp6")
if err != nil {
return
}
var iface *net.Interface
iface, err = net.InterfaceByName(l.ifname)
if err != nil {
return
}
var addrs []net.Addr
var linklocal net.IP
addrs, err = iface.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
if v.IP.IsLinkLocalUnicast() {
linklocal = v.IP
break
}
}
}
if linklocal.IsUnspecified() {
err = fmt.Errorf("error finding link local unicast address for if %s", l.ifname)
return
}
var eth layers.Ethernet
var ip6 layers.IPv6
var ip6extensions layers.IPv6ExtensionSkipper
var icmp layers.ICMPv6
var payload gopacket.Payload
decoded := []gopacket.LayerType{}
parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip6, &ip6extensions, &icmp, &payload)
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
packetsChan := packetSource.Packets()
for {
select {
case packet := <-packetsChan:
parser.DecodeLayers(packet.Data(), &decoded)
for _, layerType := range decoded {
switch layerType {
case layers.LayerTypeICMPv6:
var target net.IP
// IPv6 bounds check
if len(payload) >= 16 {
target = net.IP(payload[:16])
} else {
continue
}
switch icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation, layers.ICMPv6TypeNeighborAdvertisement:
n := ndp{eth: eth, ip6: ip6, icmp: icmp, payload: payload}
vlog.Printf("%s\tread\t%s\tmac_src %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, icmp.TypeCode, eth.SrcMAC, ip6.SrcIP, ip6.DstIP, target)
l.intChan <- n
}
}
}
case n, ok := <-l.extChan:
if !ok {
// channel was closed
return
}
n.eth.DstMAC = nil
if n.ip6.DstIP.IsLinkLocalMulticast() {
// Ethernet MAC is derived by the four low-order octets of IPv6 address
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
} else {
var neighbors []netlink.Neigh
neighbors, err = netlink.NeighList(iface.Index, netlink.FAMILY_V6)
if err != nil {
return
}
for _, neighbor := range neighbors {
if neighbor.IP.Equal(n.ip6.DstIP) {
n.eth.DstMAC = neighbor.HardwareAddr
break
}
}
}
if n.eth.DstMAC == nil {
vlog.Printf("%s: could not find destination MAC address. %s mac_src %s ip6_dst %s ip6_src %s target %s", l.ifname, n.icmp.TypeCode, n.eth.SrcMAC, n.ip6.DstIP, n.ip6.SrcIP, net.IP(n.payload[:16]))
// Try Solicited-Node multicast address
// dst IP is derived by the first 13 octets of multicast address +
// last 3 octets of dst IP
n.ip6.DstIP = append(IPV6SolicitedNode[:13], n.ip6.DstIP[13:]...)
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
}
n.eth.SrcMAC = iface.HardwareAddr
n.ip6.SrcIP = linklocal
buf := gopacket.NewSerializeBuffer()
n.icmp.SetNetworkLayerForChecksum(&n.ip6)
opts := gopacket.SerializeOptions{ComputeChecksums: true}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation:
// source link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x01, 0x01)
case layers.ICMPv6TypeNeighborAdvertisement:
// target link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x02, 0x01)
n.icmp.TypeBytes[0] = 0xc0 // router,solicit,override flags
}
n.payload = append(n.payload[:18], iface.HardwareAddr...)
err = gopacket.SerializeLayers(buf, opts, &n.eth, &n.ip6, &n.icmp, &n.payload)
if err != nil {
err = fmt.Errorf("serialize layers error: %s", err)
return
}
err = handle.WritePacketData(buf.Bytes())
if err != nil {
err = fmt.Errorf("pcap write error: %s", err)
return
}
vlog.Printf("%s\twrite\t%s\tmac_dst %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, n.icmp.TypeCode, n.eth.DstMAC, n.ip6.SrcIP, n.ip6.DstIP, net.IP(n.payload[:16]))
}
}
}
| {
select {
case err = <-errChan:
fmt.Printf("%s\n", err)
return
case n := <-intChan:
sessions = proxyPacket(n, mainExtChan, upstreams, sessions)
case <-tickSessChan:
sessions = updateSessions(sessions)
case <-tickRouteChan:
err := refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
}
} | conditional_block |
proxy.go | /*
Copyright 2015 Ian Bishop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"log"
"net"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcap"
"github.com/vishvananda/netlink"
)
type listener struct {
sync.RWMutex
ifname string
extChan, intChan chan ndp
errChan chan error
ruleNet *net.IPNet
started, finished bool
}
type sessionStatus int
// sessions track clients who've previously sent neighbor solicits
type session struct {
upstream *listener
srcIP, dstIP, target net.IP
status sessionStatus
expiry time.Time
}
type ndp struct {
payload gopacket.Payload
icmp layers.ICMPv6
ip6 layers.IPv6
eth layers.Ethernet
}
const (
waiting sessionStatus = iota
valid
invalid
timeout = time.Duration(500 * time.Millisecond)
ttl = time.Duration(30 * time.Second)
routeCheckInterval = 30
// snaplen should be large enough to capture the layers we're interested in
snaplen = 100
)
var IPV6SolicitedNode = net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff, 0, 0, 0}
func Proxy(wg *sync.WaitGroup, ifname string, rules []string) {
defer wg.Done()
var err error
upstreams := make(map[string]*listener)
// shared channels upstreams send to
errChan := make(chan error)
intChan := make(chan ndp)
mainExtChan := make(chan ndp)
tickRouteChan := time.NewTicker(time.Second * routeCheckInterval).C
tickSessChan := time.NewTicker(time.Millisecond * 100).C
defer func() {
for _, upstream := range upstreams {
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}()
var sessions []session
// launch handler for main interface 'ifname'
l := &listener{ifname: ifname, intChan: intChan, extChan: mainExtChan, errChan: errChan}
go l.handler()
err = refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
for {
select {
case err = <-errChan:
fmt.Printf("%s\n", err)
return
case n := <-intChan:
sessions = proxyPacket(n, mainExtChan, upstreams, sessions)
case <-tickSessChan:
sessions = updateSessions(sessions)
case <-tickRouteChan:
err := refreshRoutes(rules, intChan, errChan, upstreams)
if err != nil {
fmt.Printf("%s\n", err)
return
}
}
}
}
func proxyPacket(n ndp, extChan chan ndp, upstreams map[string]*listener, sessions []session) []session {
var target net.IP
// IPv6 bounds check
if len(n.payload) >= 16 {
target = net.IP(n.payload[:16])
} else {
return sessions
}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborAdvertisement:
for i, s := range sessions {
if s.target.Equal(target) && sessions[i].status == waiting {
vlog.Printf("advert, using existing session for target %s\n", target)
sessions[i].status = valid
sessions[i].expiry = time.Now().Add(ttl)
n.ip6.DstIP = s.srcIP
extChan <- n
return sessions
}
}
case layers.ICMPv6TypeNeighborSolicitation:
if !n.ip6.DstIP.IsMulticast() {
return sessions
}
for _, s := range sessions {
if s.target.Equal(target) {
switch s.status {
case waiting, invalid:
break
case valid:
// swap solicit for advert and send back out main interface
vlog.Printf("solicit, using existing session for target %s\n", target)
n.icmp.TypeCode = layers.CreateICMPv6TypeCode(layers.ICMPv6TypeNeighborAdvertisement, 0)
n.ip6.DstIP = n.ip6.SrcIP
n.ip6.SrcIP = nil
extChan <- n
}
return sessions
}
}
var s *session
// if msg arrived from the main interface, then send to matching upstreams
for _, upstream := range upstreams {
if upstream.ruleNet.Contains(target) {
vlog.Printf("session not found when handling solicit for target %s. Creating new session...\n", net.IP(n.payload[:16]))
s = &session{
upstream: upstream,
srcIP: n.ip6.SrcIP,
dstIP: n.ip6.DstIP,
target: target,
status: waiting,
expiry: time.Now().Add(timeout),
}
}
}
if s != nil {
if !s.upstream.started {
// launch upstream handler
go s.upstream.handler()
}
sessions = append(sessions, *s)
s.upstream.extChan <- n
}
}
return sessions
}
func updateSessions(sessions []session) []session {
for i := len(sessions) - 1; i >= 0; i-- {
if sessions[i].expiry.After(time.Now()) {
continue
}
switch sessions[i].status {
case waiting:
vlog.Printf("set waiting session %d to invalid, target %s", i, sessions[i].target)
sessions[i].status = invalid
sessions[i].expiry = time.Now().Add(ttl)
default:
vlog.Printf("remove session %d, target %s", i, sessions[i].target)
sessions = append(sessions[:i], sessions[i+1:]...)
}
}
return sessions
}
func refreshRoutes(rules []string, intChan chan ndp, errChan chan error, upstreams map[string]*listener) error {
vlog.Println("refreshing routes...")
for _, rule := range rules {
_, ruleNet, err := net.ParseCIDR(rule)
if err != nil {
return fmt.Errorf("invalid rule '%s', %s", rule, err)
}
routes, err := netlink.RouteList(nil, netlink.FAMILY_V6)
if err != nil {
return fmt.Errorf("error enumerating routes, %s", err)
}
var route *netlink.Route
for _, r := range routes {
if r.Dst != nil && r.Dst.Contains(ruleNet.IP) {
route = &r
break
}
}
if route == nil {
// cancel any proxies for removed routes
for _, upstream := range upstreams {
if upstream.ruleNet.IP.Equal(ruleNet.IP) {
log.Printf("route for upstream if %s went away. Removing listener...\n", upstream.ifname)
close(upstream.extChan)
delete(upstreams, upstream.ifname)
}
}
// route not found, skip
continue
}
links, err := netlink.LinkList()
if err != nil {
return fmt.Errorf("error enumerating links, %s", err)
}
for _, link := range links {
if link.Attrs().Index == route.LinkIndex {
if _, ok := upstreams[link.Attrs().Name]; !ok {
log.Printf("new upstream for link '%s', rule '%s', route '%s'\n", link.Attrs().Name, rule, route.Dst)
upstreams[link.Attrs().Name] = &listener{
ifname: link.Attrs().Name,
extChan: make(chan ndp),
intChan: intChan,
errChan: errChan,
ruleNet: ruleNet,
}
}
}
}
}
for name, listener := range upstreams {
listener.RLock()
if listener.finished {
delete(upstreams, name)
}
listener.RUnlock()
}
return nil
}
func (l *listener) | () {
var err error
var handle *pcap.Handle
log.Printf("spawning listener for if %s\n", l.ifname)
l.Lock()
l.started = true
l.Unlock()
defer func() {
if err != nil {
l.errChan <- err
}
l.Lock()
l.finished = true
l.Unlock()
log.Printf("exiting listener for if %s\n", l.ifname)
}()
// open interface in promiscuous mode in order to pickup solicited-node multicasts
handle, err = pcap.OpenLive(l.ifname, snaplen, true, pcap.BlockForever)
if err != nil {
err = fmt.Errorf("pcap open error: %s", err)
return
}
defer handle.Close()
// limit captured packets to icmp6
err = handle.SetBPFFilter("icmp6")
if err != nil {
return
}
var iface *net.Interface
iface, err = net.InterfaceByName(l.ifname)
if err != nil {
return
}
var addrs []net.Addr
var linklocal net.IP
addrs, err = iface.Addrs()
if err != nil {
return
}
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
if v.IP.IsLinkLocalUnicast() {
linklocal = v.IP
break
}
}
}
if linklocal.IsUnspecified() {
err = fmt.Errorf("error finding link local unicast address for if %s", l.ifname)
return
}
var eth layers.Ethernet
var ip6 layers.IPv6
var ip6extensions layers.IPv6ExtensionSkipper
var icmp layers.ICMPv6
var payload gopacket.Payload
decoded := []gopacket.LayerType{}
parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip6, &ip6extensions, &icmp, &payload)
packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
packetsChan := packetSource.Packets()
for {
select {
case packet := <-packetsChan:
parser.DecodeLayers(packet.Data(), &decoded)
for _, layerType := range decoded {
switch layerType {
case layers.LayerTypeICMPv6:
var target net.IP
// IPv6 bounds check
if len(payload) >= 16 {
target = net.IP(payload[:16])
} else {
continue
}
switch icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation, layers.ICMPv6TypeNeighborAdvertisement:
n := ndp{eth: eth, ip6: ip6, icmp: icmp, payload: payload}
vlog.Printf("%s\tread\t%s\tmac_src %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, icmp.TypeCode, eth.SrcMAC, ip6.SrcIP, ip6.DstIP, target)
l.intChan <- n
}
}
}
case n, ok := <-l.extChan:
if !ok {
// channel was closed
return
}
n.eth.DstMAC = nil
if n.ip6.DstIP.IsLinkLocalMulticast() {
// Ethernet MAC is derived by the four low-order octets of IPv6 address
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
} else {
var neighbors []netlink.Neigh
neighbors, err = netlink.NeighList(iface.Index, netlink.FAMILY_V6)
if err != nil {
return
}
for _, neighbor := range neighbors {
if neighbor.IP.Equal(n.ip6.DstIP) {
n.eth.DstMAC = neighbor.HardwareAddr
break
}
}
}
if n.eth.DstMAC == nil {
vlog.Printf("%s: could not find destination MAC address. %s mac_src %s ip6_dst %s ip6_src %s target %s", l.ifname, n.icmp.TypeCode, n.eth.SrcMAC, n.ip6.DstIP, n.ip6.SrcIP, net.IP(n.payload[:16]))
// Try Solicited-Node multicast address
// dst IP is derived by the first 13 octets of multicast address +
// last 3 octets of dst IP
n.ip6.DstIP = append(IPV6SolicitedNode[:13], n.ip6.DstIP[13:]...)
n.eth.DstMAC = append(net.HardwareAddr{0x33, 0x33}, n.ip6.DstIP[12:]...)
}
n.eth.SrcMAC = iface.HardwareAddr
n.ip6.SrcIP = linklocal
buf := gopacket.NewSerializeBuffer()
n.icmp.SetNetworkLayerForChecksum(&n.ip6)
opts := gopacket.SerializeOptions{ComputeChecksums: true}
switch n.icmp.TypeCode.Type() {
case layers.ICMPv6TypeNeighborSolicitation:
// source link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x01, 0x01)
case layers.ICMPv6TypeNeighborAdvertisement:
// target link-layer address opt type, opt length
n.payload = append(n.payload[:16], 0x02, 0x01)
n.icmp.TypeBytes[0] = 0xc0 // router,solicit,override flags
}
n.payload = append(n.payload[:18], iface.HardwareAddr...)
err = gopacket.SerializeLayers(buf, opts, &n.eth, &n.ip6, &n.icmp, &n.payload)
if err != nil {
err = fmt.Errorf("serialize layers error: %s", err)
return
}
err = handle.WritePacketData(buf.Bytes())
if err != nil {
err = fmt.Errorf("pcap write error: %s", err)
return
}
vlog.Printf("%s\twrite\t%s\tmac_dst %s\tip6_src %s\tip6_dst %s\ttarget %s\n", l.ifname, n.icmp.TypeCode, n.eth.DstMAC, n.ip6.SrcIP, n.ip6.DstIP, net.IP(n.payload[:16]))
}
}
}
| handler | identifier_name |
piston.rs | // Copyright 2014 Dawid Ciężarkiewicz
// See LICENSE file for more information
use cgmath;
use cgmath::FixedArray;
use cgmath::{Matrix, Matrix4, Matrix3, Point3, Vector3, Vector4, ToMatrix4};
use cgmath::rad;
use cgmath::Point as CgPoint;
use cgmath::{Transform, AffineMatrix3};
use cgmath::Vector;
use creature::Creature;
use creature::{Grunt, Scout, Heavy, Human};
use device;
use device::draw::CommandBuffer;
use gfx::GlCommandBuffer;
use gfx::GlDevice;
use game::Action;
use game::GameState;
use game::{Run, Move, Turn, Melee, Wait};
use gfx;
use gfx::{Device, DeviceHelper};
use hex2d::{Forward, Backward, Left, Right, Direction, AbsoluteDirection};
use hex2d::{North, Position, Point};
use input::keyboard as key;
use map::{Wall, Sand, GlassWall, Floor};
use std;
use glfw_window::GlfwWindow as Window;
use std::collections::{RingBuf};
use std::num::{zero, one};
use time;
use obj;
use genmesh;
use genmesh::Indexer;
use shader_version;
use gfx::ToSlice;
use piston::{
Events,
Render,
Update,
Input,
WindowSettings,
};
use current::Set;
use event::{
Ups, MaxFps,
};
use piston::input::{
InputEvent,
Press,
Release,
Keyboard,
};
use std::mem::size_of;
#[vertex_format]
struct Vertex {
#[as_float]
#[name = "a_Pos"]
pos: [f32, ..3],
#[as_float]
#[name = "a_Normal"]
normal: [f32, ..3],
}
impl std::cmp::PartialEq for Vertex {
fn eq(&self, other: &Vertex) -> bool {
self.pos.as_slice() == other.pos.as_slice() &&
self.normal.as_slice() == other.normal.as_slice()
}
}
impl std::clone::Clone for Vertex {
fn clone(&self) -> Vertex {
Vertex {
pos: self.pos,
normal: self.normal
}
}
}
// The shader_param attribute makes sure the following struct can be used to
// pass parameters to a shader. Its argument is the name of the type that will
// be generated to represent your the program. Search for `Batch` below, to
// see how it's used.
#[shader_param(Batch)]
struct Params {
#[name = "u_Projection"]
projection: [[f32, ..4], ..4],
#[name = "u_View"]
view: [[f32, ..4], ..4],
#[name = "u_Model"]
model: [[f32, ..4], ..4],
#[name = "u_Color"]
color: [f32, ..4],
#[name = "u_LightDirection"]
light: [f32, ..3],
}
static VERTEX_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec3 a_Pos;
in vec3 a_Normal;
smooth out vec4 v_Color;
uniform mat4 u_Projection;
uniform mat4 u_View;
uniform mat4 u_Model;
uniform vec4 u_Color;
uniform vec3 u_LightDirection;
void main() {
vec3 normal = normalize(vec3(u_Model * vec4(a_Normal, 0.0)));
float dot = max(dot(normal, u_LightDirection), 0.0);
v_Color = u_Color * (dot + 2) / 3;
gl_Position = u_Projection * u_View * u_Model * vec4(a_Pos, 1.0);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
smooth in vec4 v_Color;
out vec4 o_Color;
void main() {
o_Color = v_Color;
}
"
};
struct Renderer<C : device::draw::CommandBuffer, D: gfx::Device<C>> {
graphics: gfx::Graphics<D, C>,
tile_batch: Batch,
creature_batch: Batch,
projection: Matrix4<f32>,
view: Matrix4<f32>,
frame: gfx::Frame,
cd: gfx::ClearData,
}
type Color = [f32, ..4];
static BACKGROUND_COLOR: Color = [0.0f32, 0.0, 0.0, 1.0];
static PLAYER_COLOR : Color = [0.0f32, 0.0, 1.0, 1.0];
static WALL_COLOR : Color = [0.3f32, 0.2, 0.0, 1.0];
static GLASSWALL_COLOR : Color = [0.7f32, 0.7, 0.95, 1.0];
static SAND_COLOR : Color = [1.0f32, 1.0, 0.8, 1.0];
static FLOOR_COLOR : Color = [1.0f32, 0.9, 0.9, 1.0];
static SCOUT_COLOR : Color = [0.0f32, 0.8, 0.0, 1.0];
static GRUNT_COLOR : Color = [0.0f32, 0.6, 0.0, 1.0];
static HEAVY_COLOR : Color = [0.0f32, 0.4, 0.0, 1.0];
static WALL_HEIGHT : f32 = 0.3f32;
static HACK_PLAYER_KNOWS_ALL : bool = false;
static HACK_PLAYER_SEES_EVERYONE : bool = false;
fn grey_out(c : Color) -> Color {
let [r, g, b, a] = c;
[ (r+0.4f32)/4.0f32, (g + 0.4f32)/4.0f32, (b + 0.4f32)/4.0f32, a]
}
static BILLION : f32 = 1000000000f32;
static TAU : f32 = std::f32::consts::PI_2;
static TILE_OUTER_R : f32 = 1.0f32;
//static tile_inner_r : f32 = TILE_OUTER_R * 3f32.sqrt() / 2f32;
fn tile_inner_r() -> f32 {
TILE_OUTER_R * 3f32.sqrt() / 2f32
}
#[allow(dead_code)]
fn edge_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32
}
#[allow(dead_code)]
fn side_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32 + TAU / 12f32
}
fn dir_to_angle(d : AbsoluteDirection) -> f32 {
-(d.to_uint() as f32 * TAU) / 6.0f32
}
type IndexVector = Vec<u8>;
type VertexVector = Vec<Vertex>;
pub fn load_hex(path : &str) -> (IndexVector, VertexVector) {
let obj = obj::load(&Path::new(path)).unwrap();
let mut index_data : Vec<u8> = vec!();
let mut vertex_data : Vec<Vertex> = vec!();
{
let mut indexer = genmesh::LruIndexer::new(16, |_, v| {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
self.destination = dest;
}
pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running = !self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl| !pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if !player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if !cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
} |
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much_behind = 5f32;
let how_much_front = 3f32;
let (dbx, dby) = (dx * how_much_behind, dy * how_much_behind);
let (dfx, dfy) = (dx * how_much_front, dy * how_much_front);
self.camera_pos.set_destination(Point3::new(x - dbx, y - dby, 8.0));
self.camera_focus.set_destination(Point3::new(x + dfx, y + dfy, 0.0));
}
fn update_movement(&mut self, dt : f32) {
self.camera_pos.update(dt);
self.camera_focus.update(dt);
}
fn update_camera(&mut self, renderer : &mut Renderer<GlCommandBuffer, GlDevice>) {
let view : AffineMatrix3<f32> = Transform::look_at(
&self.camera_pos.current,
&self.camera_focus.current,
&Vector3::unit_z(),
);
renderer.set_view(&view);
}
}
impl PistonUI {
pub fn new() -> (PistonUI, Window) {
let width = 800;
let height = 600;
let window = Window::new(
shader_version::opengl::OpenGL_3_2,
WindowSettings {
title: "Rustyhex".to_string(),
size: [width, height],
fullscreen: false,
exit_on_esc: true,
samples: 4,
}
);
let frame = gfx::Frame::new(width as u16, height as u16);
let device = gfx::GlDevice::new(|s| window.window.get_proc_address(s));
let renderer = Renderer::new(device, frame);
(PistonUI {
render_controller: RenderController::new(),
input_controller: InputController::new(),
renderer: renderer,
}, window)
}
fn game_update(&mut self, game : &mut GameState) {
loop {
if game.tick() {
match self.input_controller.pop_action() {
Some(action) => {
game.player.as_ref().map(|pl| pl.borrow_mut().action_set(action));
},
_ => {
break;
}
};
} else {
break;
}
}
match game.player {
Some(ref pl) => self.render_controller.set_player_pos(&*pl.borrow()),
None => {}
}
}
pub fn run (&mut self, window : Window, game : &mut GameState) {
game.update_player_los();
{
let ref pl = game.player.as_ref();
if pl.is_some() {
let pl = pl.unwrap();
self.render_controller.set_player_pos(&*pl.borrow());
self.render_controller.move_camera_to_destination();
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
render_controller.update_camera(renderer);
}
}
let mut render_time = time::precise_time_ns();
let mut events = Events::new(window).set(Ups(60)).set(MaxFps(60));
for e in events {
match e {
Render(_) => {
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
let t = time::precise_time_ns();
let dt = t - render_time;
render_time = t;
render_controller.update_movement(dt as f32 / BILLION as f32);
render_controller.update_camera(renderer);
renderer.clear();
render_controller.render_map(renderer, game);
renderer.end_frame();
},
Update(_) => {
self.game_update(game);
},
Input(i) => {
self.input_controller.push_input(i.clone());
}
}
}
}
} | self.player_pos = pos;
let front = pos.p + pos.dir; | random_line_split |
piston.rs | // Copyright 2014 Dawid Ciężarkiewicz
// See LICENSE file for more information
use cgmath;
use cgmath::FixedArray;
use cgmath::{Matrix, Matrix4, Matrix3, Point3, Vector3, Vector4, ToMatrix4};
use cgmath::rad;
use cgmath::Point as CgPoint;
use cgmath::{Transform, AffineMatrix3};
use cgmath::Vector;
use creature::Creature;
use creature::{Grunt, Scout, Heavy, Human};
use device;
use device::draw::CommandBuffer;
use gfx::GlCommandBuffer;
use gfx::GlDevice;
use game::Action;
use game::GameState;
use game::{Run, Move, Turn, Melee, Wait};
use gfx;
use gfx::{Device, DeviceHelper};
use hex2d::{Forward, Backward, Left, Right, Direction, AbsoluteDirection};
use hex2d::{North, Position, Point};
use input::keyboard as key;
use map::{Wall, Sand, GlassWall, Floor};
use std;
use glfw_window::GlfwWindow as Window;
use std::collections::{RingBuf};
use std::num::{zero, one};
use time;
use obj;
use genmesh;
use genmesh::Indexer;
use shader_version;
use gfx::ToSlice;
use piston::{
Events,
Render,
Update,
Input,
WindowSettings,
};
use current::Set;
use event::{
Ups, MaxFps,
};
use piston::input::{
InputEvent,
Press,
Release,
Keyboard,
};
use std::mem::size_of;
#[vertex_format]
struct Vertex {
#[as_float]
#[name = "a_Pos"]
pos: [f32, ..3],
#[as_float]
#[name = "a_Normal"]
normal: [f32, ..3],
}
impl std::cmp::PartialEq for Vertex {
fn eq(&self, other: &Vertex) -> bool {
self.pos.as_slice() == other.pos.as_slice() &&
self.normal.as_slice() == other.normal.as_slice()
}
}
impl std::clone::Clone for Vertex {
fn clone(&self) -> Vertex {
Vertex {
pos: self.pos,
normal: self.normal
}
}
}
// The shader_param attribute makes sure the following struct can be used to
// pass parameters to a shader. Its argument is the name of the type that will
// be generated to represent your the program. Search for `Batch` below, to
// see how it's used.
#[shader_param(Batch)]
struct Params {
#[name = "u_Projection"]
projection: [[f32, ..4], ..4],
#[name = "u_View"]
view: [[f32, ..4], ..4],
#[name = "u_Model"]
model: [[f32, ..4], ..4],
#[name = "u_Color"]
color: [f32, ..4],
#[name = "u_LightDirection"]
light: [f32, ..3],
}
static VERTEX_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec3 a_Pos;
in vec3 a_Normal;
smooth out vec4 v_Color;
uniform mat4 u_Projection;
uniform mat4 u_View;
uniform mat4 u_Model;
uniform vec4 u_Color;
uniform vec3 u_LightDirection;
void main() {
vec3 normal = normalize(vec3(u_Model * vec4(a_Normal, 0.0)));
float dot = max(dot(normal, u_LightDirection), 0.0);
v_Color = u_Color * (dot + 2) / 3;
gl_Position = u_Projection * u_View * u_Model * vec4(a_Pos, 1.0);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
smooth in vec4 v_Color;
out vec4 o_Color;
void main() {
o_Color = v_Color;
}
"
};
struct Renderer<C : device::draw::CommandBuffer, D: gfx::Device<C>> {
graphics: gfx::Graphics<D, C>,
tile_batch: Batch,
creature_batch: Batch,
projection: Matrix4<f32>,
view: Matrix4<f32>,
frame: gfx::Frame,
cd: gfx::ClearData,
}
type Color = [f32, ..4];
static BACKGROUND_COLOR: Color = [0.0f32, 0.0, 0.0, 1.0];
static PLAYER_COLOR : Color = [0.0f32, 0.0, 1.0, 1.0];
static WALL_COLOR : Color = [0.3f32, 0.2, 0.0, 1.0];
static GLASSWALL_COLOR : Color = [0.7f32, 0.7, 0.95, 1.0];
static SAND_COLOR : Color = [1.0f32, 1.0, 0.8, 1.0];
static FLOOR_COLOR : Color = [1.0f32, 0.9, 0.9, 1.0];
static SCOUT_COLOR : Color = [0.0f32, 0.8, 0.0, 1.0];
static GRUNT_COLOR : Color = [0.0f32, 0.6, 0.0, 1.0];
static HEAVY_COLOR : Color = [0.0f32, 0.4, 0.0, 1.0];
static WALL_HEIGHT : f32 = 0.3f32;
static HACK_PLAYER_KNOWS_ALL : bool = false;
static HACK_PLAYER_SEES_EVERYONE : bool = false;
fn grey_out(c : Color) -> Color {
let [r, g, b, a] = c;
[ (r+0.4f32)/4.0f32, (g + 0.4f32)/4.0f32, (b + 0.4f32)/4.0f32, a]
}
static BILLION : f32 = 1000000000f32;
static TAU : f32 = std::f32::consts::PI_2;
static TILE_OUTER_R : f32 = 1.0f32;
//static tile_inner_r : f32 = TILE_OUTER_R * 3f32.sqrt() / 2f32;
fn tile_inner_r() -> f32 {
TILE_OUTER_R * 3f32.sqrt() / 2f32
}
#[allow(dead_code)]
fn edge_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32
}
#[allow(dead_code)]
fn side_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32 + TAU / 12f32
}
fn dir_to_angle(d : AbsoluteDirection) -> f32 {
-(d.to_uint() as f32 * TAU) / 6.0f32
}
type IndexVector = Vec<u8>;
type VertexVector = Vec<Vertex>;
pub fn load_hex(path : &str) -> (IndexVector, VertexVector) {
let obj = obj::load(&Path::new(path)).unwrap();
let mut index_data : Vec<u8> = vec!();
let mut vertex_data : Vec<Vertex> = vec!();
{
let mut indexer = genmesh::LruIndexer::new(16, |_, v| {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
self.destination = dest;
}
pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running = !self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl| !pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if !player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if !cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
}
self.player_pos = pos;
let front = pos.p + pos.dir;
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much_behind = 5f32;
let how_much_front = 3f32;
let (dbx, dby) = (dx * how_much_behind, dy * how_much_behind);
let (dfx, dfy) = (dx * how_much_front, dy * how_much_front);
self.camera_pos.set_destination(Point3::new(x - dbx, y - dby, 8.0));
self.camera_focus.set_destination(Point3::new(x + dfx, y + dfy, 0.0));
}
fn up | mut self, dt : f32) {
self.camera_pos.update(dt);
self.camera_focus.update(dt);
}
fn update_camera(&mut self, renderer : &mut Renderer<GlCommandBuffer, GlDevice>) {
let view : AffineMatrix3<f32> = Transform::look_at(
&self.camera_pos.current,
&self.camera_focus.current,
&Vector3::unit_z(),
);
renderer.set_view(&view);
}
}
impl PistonUI {
pub fn new() -> (PistonUI, Window) {
let width = 800;
let height = 600;
let window = Window::new(
shader_version::opengl::OpenGL_3_2,
WindowSettings {
title: "Rustyhex".to_string(),
size: [width, height],
fullscreen: false,
exit_on_esc: true,
samples: 4,
}
);
let frame = gfx::Frame::new(width as u16, height as u16);
let device = gfx::GlDevice::new(|s| window.window.get_proc_address(s));
let renderer = Renderer::new(device, frame);
(PistonUI {
render_controller: RenderController::new(),
input_controller: InputController::new(),
renderer: renderer,
}, window)
}
fn game_update(&mut self, game : &mut GameState) {
loop {
if game.tick() {
match self.input_controller.pop_action() {
Some(action) => {
game.player.as_ref().map(|pl| pl.borrow_mut().action_set(action));
},
_ => {
break;
}
};
} else {
break;
}
}
match game.player {
Some(ref pl) => self.render_controller.set_player_pos(&*pl.borrow()),
None => {}
}
}
pub fn run (&mut self, window : Window, game : &mut GameState) {
game.update_player_los();
{
let ref pl = game.player.as_ref();
if pl.is_some() {
let pl = pl.unwrap();
self.render_controller.set_player_pos(&*pl.borrow());
self.render_controller.move_camera_to_destination();
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
render_controller.update_camera(renderer);
}
}
let mut render_time = time::precise_time_ns();
let mut events = Events::new(window).set(Ups(60)).set(MaxFps(60));
for e in events {
match e {
Render(_) => {
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
let t = time::precise_time_ns();
let dt = t - render_time;
render_time = t;
render_controller.update_movement(dt as f32 / BILLION as f32);
render_controller.update_camera(renderer);
renderer.clear();
render_controller.render_map(renderer, game);
renderer.end_frame();
},
Update(_) => {
self.game_update(game);
},
Input(i) => {
self.input_controller.push_input(i.clone());
}
}
}
}
}
| date_movement(& | identifier_name |
piston.rs | // Copyright 2014 Dawid Ciężarkiewicz
// See LICENSE file for more information
use cgmath;
use cgmath::FixedArray;
use cgmath::{Matrix, Matrix4, Matrix3, Point3, Vector3, Vector4, ToMatrix4};
use cgmath::rad;
use cgmath::Point as CgPoint;
use cgmath::{Transform, AffineMatrix3};
use cgmath::Vector;
use creature::Creature;
use creature::{Grunt, Scout, Heavy, Human};
use device;
use device::draw::CommandBuffer;
use gfx::GlCommandBuffer;
use gfx::GlDevice;
use game::Action;
use game::GameState;
use game::{Run, Move, Turn, Melee, Wait};
use gfx;
use gfx::{Device, DeviceHelper};
use hex2d::{Forward, Backward, Left, Right, Direction, AbsoluteDirection};
use hex2d::{North, Position, Point};
use input::keyboard as key;
use map::{Wall, Sand, GlassWall, Floor};
use std;
use glfw_window::GlfwWindow as Window;
use std::collections::{RingBuf};
use std::num::{zero, one};
use time;
use obj;
use genmesh;
use genmesh::Indexer;
use shader_version;
use gfx::ToSlice;
use piston::{
Events,
Render,
Update,
Input,
WindowSettings,
};
use current::Set;
use event::{
Ups, MaxFps,
};
use piston::input::{
InputEvent,
Press,
Release,
Keyboard,
};
use std::mem::size_of;
#[vertex_format]
struct Vertex {
#[as_float]
#[name = "a_Pos"]
pos: [f32, ..3],
#[as_float]
#[name = "a_Normal"]
normal: [f32, ..3],
}
impl std::cmp::PartialEq for Vertex {
fn eq(&self, other: &Vertex) -> bool {
self.pos.as_slice() == other.pos.as_slice() &&
self.normal.as_slice() == other.normal.as_slice()
}
}
impl std::clone::Clone for Vertex {
fn clone(&self) -> Vertex {
Vertex {
pos: self.pos,
normal: self.normal
}
}
}
// The shader_param attribute makes sure the following struct can be used to
// pass parameters to a shader. Its argument is the name of the type that will
// be generated to represent your the program. Search for `Batch` below, to
// see how it's used.
#[shader_param(Batch)]
struct Params {
#[name = "u_Projection"]
projection: [[f32, ..4], ..4],
#[name = "u_View"]
view: [[f32, ..4], ..4],
#[name = "u_Model"]
model: [[f32, ..4], ..4],
#[name = "u_Color"]
color: [f32, ..4],
#[name = "u_LightDirection"]
light: [f32, ..3],
}
static VERTEX_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec3 a_Pos;
in vec3 a_Normal;
smooth out vec4 v_Color;
uniform mat4 u_Projection;
uniform mat4 u_View;
uniform mat4 u_Model;
uniform vec4 u_Color;
uniform vec3 u_LightDirection;
void main() {
vec3 normal = normalize(vec3(u_Model * vec4(a_Normal, 0.0)));
float dot = max(dot(normal, u_LightDirection), 0.0);
v_Color = u_Color * (dot + 2) / 3;
gl_Position = u_Projection * u_View * u_Model * vec4(a_Pos, 1.0);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
smooth in vec4 v_Color;
out vec4 o_Color;
void main() {
o_Color = v_Color;
}
"
};
struct Renderer<C : device::draw::CommandBuffer, D: gfx::Device<C>> {
graphics: gfx::Graphics<D, C>,
tile_batch: Batch,
creature_batch: Batch,
projection: Matrix4<f32>,
view: Matrix4<f32>,
frame: gfx::Frame,
cd: gfx::ClearData,
}
type Color = [f32, ..4];
static BACKGROUND_COLOR: Color = [0.0f32, 0.0, 0.0, 1.0];
static PLAYER_COLOR : Color = [0.0f32, 0.0, 1.0, 1.0];
static WALL_COLOR : Color = [0.3f32, 0.2, 0.0, 1.0];
static GLASSWALL_COLOR : Color = [0.7f32, 0.7, 0.95, 1.0];
static SAND_COLOR : Color = [1.0f32, 1.0, 0.8, 1.0];
static FLOOR_COLOR : Color = [1.0f32, 0.9, 0.9, 1.0];
static SCOUT_COLOR : Color = [0.0f32, 0.8, 0.0, 1.0];
static GRUNT_COLOR : Color = [0.0f32, 0.6, 0.0, 1.0];
static HEAVY_COLOR : Color = [0.0f32, 0.4, 0.0, 1.0];
static WALL_HEIGHT : f32 = 0.3f32;
static HACK_PLAYER_KNOWS_ALL : bool = false;
static HACK_PLAYER_SEES_EVERYONE : bool = false;
fn grey_out(c : Color) -> Color {
let [r, g, b, a] = c;
[ (r+0.4f32)/4.0f32, (g + 0.4f32)/4.0f32, (b + 0.4f32)/4.0f32, a]
}
static BILLION : f32 = 1000000000f32;
static TAU : f32 = std::f32::consts::PI_2;
static TILE_OUTER_R : f32 = 1.0f32;
//static tile_inner_r : f32 = TILE_OUTER_R * 3f32.sqrt() / 2f32;
fn tile_inner_r() -> f32 {
TILE_OUTER_R * 3f32.sqrt() / 2f32
}
#[allow(dead_code)]
fn edge_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32
}
#[allow(dead_code)]
fn side_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32 + TAU / 12f32
}
fn dir_to_angle(d : AbsoluteDirection) -> f32 {
-(d.to_uint() as f32 * TAU) / 6.0f32
}
type IndexVector = Vec<u8>;
type VertexVector = Vec<Vertex>;
pub fn load_hex(path : &str) -> (IndexVector, VertexVector) {
let obj = obj::load(&Path::new(path)).unwrap();
let mut index_data : Vec<u8> = vec!();
let mut vertex_data : Vec<Vertex> = vec!();
{
let mut indexer = genmesh::LruIndexer::new(16, |_, v| {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
| pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running = !self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl| !pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if !player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if !cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
}
self.player_pos = pos;
let front = pos.p + pos.dir;
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much_behind = 5f32;
let how_much_front = 3f32;
let (dbx, dby) = (dx * how_much_behind, dy * how_much_behind);
let (dfx, dfy) = (dx * how_much_front, dy * how_much_front);
self.camera_pos.set_destination(Point3::new(x - dbx, y - dby, 8.0));
self.camera_focus.set_destination(Point3::new(x + dfx, y + dfy, 0.0));
}
fn update_movement(&mut self, dt : f32) {
self.camera_pos.update(dt);
self.camera_focus.update(dt);
}
fn update_camera(&mut self, renderer : &mut Renderer<GlCommandBuffer, GlDevice>) {
let view : AffineMatrix3<f32> = Transform::look_at(
&self.camera_pos.current,
&self.camera_focus.current,
&Vector3::unit_z(),
);
renderer.set_view(&view);
}
}
impl PistonUI {
pub fn new() -> (PistonUI, Window) {
let width = 800;
let height = 600;
let window = Window::new(
shader_version::opengl::OpenGL_3_2,
WindowSettings {
title: "Rustyhex".to_string(),
size: [width, height],
fullscreen: false,
exit_on_esc: true,
samples: 4,
}
);
let frame = gfx::Frame::new(width as u16, height as u16);
let device = gfx::GlDevice::new(|s| window.window.get_proc_address(s));
let renderer = Renderer::new(device, frame);
(PistonUI {
render_controller: RenderController::new(),
input_controller: InputController::new(),
renderer: renderer,
}, window)
}
fn game_update(&mut self, game : &mut GameState) {
loop {
if game.tick() {
match self.input_controller.pop_action() {
Some(action) => {
game.player.as_ref().map(|pl| pl.borrow_mut().action_set(action));
},
_ => {
break;
}
};
} else {
break;
}
}
match game.player {
Some(ref pl) => self.render_controller.set_player_pos(&*pl.borrow()),
None => {}
}
}
pub fn run (&mut self, window : Window, game : &mut GameState) {
game.update_player_los();
{
let ref pl = game.player.as_ref();
if pl.is_some() {
let pl = pl.unwrap();
self.render_controller.set_player_pos(&*pl.borrow());
self.render_controller.move_camera_to_destination();
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
render_controller.update_camera(renderer);
}
}
let mut render_time = time::precise_time_ns();
let mut events = Events::new(window).set(Ups(60)).set(MaxFps(60));
for e in events {
match e {
Render(_) => {
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
let t = time::precise_time_ns();
let dt = t - render_time;
render_time = t;
render_controller.update_movement(dt as f32 / BILLION as f32);
render_controller.update_camera(renderer);
renderer.clear();
render_controller.render_map(renderer, game);
renderer.end_frame();
},
Update(_) => {
self.game_update(game);
},
Input(i) => {
self.input_controller.push_input(i.clone());
}
}
}
}
}
| self.destination = dest;
}
| identifier_body |
piston.rs | // Copyright 2014 Dawid Ciężarkiewicz
// See LICENSE file for more information
use cgmath;
use cgmath::FixedArray;
use cgmath::{Matrix, Matrix4, Matrix3, Point3, Vector3, Vector4, ToMatrix4};
use cgmath::rad;
use cgmath::Point as CgPoint;
use cgmath::{Transform, AffineMatrix3};
use cgmath::Vector;
use creature::Creature;
use creature::{Grunt, Scout, Heavy, Human};
use device;
use device::draw::CommandBuffer;
use gfx::GlCommandBuffer;
use gfx::GlDevice;
use game::Action;
use game::GameState;
use game::{Run, Move, Turn, Melee, Wait};
use gfx;
use gfx::{Device, DeviceHelper};
use hex2d::{Forward, Backward, Left, Right, Direction, AbsoluteDirection};
use hex2d::{North, Position, Point};
use input::keyboard as key;
use map::{Wall, Sand, GlassWall, Floor};
use std;
use glfw_window::GlfwWindow as Window;
use std::collections::{RingBuf};
use std::num::{zero, one};
use time;
use obj;
use genmesh;
use genmesh::Indexer;
use shader_version;
use gfx::ToSlice;
use piston::{
Events,
Render,
Update,
Input,
WindowSettings,
};
use current::Set;
use event::{
Ups, MaxFps,
};
use piston::input::{
InputEvent,
Press,
Release,
Keyboard,
};
use std::mem::size_of;
#[vertex_format]
struct Vertex {
#[as_float]
#[name = "a_Pos"]
pos: [f32, ..3],
#[as_float]
#[name = "a_Normal"]
normal: [f32, ..3],
}
impl std::cmp::PartialEq for Vertex {
fn eq(&self, other: &Vertex) -> bool {
self.pos.as_slice() == other.pos.as_slice() &&
self.normal.as_slice() == other.normal.as_slice()
}
}
impl std::clone::Clone for Vertex {
fn clone(&self) -> Vertex {
Vertex {
pos: self.pos,
normal: self.normal
}
}
}
// The shader_param attribute makes sure the following struct can be used to
// pass parameters to a shader. Its argument is the name of the type that will
// be generated to represent your the program. Search for `Batch` below, to
// see how it's used.
#[shader_param(Batch)]
struct Params {
#[name = "u_Projection"]
projection: [[f32, ..4], ..4],
#[name = "u_View"]
view: [[f32, ..4], ..4],
#[name = "u_Model"]
model: [[f32, ..4], ..4],
#[name = "u_Color"]
color: [f32, ..4],
#[name = "u_LightDirection"]
light: [f32, ..3],
}
static VERTEX_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
in vec3 a_Pos;
in vec3 a_Normal;
smooth out vec4 v_Color;
uniform mat4 u_Projection;
uniform mat4 u_View;
uniform mat4 u_Model;
uniform vec4 u_Color;
uniform vec3 u_LightDirection;
void main() {
vec3 normal = normalize(vec3(u_Model * vec4(a_Normal, 0.0)));
float dot = max(dot(normal, u_LightDirection), 0.0);
v_Color = u_Color * (dot + 2) / 3;
gl_Position = u_Projection * u_View * u_Model * vec4(a_Pos, 1.0);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
smooth in vec4 v_Color;
out vec4 o_Color;
void main() {
o_Color = v_Color;
}
"
};
struct Renderer<C : device::draw::CommandBuffer, D: gfx::Device<C>> {
graphics: gfx::Graphics<D, C>,
tile_batch: Batch,
creature_batch: Batch,
projection: Matrix4<f32>,
view: Matrix4<f32>,
frame: gfx::Frame,
cd: gfx::ClearData,
}
type Color = [f32, ..4];
static BACKGROUND_COLOR: Color = [0.0f32, 0.0, 0.0, 1.0];
static PLAYER_COLOR : Color = [0.0f32, 0.0, 1.0, 1.0];
static WALL_COLOR : Color = [0.3f32, 0.2, 0.0, 1.0];
static GLASSWALL_COLOR : Color = [0.7f32, 0.7, 0.95, 1.0];
static SAND_COLOR : Color = [1.0f32, 1.0, 0.8, 1.0];
static FLOOR_COLOR : Color = [1.0f32, 0.9, 0.9, 1.0];
static SCOUT_COLOR : Color = [0.0f32, 0.8, 0.0, 1.0];
static GRUNT_COLOR : Color = [0.0f32, 0.6, 0.0, 1.0];
static HEAVY_COLOR : Color = [0.0f32, 0.4, 0.0, 1.0];
static WALL_HEIGHT : f32 = 0.3f32;
static HACK_PLAYER_KNOWS_ALL : bool = false;
static HACK_PLAYER_SEES_EVERYONE : bool = false;
fn grey_out(c : Color) -> Color {
let [r, g, b, a] = c;
[ (r+0.4f32)/4.0f32, (g + 0.4f32)/4.0f32, (b + 0.4f32)/4.0f32, a]
}
static BILLION : f32 = 1000000000f32;
static TAU : f32 = std::f32::consts::PI_2;
static TILE_OUTER_R : f32 = 1.0f32;
//static tile_inner_r : f32 = TILE_OUTER_R * 3f32.sqrt() / 2f32;
fn tile_inner_r() -> f32 {
TILE_OUTER_R * 3f32.sqrt() / 2f32
}
#[allow(dead_code)]
fn edge_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32
}
#[allow(dead_code)]
fn side_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32 + TAU / 12f32
}
fn dir_to_angle(d : AbsoluteDirection) -> f32 {
-(d.to_uint() as f32 * TAU) / 6.0f32
}
type IndexVector = Vec<u8>;
type VertexVector = Vec<Vertex>;
pub fn load_hex(path : &str) -> (IndexVector, VertexVector) {
let obj = obj::load(&Path::new(path)).unwrap();
let mut index_data : Vec<u8> = vec!();
let mut vertex_data : Vec<Vertex> = vec!();
{
let mut indexer = genmesh::LruIndexer::new(16, |_, v| {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
self.destination = dest;
}
pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running = !self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl| !pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if !player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if !cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
}
self.player_pos = pos;
let front = pos.p + pos.dir;
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much_behind = 5f32;
let how_much_front = 3f32;
let (dbx, dby) = (dx * how_much_behind, dy * how_much_behind);
let (dfx, dfy) = (dx * how_much_front, dy * how_much_front);
self.camera_pos.set_destination(Point3::new(x - dbx, y - dby, 8.0));
self.camera_focus.set_destination(Point3::new(x + dfx, y + dfy, 0.0));
}
fn update_movement(&mut self, dt : f32) {
self.camera_pos.update(dt);
self.camera_focus.update(dt);
}
fn update_camera(&mut self, renderer : &mut Renderer<GlCommandBuffer, GlDevice>) {
let view : AffineMatrix3<f32> = Transform::look_at(
&self.camera_pos.current,
&self.camera_focus.current,
&Vector3::unit_z(),
);
renderer.set_view(&view);
}
}
impl PistonUI {
pub fn new() -> (PistonUI, Window) {
let width = 800;
let height = 600;
let window = Window::new(
shader_version::opengl::OpenGL_3_2,
WindowSettings {
title: "Rustyhex".to_string(),
size: [width, height],
fullscreen: false,
exit_on_esc: true,
samples: 4,
}
);
let frame = gfx::Frame::new(width as u16, height as u16);
let device = gfx::GlDevice::new(|s| window.window.get_proc_address(s));
let renderer = Renderer::new(device, frame);
(PistonUI {
render_controller: RenderController::new(),
input_controller: InputController::new(),
renderer: renderer,
}, window)
}
fn game_update(&mut self, game : &mut GameState) {
loop {
if game.tick() {
match self.input_controller.pop_action() {
Some(action) => {
game.player.as_ref().map(|pl| pl.borrow_mut().action_set(action));
},
_ => {
break;
}
};
} else {
break;
}
}
match game.player {
Some(ref pl) => self.render_controller.set_player_pos(&*pl.borrow()),
None => {} | }
}
pub fn run (&mut self, window : Window, game : &mut GameState) {
game.update_player_los();
{
let ref pl = game.player.as_ref();
if pl.is_some() {
let pl = pl.unwrap();
self.render_controller.set_player_pos(&*pl.borrow());
self.render_controller.move_camera_to_destination();
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
render_controller.update_camera(renderer);
}
}
let mut render_time = time::precise_time_ns();
let mut events = Events::new(window).set(Ups(60)).set(MaxFps(60));
for e in events {
match e {
Render(_) => {
let &PistonUI {
ref mut renderer,
ref mut render_controller,
..
} = self;
let t = time::precise_time_ns();
let dt = t - render_time;
render_time = t;
render_controller.update_movement(dt as f32 / BILLION as f32);
render_controller.update_camera(renderer);
renderer.clear();
render_controller.render_map(renderer, game);
renderer.end_frame();
},
Update(_) => {
self.game_update(game);
},
Input(i) => {
self.input_controller.push_input(i.clone());
}
}
}
}
}
| conditional_block | |
EnaRest.py | __author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
'''
'''
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
# this method is called for writing smaller files (<= 260MB) to disk, larger files use the
# upload method in ChunkedUpload class
from django.utils import timezone
# need to make a chunked upload record to store deails of the file
if request.method == 'POST':
c = {}
f = request.FILES['file']
fname = f.__str__()
attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}
chunked_upload = ChunkedUpload(**attrs)
# file starts empty
chunked_upload.file.save(name='', content=ContentFile(''), save=True)
path = chunked_upload.file
destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
c.update(csrf(request))
# create output structure to pass back to jquery-upload
files = {'files': {}}
files['files']['name'] = f._name
files['files']['size'] = path.size / (1000 * 1000.0)
files['files']['id'] = chunked_upload.id
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return HttpResponse(str, content_type='json')
def resume_chunked(request):
file_name = request.GET.get('filename')
user_id = request.user.id
# retrieve incomplete file for user with this name
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(
'-offset')[:1]
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def get_partial_uploads(request):
user_id = request.user.id
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
|
def hash_upload(request):
# utility method to create an md5 hash of a given file path
# open uploaded file
file_id = request.GET['file_id']
print('hash started ' + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
# now hash opened file
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
file_obj.hash = md5.hexdigest()
file_obj.save()
output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = json.dumps(output_dict)
print('hash complete ' + file_id)
return HttpResponse(out, content_type='json')
def inspect_file(request):
# utility method to examine a file and return meta-data to the frontend
output_dict = {'file_type': 'unknown', 'do_compress': False}
# get reference to file
file_id = request.GET['file_id']
chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
# size threshold to determine if a file should be compressed
zip_threshold = 200000000 # size in bytes
# check if file is compressed
is_zipped = u.is_gzipped(file_name)
if chunked_upload.offset >= zip_threshold and not is_zipped:
output_dict['do_compress'] = True
# check for file type
if u.is_pdf_file(file_name):
output_dict['file_type'] = 'pdf'
else:
try:
if u.is_fastq_file(file_name):
output_dict['file_type'] = 'fastq'
if not is_zipped:
output_dict['do_compress'] = True
elif u.is_sam_file(file_name):
output_dict['file_type'] = 'sam'
if not is_zipped:
output_dict['do_compress'] = False
elif u.is_bam_file(file_name):
output_dict['file_type'] = 'bam'
if not is_zipped:
output_dict['do_compress'] = False
else: # make file type same as extension
output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]
except:
output_dict['file_type'] = 'unknown'
# add datafile schema
chunked_upload.type = output_dict['file_type']
chunked_upload.save()
# ...and obtain the inserted record
profile_id = request.session['profile_id']
component = "datafile"
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_id")] = file_id
auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
auto_fields[DataFile().get_qualified_field("file_location")] = file_name
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
# get default type from schema
type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
if type:
type = type[0]["default_value"]
auto_fields[DataFile().get_qualified_field("type")] = type
df = BrokerDA(context=dict(),
profile_id=profile_id,
component=component,
auto_fields=auto_fields,
visualize="last_record"
).do_save_edit().get("record_object", dict())
out = jsonpickle.encode(output_dict)
return HttpResponse(out, content_type='json')
def zip_file(request):
# need to get a reference to the file to zip
file_id = request.GET['file_id']
print("zip started " + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
# get the name of the file to zip and change its suffix to .gz
output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
output_file_name = file_obj.filename + '.gz'
try:
# open the file as gzip acrchive...set compression level
temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
myzip = gzip.open(temp_name, 'wb', compresslevel=1)
src = open(output_file_location, 'r')
# write input file to gzip archive in n byte chunks
n = 100000000
for chunk in iter(lambda: src.read(n), ''):
myzip.write(bytes(chunk, 'UTF-8'))
finally:
myzip.close()
src.close()
print('zip complete ' + file_id)
# now need to delete the old file and update the file record with the new file
new_file_name = output_file_location + '.gz'
os.rename(temp_name, new_file_name)
os.remove(output_file_location)
# calculate new file size
stats = os.stat(new_file_name)
new_file_size = stats.st_size / 1000 / 1000
# update filename
file_obj.filename = output_file_name
file_obj.file.name = new_file_name
# update file size
file_obj.offset = stats.st_size
file_obj.save()
out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
auto_fields[DataFile().get_qualified_field("name")] = output_file_name
auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = jsonpickle.encode(out)
return HttpResponse(out, content_type='json')
| return HttpResponse(jsonpickle.encode('')) | conditional_block |
EnaRest.py | __author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
'''
'''
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
# this method is called for writing smaller files (<= 260MB) to disk, larger files use the
# upload method in ChunkedUpload class
from django.utils import timezone
# need to make a chunked upload record to store deails of the file
if request.method == 'POST':
c = {}
f = request.FILES['file']
fname = f.__str__()
attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}
chunked_upload = ChunkedUpload(**attrs)
# file starts empty
chunked_upload.file.save(name='', content=ContentFile(''), save=True)
path = chunked_upload.file
destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
c.update(csrf(request))
# create output structure to pass back to jquery-upload
files = {'files': {}}
files['files']['name'] = f._name
files['files']['size'] = path.size / (1000 * 1000.0)
files['files']['id'] = chunked_upload.id
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return HttpResponse(str, content_type='json')
def resume_chunked(request):
file_name = request.GET.get('filename')
user_id = request.user.id
# retrieve incomplete file for user with this name
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(
'-offset')[:1]
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def get_partial_uploads(request):
|
def hash_upload(request):
# utility method to create an md5 hash of a given file path
# open uploaded file
file_id = request.GET['file_id']
print('hash started ' + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
# now hash opened file
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
file_obj.hash = md5.hexdigest()
file_obj.save()
output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = json.dumps(output_dict)
print('hash complete ' + file_id)
return HttpResponse(out, content_type='json')
def inspect_file(request):
# utility method to examine a file and return meta-data to the frontend
output_dict = {'file_type': 'unknown', 'do_compress': False}
# get reference to file
file_id = request.GET['file_id']
chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
# size threshold to determine if a file should be compressed
zip_threshold = 200000000 # size in bytes
# check if file is compressed
is_zipped = u.is_gzipped(file_name)
if chunked_upload.offset >= zip_threshold and not is_zipped:
output_dict['do_compress'] = True
# check for file type
if u.is_pdf_file(file_name):
output_dict['file_type'] = 'pdf'
else:
try:
if u.is_fastq_file(file_name):
output_dict['file_type'] = 'fastq'
if not is_zipped:
output_dict['do_compress'] = True
elif u.is_sam_file(file_name):
output_dict['file_type'] = 'sam'
if not is_zipped:
output_dict['do_compress'] = False
elif u.is_bam_file(file_name):
output_dict['file_type'] = 'bam'
if not is_zipped:
output_dict['do_compress'] = False
else: # make file type same as extension
output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]
except:
output_dict['file_type'] = 'unknown'
# add datafile schema
chunked_upload.type = output_dict['file_type']
chunked_upload.save()
# ...and obtain the inserted record
profile_id = request.session['profile_id']
component = "datafile"
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_id")] = file_id
auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
auto_fields[DataFile().get_qualified_field("file_location")] = file_name
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
# get default type from schema
type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
if type:
type = type[0]["default_value"]
auto_fields[DataFile().get_qualified_field("type")] = type
df = BrokerDA(context=dict(),
profile_id=profile_id,
component=component,
auto_fields=auto_fields,
visualize="last_record"
).do_save_edit().get("record_object", dict())
out = jsonpickle.encode(output_dict)
return HttpResponse(out, content_type='json')
def zip_file(request):
# need to get a reference to the file to zip
file_id = request.GET['file_id']
print("zip started " + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
# get the name of the file to zip and change its suffix to .gz
output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
output_file_name = file_obj.filename + '.gz'
try:
# open the file as gzip acrchive...set compression level
temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
myzip = gzip.open(temp_name, 'wb', compresslevel=1)
src = open(output_file_location, 'r')
# write input file to gzip archive in n byte chunks
n = 100000000
for chunk in iter(lambda: src.read(n), ''):
myzip.write(bytes(chunk, 'UTF-8'))
finally:
myzip.close()
src.close()
print('zip complete ' + file_id)
# now need to delete the old file and update the file record with the new file
new_file_name = output_file_location + '.gz'
os.rename(temp_name, new_file_name)
os.remove(output_file_location)
# calculate new file size
stats = os.stat(new_file_name)
new_file_size = stats.st_size / 1000 / 1000
# update filename
file_obj.filename = output_file_name
file_obj.file.name = new_file_name
# update file size
file_obj.offset = stats.st_size
file_obj.save()
out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
auto_fields[DataFile().get_qualified_field("name")] = output_file_name
auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = jsonpickle.encode(out)
return HttpResponse(out, content_type='json')
| user_id = request.user.id
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode('')) | identifier_body |
EnaRest.py | __author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
'''
'''
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
# this method is called for writing smaller files (<= 260MB) to disk, larger files use the
# upload method in ChunkedUpload class
from django.utils import timezone
# need to make a chunked upload record to store deails of the file
if request.method == 'POST':
c = {}
f = request.FILES['file']
fname = f.__str__()
attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}
chunked_upload = ChunkedUpload(**attrs)
# file starts empty
chunked_upload.file.save(name='', content=ContentFile(''), save=True)
path = chunked_upload.file
destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
c.update(csrf(request))
# create output structure to pass back to jquery-upload
files = {'files': {}}
files['files']['name'] = f._name
files['files']['size'] = path.size / (1000 * 1000.0)
files['files']['id'] = chunked_upload.id
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return HttpResponse(str, content_type='json')
def resume_chunked(request):
file_name = request.GET.get('filename')
user_id = request.user.id
# retrieve incomplete file for user with this name
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(
'-offset')[:1]
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def get_partial_uploads(request):
user_id = request.user.id
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def hash_upload(request):
# utility method to create an md5 hash of a given file path
# open uploaded file
file_id = request.GET['file_id']
print('hash started ' + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
# now hash opened file
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
| # update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = json.dumps(output_dict)
print('hash complete ' + file_id)
return HttpResponse(out, content_type='json')
def inspect_file(request):
# utility method to examine a file and return meta-data to the frontend
output_dict = {'file_type': 'unknown', 'do_compress': False}
# get reference to file
file_id = request.GET['file_id']
chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
# size threshold to determine if a file should be compressed
zip_threshold = 200000000 # size in bytes
# check if file is compressed
is_zipped = u.is_gzipped(file_name)
if chunked_upload.offset >= zip_threshold and not is_zipped:
output_dict['do_compress'] = True
# check for file type
if u.is_pdf_file(file_name):
output_dict['file_type'] = 'pdf'
else:
try:
if u.is_fastq_file(file_name):
output_dict['file_type'] = 'fastq'
if not is_zipped:
output_dict['do_compress'] = True
elif u.is_sam_file(file_name):
output_dict['file_type'] = 'sam'
if not is_zipped:
output_dict['do_compress'] = False
elif u.is_bam_file(file_name):
output_dict['file_type'] = 'bam'
if not is_zipped:
output_dict['do_compress'] = False
else: # make file type same as extension
output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]
except:
output_dict['file_type'] = 'unknown'
# add datafile schema
chunked_upload.type = output_dict['file_type']
chunked_upload.save()
# ...and obtain the inserted record
profile_id = request.session['profile_id']
component = "datafile"
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_id")] = file_id
auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
auto_fields[DataFile().get_qualified_field("file_location")] = file_name
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
# get default type from schema
type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
if type:
type = type[0]["default_value"]
auto_fields[DataFile().get_qualified_field("type")] = type
df = BrokerDA(context=dict(),
profile_id=profile_id,
component=component,
auto_fields=auto_fields,
visualize="last_record"
).do_save_edit().get("record_object", dict())
out = jsonpickle.encode(output_dict)
return HttpResponse(out, content_type='json')
def zip_file(request):
# need to get a reference to the file to zip
file_id = request.GET['file_id']
print("zip started " + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
# get the name of the file to zip and change its suffix to .gz
output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
output_file_name = file_obj.filename + '.gz'
try:
# open the file as gzip acrchive...set compression level
temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
myzip = gzip.open(temp_name, 'wb', compresslevel=1)
src = open(output_file_location, 'r')
# write input file to gzip archive in n byte chunks
n = 100000000
for chunk in iter(lambda: src.read(n), ''):
myzip.write(bytes(chunk, 'UTF-8'))
finally:
myzip.close()
src.close()
print('zip complete ' + file_id)
# now need to delete the old file and update the file record with the new file
new_file_name = output_file_location + '.gz'
os.rename(temp_name, new_file_name)
os.remove(output_file_location)
# calculate new file size
stats = os.stat(new_file_name)
new_file_size = stats.st_size / 1000 / 1000
# update filename
file_obj.filename = output_file_name
file_obj.file.name = new_file_name
# update file size
file_obj.offset = stats.st_size
file_obj.save()
out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
auto_fields[DataFile().get_qualified_field("name")] = output_file_name
auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = jsonpickle.encode(out)
return HttpResponse(out, content_type='json') | file_obj.hash = md5.hexdigest()
file_obj.save()
output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
| random_line_split |
EnaRest.py | __author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
'''
'''
class | (HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
# this method is called for writing smaller files (<= 260MB) to disk, larger files use the
# upload method in ChunkedUpload class
from django.utils import timezone
# need to make a chunked upload record to store deails of the file
if request.method == 'POST':
c = {}
f = request.FILES['file']
fname = f.__str__()
attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}
chunked_upload = ChunkedUpload(**attrs)
# file starts empty
chunked_upload.file.save(name='', content=ContentFile(''), save=True)
path = chunked_upload.file
destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
c.update(csrf(request))
# create output structure to pass back to jquery-upload
files = {'files': {}}
files['files']['name'] = f._name
files['files']['size'] = path.size / (1000 * 1000.0)
files['files']['id'] = chunked_upload.id
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return HttpResponse(str, content_type='json')
def resume_chunked(request):
file_name = request.GET.get('filename')
user_id = request.user.id
# retrieve incomplete file for user with this name
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(
'-offset')[:1]
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def get_partial_uploads(request):
user_id = request.user.id
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def hash_upload(request):
# utility method to create an md5 hash of a given file path
# open uploaded file
file_id = request.GET['file_id']
print('hash started ' + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
# now hash opened file
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
file_obj.hash = md5.hexdigest()
file_obj.save()
output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = json.dumps(output_dict)
print('hash complete ' + file_id)
return HttpResponse(out, content_type='json')
def inspect_file(request):
# utility method to examine a file and return meta-data to the frontend
output_dict = {'file_type': 'unknown', 'do_compress': False}
# get reference to file
file_id = request.GET['file_id']
chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
# size threshold to determine if a file should be compressed
zip_threshold = 200000000 # size in bytes
# check if file is compressed
is_zipped = u.is_gzipped(file_name)
if chunked_upload.offset >= zip_threshold and not is_zipped:
output_dict['do_compress'] = True
# check for file type
if u.is_pdf_file(file_name):
output_dict['file_type'] = 'pdf'
else:
try:
if u.is_fastq_file(file_name):
output_dict['file_type'] = 'fastq'
if not is_zipped:
output_dict['do_compress'] = True
elif u.is_sam_file(file_name):
output_dict['file_type'] = 'sam'
if not is_zipped:
output_dict['do_compress'] = False
elif u.is_bam_file(file_name):
output_dict['file_type'] = 'bam'
if not is_zipped:
output_dict['do_compress'] = False
else: # make file type same as extension
output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]
except:
output_dict['file_type'] = 'unknown'
# add datafile schema
chunked_upload.type = output_dict['file_type']
chunked_upload.save()
# ...and obtain the inserted record
profile_id = request.session['profile_id']
component = "datafile"
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_id")] = file_id
auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
auto_fields[DataFile().get_qualified_field("file_location")] = file_name
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
# get default type from schema
type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
if type:
type = type[0]["default_value"]
auto_fields[DataFile().get_qualified_field("type")] = type
df = BrokerDA(context=dict(),
profile_id=profile_id,
component=component,
auto_fields=auto_fields,
visualize="last_record"
).do_save_edit().get("record_object", dict())
out = jsonpickle.encode(output_dict)
return HttpResponse(out, content_type='json')
def zip_file(request):
# need to get a reference to the file to zip
file_id = request.GET['file_id']
print("zip started " + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
# get the name of the file to zip and change its suffix to .gz
output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
output_file_name = file_obj.filename + '.gz'
try:
# open the file as gzip acrchive...set compression level
temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
myzip = gzip.open(temp_name, 'wb', compresslevel=1)
src = open(output_file_location, 'r')
# write input file to gzip archive in n byte chunks
n = 100000000
for chunk in iter(lambda: src.read(n), ''):
myzip.write(bytes(chunk, 'UTF-8'))
finally:
myzip.close()
src.close()
print('zip complete ' + file_id)
# now need to delete the old file and update the file record with the new file
new_file_name = output_file_location + '.gz'
os.rename(temp_name, new_file_name)
os.remove(output_file_location)
# calculate new file size
stats = os.stat(new_file_name)
new_file_size = stats.st_size / 1000 / 1000
# update filename
file_obj.filename = output_file_name
file_obj.file.name = new_file_name
# update file size
file_obj.offset = stats.st_size
file_obj.save()
out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
auto_fields[DataFile().get_qualified_field("name")] = output_file_name
auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = jsonpickle.encode(out)
return HttpResponse(out, content_type='json')
| JSONResponse | identifier_name |
context.rs | // #![warn(missing_docs)]
use std::sync::{
atomic::{AtomicU32, Ordering::SeqCst},
Arc,
};
use crate::{
animation_manager::AnimationManager,
data::output::Output,
frame_state::FrameState,
input_state::*,
layers::GraphicLayers,
mutex::{Mutex, MutexGuard},
*,
};
use epaint::{stats::*, text::Fonts, *};
// ----------------------------------------------------------------------------
/// A wrapper around [`Arc`](std::sync::Arc)`<`[`Context`]`>`.
/// This is how you will normally create and access a [`Context`].
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
///
/// [`CtxRef`] is cheap to clone, and any clones refers to the same mutable data.
///
/// # Example:
///
/// ``` no_run
/// # fn handle_output(_: egui::Output) {}
/// # fn paint(_: Vec<egui::ClippedMesh>) {}
/// let mut ctx = egui::CtxRef::default();
///
/// // Game loop:
/// loop {
/// let raw_input = egui::RawInput::default();
/// ctx.begin_frame(raw_input);
///
/// egui::CentralPanel::default().show(&ctx, |ui| {
/// ui.label("Hello world!");
/// if ui.button("Click me").clicked() {
/// /* take some action here */
/// }
/// });
///
/// let (output, shapes) = ctx.end_frame();
/// let clipped_meshes = ctx.tessellate(shapes); // create triangles to paint
/// handle_output(output);
/// paint(clipped_meshes);
/// }
/// ```
///
#[derive(Clone)]
pub struct CtxRef(std::sync::Arc<Context>);
impl std::ops::Deref for CtxRef {
type Target = Context;
fn deref(&self) -> &Context {
&*self.0
}
}
impl AsRef<Context> for CtxRef {
fn | (&self) -> &Context {
self.0.as_ref()
}
}
impl std::borrow::Borrow<Context> for CtxRef {
fn borrow(&self) -> &Context {
self.0.borrow()
}
}
impl std::cmp::PartialEq for CtxRef {
fn eq(&self, other: &CtxRef) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
impl Default for CtxRef {
fn default() -> Self {
Self(Arc::new(Context {
// Start with painting an extra frame to compensate for some widgets
// that take two frames before they "settle":
repaint_requests: AtomicU32::new(1),
..Context::default()
}))
}
}
impl CtxRef {
/// Call at the start of every frame. Match with a call to [`Context::end_frame`].
///
/// This will modify the internal reference to point to a new generation of [`Context`].
/// Any old clones of this [`CtxRef`] will refer to the old [`Context`], which will not get new input.
///
/// Put your widgets into a [`SidePanel`], [`TopBottomPanel`], [`CentralPanel`], [`Window`] or [`Area`].
pub fn begin_frame(&mut self, new_input: RawInput) {
let mut self_: Context = (*self.0).clone();
self_.begin_frame_mut(new_input);
*self = Self(Arc::new(self_));
}
// ---------------------------------------------------------------------
/// If the given [`Id`] is not unique, an error will be printed at the given position.
/// Call this for [`Id`]:s that need interaction or persistence.
pub(crate) fn register_interaction_id(&self, id: Id, new_rect: Rect) {
let prev_rect = self.frame_state().used_ids.insert(id, new_rect);
if let Some(prev_rect) = prev_rect {
// it is ok to reuse the same ID for e.g. a frame around a widget,
// or to check for interaction with the same widget twice:
if prev_rect.expand(0.1).contains_rect(new_rect)
|| new_rect.expand(0.1).contains_rect(prev_rect)
{
return;
}
let show_error = |pos: Pos2, text: String| {
let painter = self.debug_painter();
let rect = painter.error(pos, text);
if let Some(pointer_pos) = self.input.pointer.hover_pos() {
if rect.contains(pointer_pos) {
painter.error(
rect.left_bottom() + vec2(2.0, 4.0),
"ID clashes happens when things like Windows or CollapsingHeaders share names,\n\
or when things like ScrollAreas and Resize areas aren't given unique id_source:s.",
);
}
}
};
let id_str = id.short_debug_format();
if prev_rect.min.distance(new_rect.min) < 4.0 {
show_error(new_rect.min, format!("Double use of ID {}", id_str));
} else {
show_error(prev_rect.min, format!("First use of ID {}", id_str));
show_error(new_rect.min, format!("Second use of ID {}", id_str));
}
}
}
// ---------------------------------------------------------------------
/// Use `ui.interact` instead
#[allow(clippy::too_many_arguments)]
pub(crate) fn interact(
&self,
clip_rect: Rect,
item_spacing: Vec2,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
) -> Response {
let gap = 0.5; // Just to make sure we don't accidentally hover two things at once (a small eps should be sufficient).
// Make it easier to click things:
let interact_rect = rect.expand2(
(0.5 * item_spacing - Vec2::splat(gap))
.at_least(Vec2::splat(0.0))
.at_most(Vec2::splat(5.0)),
); // make it easier to click
let hovered = self.rect_contains_pointer(layer_id, clip_rect.intersect(interact_rect));
self.interact_with_hovered(layer_id, id, rect, sense, enabled, hovered)
}
/// You specify if a thing is hovered, and the function gives a `Response`.
pub(crate) fn interact_with_hovered(
&self,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
hovered: bool,
) -> Response {
let hovered = hovered && enabled; // can't even hover disabled widgets
let mut response = Response {
ctx: self.clone(),
layer_id,
id,
rect,
sense,
enabled,
hovered,
clicked: Default::default(),
double_clicked: Default::default(),
dragged: false,
drag_released: false,
is_pointer_button_down_on: false,
interact_pointer_pos: None,
changed: false, // must be set by the widget itself
};
if !enabled || !sense.focusable || !layer_id.allow_interaction() {
// Not interested or allowed input:
self.memory().surrender_focus(id);
return response;
}
// We only want to focus labels if the screen reader is on.
let interested_in_focus =
sense.interactive() || sense.focusable && self.memory().options.screen_reader;
if interested_in_focus {
self.memory().interested_in_focus(id);
}
if sense.click
&& response.has_focus()
&& (self.input().key_pressed(Key::Space) || self.input().key_pressed(Key::Enter))
{
// Space/enter works like a primary click for e.g. selected buttons
response.clicked[PointerButton::Primary as usize] = true;
}
self.register_interaction_id(id, rect);
if sense.click || sense.drag {
let mut memory = self.memory();
memory.interaction.click_interest |= hovered && sense.click;
memory.interaction.drag_interest |= hovered && sense.drag;
response.dragged = memory.interaction.drag_id == Some(id);
response.is_pointer_button_down_on =
memory.interaction.click_id == Some(id) || response.dragged;
for pointer_event in &self.input.pointer.pointer_events {
match pointer_event {
PointerEvent::Moved(_) => {}
PointerEvent::Pressed(_) => {
if hovered {
if sense.click && memory.interaction.click_id.is_none() {
// potential start of a click
memory.interaction.click_id = Some(id);
response.is_pointer_button_down_on = true;
}
// HACK: windows have low priority on dragging.
// This is so that if you drag a slider in a window,
// the slider will steal the drag away from the window.
// This is needed because we do window interaction first (to prevent frame delay),
// and then do content layout.
if sense.drag
&& (memory.interaction.drag_id.is_none()
|| memory.interaction.drag_is_window)
{
// potential start of a drag
memory.interaction.drag_id = Some(id);
memory.interaction.drag_is_window = false;
memory.window_interaction = None; // HACK: stop moving windows (if any)
response.is_pointer_button_down_on = true;
response.dragged = true;
}
}
}
PointerEvent::Released(click) => {
response.drag_released = response.dragged;
response.dragged = false;
if hovered && response.is_pointer_button_down_on {
if let Some(click) = click {
let clicked = hovered && response.is_pointer_button_down_on;
response.clicked[click.button as usize] = clicked;
response.double_clicked[click.button as usize] =
clicked && click.is_double();
}
}
}
}
}
}
if response.is_pointer_button_down_on {
response.interact_pointer_pos = self.input().pointer.interact_pos();
}
if self.input.pointer.any_down() {
response.hovered &= response.is_pointer_button_down_on; // we don't hover widgets while interacting with *other* widgets
}
if response.has_focus() && response.clicked_elsewhere() {
self.memory().surrender_focus(id);
}
response
}
/// Get a full-screen painter for a new or existing layer
pub fn layer_painter(&self, layer_id: LayerId) -> Painter {
Painter::new(self.clone(), layer_id, self.input.screen_rect())
}
/// Paint on top of everything else
pub fn debug_painter(&self) -> Painter {
Self::layer_painter(self, LayerId::debug())
}
}
// ----------------------------------------------------------------------------
/// This is the first thing you need when working with egui. Create using [`CtxRef`].
///
/// Contains the [`InputState`], [`Memory`], [`Output`], and more.
///
/// Your handle to Egui.
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
/// Multi-threaded access to a [`Context`] is behind the feature flag `multi_threaded`.
/// Normally you'd always do all ui work on one thread, or perhaps use multiple contexts,
/// but if you really want to access the same Context from multiple threads, it *SHOULD* be fine,
/// but you are likely the first person to try it.
#[derive(Default)]
pub struct Context {
    // We clone the Context each frame so we can set a new `input`.
    // This is so we can avoid a mutex lock to access the `InputState`.
    // This means everything else needs to be behind an Arc.
    // We can probably come up with a nicer design.
    //
    /// None until first call to `begin_frame`.
    fonts: Option<Arc<Fonts>>,
    memory: Arc<Mutex<Memory>>,
    animation_manager: Arc<Mutex<AnimationManager>>,
    /// Plain data (not behind a mutex) — see the note above about per-frame cloning.
    input: InputState,
    /// State that is collected during a frame and then cleared
    frame_state: Arc<Mutex<FrameState>>,
    // The output of a frame:
    graphics: Arc<Mutex<GraphicLayers>>,
    output: Arc<Mutex<Output>>,
    paint_stats: Arc<Mutex<PaintStats>>,
    /// While positive, keep requesting repaints. Decrement at the end of each frame.
    /// Atomic so `&self` methods (e.g. `request_repaint`) can update it without a lock.
    repaint_requests: AtomicU32,
}
impl Clone for Context {
    // Hand-written because `AtomicU32` (`repaint_requests`) does not implement
    // `Clone`; we snapshot its current value into a fresh atomic instead.
    fn clone(&self) -> Self {
        Context {
            fonts: self.fonts.clone(),
            memory: self.memory.clone(),
            animation_manager: self.animation_manager.clone(),
            input: self.input.clone(),
            frame_state: self.frame_state.clone(),
            graphics: self.graphics.clone(),
            output: self.output.clone(),
            paint_stats: self.paint_stats.clone(),
            // `u32 -> AtomicU32` via `From`; the clone gets an independent counter.
            repaint_requests: self.repaint_requests.load(SeqCst).into(),
        }
    }
}
impl Context {
/// How much space is still available after panels has been added.
/// This is the "background" area, what egui doesn't cover with panels (but may cover with windows).
/// This is also the area to which windows are constrained.
pub fn available_rect(&self) -> Rect {
    self.frame_state().available_rect()
}
/// Stores all the egui state.
/// If you want to store/restore egui, serialize this.
pub fn memory(&self) -> MutexGuard<'_, Memory> {
    // The guard unlocks on drop — keep borrows short to avoid deadlocks.
    self.memory.lock()
}
/// The per-layer shape lists that widgets paint into during the frame.
pub(crate) fn graphics(&self) -> MutexGuard<'_, GraphicLayers> {
    self.graphics.lock()
}
/// What egui outputs each frame.
/// Taken (via `std::mem::take`) and returned by `end_frame`.
pub fn output(&self) -> MutexGuard<'_, Output> {
    self.output.lock()
}
/// Scratch state collected during a frame; re-initialized in `begin_frame_mut`.
pub(crate) fn frame_state(&self) -> MutexGuard<'_, FrameState> {
    self.frame_state.lock()
}
/// Call this if there is need to repaint the UI, i.e. if you are showing an animation.
/// If this is called at least once in a frame, then there will be another frame right after this.
/// Call as many times as you wish, only one repaint will be issued.
pub fn request_repaint(&self) {
    // Ask for two frames of repaint, just to cover some corner cases (frame delays):
    const TIMES_TO_REPAINT: u32 = 2;
    self.repaint_requests.store(TIMES_TO_REPAINT, SeqCst);
}
#[inline(always)]
/// The [`InputState`] for the current frame.
/// Plain field access — no mutex; the `Context` is cloned each frame
/// precisely so this can be lock-free.
pub fn input(&self) -> &InputState {
    &self.input
}
/// Not valid until first call to [`CtxRef::begin_frame()`],
/// because the proper `pixels_per_point` is not known before then.
pub fn fonts(&self) -> &Fonts {
    self.fonts
        .as_deref()
        .expect("No fonts available until first call to CtxRef::begin_frame()")
}
/// The egui texture, containing font characters etc.
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn texture(&self) -> Arc<epaint::Texture> {
    // Panics before the first `begin_frame` (see `fonts()`).
    self.fonts().texture()
}
/// Tell `egui` which fonts to use.
///
/// The default `egui` fonts only support latin and cyrillic alphabets,
/// but you can call this to install additional fonts that support e.g. korean characters.
///
/// The new fonts will become active at the start of the next frame.
pub fn set_fonts(&self, font_definitions: FontDefinitions) {
    if let Some(current_fonts) = &self.fonts {
        // NOTE: this comparison is expensive since it checks TTF data for equality
        if current_fonts.definitions() == &font_definitions {
            return; // no change - save us from reloading font textures
        }
    }
    // Deferred: `begin_frame_mut` picks this up at the start of the next frame.
    self.memory().new_font_definitions = Some(font_definitions);
}
/// The [`Style`] used by all subsequent windows, panels etc.
/// Cheap: clones an `Arc`, not the `Style` itself.
pub fn style(&self) -> Arc<Style> {
    self.memory().options.style.clone()
}
/// The [`Style`] used by all new windows, panels etc.
///
/// You can also use [`Ui::style_mut`] to change the style of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// let mut style: egui::Style = (*ctx.style()).clone();
/// style.spacing.item_spacing = egui::vec2(10.0, 20.0);
/// ctx.set_style(style);
/// ```
pub fn set_style(&self, style: impl Into<Arc<Style>>) {
    self.memory().options.style = style.into();
}
/// The [`Visuals`] used by all subsequent windows, panels etc.
///
/// You can also use [`Ui::visuals_mut`] to change the visuals of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// ctx.set_visuals(egui::Visuals::light()); // Switch to light mode
/// ```
pub fn set_visuals(&self, visuals: crate::Visuals) {
    let mut memory = self.memory();
    // Copy-on-write: only clones the `Style` if the `Arc` is shared.
    let style = std::sync::Arc::make_mut(&mut memory.options.style);
    style.visuals = visuals;
}
/// The number of physical pixels for each logical point.
#[inline(always)]
pub fn pixels_per_point(&self) -> f32 {
    self.input.pixels_per_point()
}
/// Set the number of physical pixels for each logical point.
/// Will become active at the start of the next frame.
///
/// Note that this may be overwritten by input from the integration via [`RawInput::pixels_per_point`].
/// For instance, when using `egui_web` the browsers native zoom level will always be used.
pub fn set_pixels_per_point(&self, pixels_per_point: f32) {
    // Deferred: applied in `begin_frame_mut` on the next frame.
    self.memory().new_pixels_per_point = Some(pixels_per_point);
}
/// Useful for pixel-perfect rendering: snap a point-coordinate to the physical pixel grid.
pub(crate) fn round_to_pixel(&self, point: f32) -> f32 {
    let ppp = self.pixels_per_point();
    (point * ppp).round() / ppp
}
/// Useful for pixel-perfect rendering: snaps each coordinate independently.
pub(crate) fn round_pos_to_pixels(&self, pos: Pos2) -> Pos2 {
    pos2(self.round_to_pixel(pos.x), self.round_to_pixel(pos.y))
}
/// Useful for pixel-perfect rendering: snaps each component independently.
pub(crate) fn round_vec_to_pixels(&self, vec: Vec2) -> Vec2 {
    vec2(self.round_to_pixel(vec.x), self.round_to_pixel(vec.y))
}
/// Useful for pixel-perfect rendering: snap both corners to the pixel grid.
pub(crate) fn round_rect_to_pixels(&self, rect: Rect) -> Rect {
    let min = self.round_pos_to_pixels(rect.min);
    let max = self.round_pos_to_pixels(rect.max);
    Rect { min, max }
}
// ---------------------------------------------------------------------
/// Constrain the position of a window/area so it fits within the provided boundary.
///
/// If area is `None`, will constrain to [`Self::available_rect`].
pub(crate) fn constrain_window_rect_to_area(&self, window: Rect, area: Option<Rect>) -> Rect {
    let mut area = area.unwrap_or_else(|| self.available_rect());
    if window.width() > area.width() {
        // Allow overlapping side bars.
        // This is important for small screens, e.g. mobiles running the web demo.
        area.max.x = self.input().screen_rect().max.x;
        area.min.x = self.input().screen_rect().min.x;
    }
    if window.height() > area.height() {
        // Allow overlapping top/bottom bars:
        area.max.y = self.input().screen_rect().max.y;
        area.min.y = self.input().screen_rect().min.y;
    }
    let mut pos = window.min;
    // Constrain to screen, unless window is too large to fit:
    // (the margins are zero when the window fits inside `area`)
    let margin_x = (window.width() - area.width()).at_least(0.0);
    let margin_y = (window.height() - area.height()).at_least(0.0);
    pos.x = pos.x.at_most(area.right() + margin_x - window.width()); // move left if needed
    pos.x = pos.x.at_least(area.left() - margin_x); // move right if needed
    pos.y = pos.y.at_most(area.bottom() + margin_y - window.height()); // move up if needed
    pos.y = pos.y.at_least(area.top() - margin_y); // move down if needed
    pos = self.round_pos_to_pixels(pos);
    Rect::from_min_size(pos, window.size())
}
// ---------------------------------------------------------------------
/// Start a new frame: fold `new_raw_input` into the input state, rebuild
/// fonts if the definitions or `pixels_per_point` changed, and register the
/// background area.
///
/// Note: every `self.memory()` / `self.frame_state.lock()` call below takes
/// the mutex for just that expression — no two locks are held at once here.
fn begin_frame_mut(&mut self, new_raw_input: RawInput) {
    self.memory().begin_frame(&self.input, &new_raw_input);
    // Take the old input so we can feed it into `begin_frame` by value:
    let mut input = std::mem::take(&mut self.input);
    if let Some(new_pixels_per_point) = self.memory().new_pixels_per_point.take() {
        input.pixels_per_point = new_pixels_per_point;
    }
    self.input = input.begin_frame(new_raw_input);
    self.frame_state.lock().begin_frame(&self.input);
    {
        // Load new fonts if required:
        let new_font_definitions = self.memory().new_font_definitions.take();
        let pixels_per_point = self.input.pixels_per_point();
        let pixels_per_point_changed = match &self.fonts {
            None => true,
            Some(current_fonts) => {
                // Small epsilon so float noise doesn't force a font rebuild:
                (current_fonts.pixels_per_point() - pixels_per_point).abs() > 1e-3
            }
        };
        if self.fonts.is_none() || new_font_definitions.is_some() || pixels_per_point_changed {
            self.fonts = Some(Arc::new(Fonts::new(
                pixels_per_point,
                // Reuse the old definitions unless new ones were queued via `set_fonts`:
                new_font_definitions.unwrap_or_else(|| {
                    self.fonts
                        .as_ref()
                        .map(|font| font.definitions().clone())
                        .unwrap_or_default()
                }),
            )));
        }
    }
    // Ensure we register the background area so panels and background ui can catch clicks:
    let screen_rect = self.input.screen_rect();
    self.memory().areas.set_state(
        LayerId::background(),
        containers::area::State {
            pos: screen_rect.min,
            size: screen_rect.size(),
            interactable: true,
        },
    );
}
/// Call at the end of each frame.
/// Returns what has happened this frame [`crate::Output`] as well as what you need to paint.
/// You can transform the returned shapes into triangles with a call to [`Context::tessellate`].
#[must_use]
pub fn end_frame(&self) -> (Output, Vec<ClippedShape>) {
    if self.input.wants_repaint() {
        self.request_repaint();
    }
    self.memory()
        .end_frame(&self.input, &self.frame_state().used_ids);
    self.fonts().end_frame();
    // Hand the accumulated output to the caller, leaving a fresh default behind:
    let mut output: Output = std::mem::take(&mut self.output());
    // NOTE(review): the `load` + `fetch_sub` pair is not one atomic operation;
    // this assumes `end_frame` is not called concurrently from two threads — confirm.
    if self.repaint_requests.load(SeqCst) > 0 {
        self.repaint_requests.fetch_sub(1, SeqCst);
        output.needs_repaint = true;
    }
    let shapes = self.drain_paint_lists();
    (output, shapes)
}
/// Drain all queued shapes from all layers, in the area order stored in memory.
fn drain_paint_lists(&self) -> Vec<ClippedShape> {
    // NOTE(review): `memory` is locked before `graphics` here; verify other
    // call sites agree on this lock order to avoid deadlocks.
    let memory = self.memory();
    self.graphics().drain(memory.areas.order()).collect()
}
/// Tessellate the given shapes into triangle meshes.
///
/// Also records paint statistics into `self.paint_stats` as a side effect.
pub fn tessellate(&self, shapes: Vec<ClippedShape>) -> Vec<ClippedMesh> {
    // A tempting optimization is to reuse the tessellation from last frame if the
    // shapes are the same, but just comparing the shapes takes about 50% of the time
    // it takes to tessellate them, so it is not a worth optimization.
    let mut tessellation_options = self.memory().options.tessellation_options;
    tessellation_options.pixels_per_point = self.pixels_per_point();
    // Anti-aliasing feather of one physical pixel:
    tessellation_options.aa_size = 1.0 / self.pixels_per_point();
    let paint_stats = PaintStats::from_shapes(&shapes);
    let clipped_meshes = tessellator::tessellate_shapes(
        shapes,
        tessellation_options,
        self.fonts().texture().size(),
    );
    *self.paint_stats.lock() = paint_stats.with_clipped_meshes(&clipped_meshes);
    clipped_meshes
}
// ---------------------------------------------------------------------
/// How much space is used by panels and windows.
pub fn used_rect(&self) -> Rect {
    let mut rect = self.frame_state().used_by_panels;
    for window in self.memory().areas.visible_windows() {
        rect = rect.union(window.rect());
    }
    rect
}
/// How much space is used by panels and windows.
/// You can shrink your egui area to this size and still fit all egui components.
pub fn used_size(&self) -> Vec2 {
self.used_rect().max - Pos2::new(0.0, 0.0)
}
// ---------------------------------------------------------------------
/// Is the pointer (mouse/touch) over any egui area?
pub fn is_pointer_over_area(&self) -> bool {
    let pointer_pos = match self.input.pointer.interact_pos() {
        Some(pos) => pos,
        None => return false,
    };
    match self.layer_id_at(pointer_pos) {
        // The background layer only counts where something (a panel) was laid out:
        Some(layer) if layer.order == Order::Background => {
            !self.frame_state().unused_rect.contains(pointer_pos)
        }
        Some(_) => true,
        None => false,
    }
}
/// True if egui is currently interested in the pointer (mouse or touch).
/// Could be the pointer is hovering over a [`Window`] or the user is dragging a widget.
/// If `false`, the pointer is outside of any egui area and so
/// you may be interested in what it is doing (e.g. controlling your game).
/// Returns `false` if a drag started outside of egui and then moved over an egui area.
pub fn wants_pointer_input(&self) -> bool {
    self.is_using_pointer() || (self.is_pointer_over_area() && !self.input().pointer.any_down())
}
/// Is egui currently using the pointer position (e.g. dragging a slider).
/// NOTE: this will return `false` if the pointer is just hovering over an egui area.
pub fn is_using_pointer(&self) -> bool {
    self.memory().interaction.is_using_pointer()
}
/// If `true`, egui is currently listening on text input (e.g. typing text in a [`TextEdit`]).
pub fn wants_keyboard_input(&self) -> bool {
    self.memory().interaction.focus.focused().is_some()
}
// ---------------------------------------------------------------------
/// Move all the graphics at the given layer.
/// Can be used to implement drag-and-drop (see relevant demo).
pub fn translate_layer(&self, layer_id: LayerId, delta: Vec2) {
    // Skip the lock entirely for a no-op translation:
    if delta != Vec2::ZERO {
        self.graphics().list(layer_id).lock().translate(delta);
    }
}
/// Top-most layer at the given position.
pub fn layer_id_at(&self, pos: Pos2) -> Option<LayerId> {
    // The resize grab radius is passed along — presumably so resize handles
    // just outside a window still hit that window's layer; confirm in
    // `Memory::layer_id_at`.
    let resize_grab_radius_side = self.style().interaction.resize_grab_radius_side;
    self.memory().layer_id_at(pos, resize_grab_radius_side)
}
/// Is the interact position inside `rect`, with `layer_id` topmost there?
pub(crate) fn rect_contains_pointer(&self, layer_id: LayerId, rect: Rect) -> bool {
    self.input.pointer.interact_pos().map_or(false, |pos| {
        rect.contains(pos) && self.layer_id_at(pos) == Some(layer_id)
    })
}
// ---------------------------------------------------------------------
/// Whether or not to debug widget layout on hover.
pub fn debug_on_hover(&self) -> bool {
    self.memory().options.style.debug.debug_on_hover
}
/// Turn on/off whether or not to debug widget layout on hover.
pub fn set_debug_on_hover(&self, debug_on_hover: bool) {
    // Clone-modify-set, since the style lives behind an `Arc`:
    let mut style = (*self.memory().options.style).clone();
    style.debug.debug_on_hover = debug_on_hover;
    self.set_style(style);
}
}
/// ## Animation
impl Context {
    /// Returns a value in the range [0, 1], to indicate "how on" this thing is.
    ///
    /// The first time called it will return `if value { 1.0 } else { 0.0 }`
    /// Calling this with `value = true` will always yield a number larger than zero, quickly going towards one.
    /// Calling this with `value = false` will always yield a number less than one, quickly going towards zero.
    ///
    /// The function will call [`Self::request_repaint()`] when appropriate.
    pub fn animate_bool(&self, id: Id, value: bool) -> f32 {
        let animation_time = self.style().animation_time;
        let animated_value =
            self.animation_manager
                .lock()
                .animate_bool(&self.input, animation_time, id, value);
        // While the value is strictly between 0 and 1, keep the frames coming:
        let animation_in_progress = 0.0 < animated_value && animated_value < 1.0;
        if animation_in_progress {
            self.request_repaint();
        }
        animated_value
    }
    /// Clear memory of any animations.
    pub fn clear_animations(&self) {
        *self.animation_manager.lock() = Default::default();
    }
}
impl Context {
/// Show a ui for editing egui settings: style, fonts and painting options.
pub fn settings_ui(&self, ui: &mut Ui) {
    use crate::containers::*;
    CollapsingHeader::new("🎑 Style")
        .default_open(true)
        .show(ui, |ui| {
            self.style_ui(ui);
        });
    CollapsingHeader::new("🔠 Fonts")
        .default_open(false)
        .show(ui, |ui| {
            // Edit a copy of the definitions, then feed them back:
            let mut font_definitions = self.fonts().definitions().clone();
            font_definitions.ui(ui);
            self.fonts().texture().ui(ui);
            self.set_fonts(font_definitions);
        });
    CollapsingHeader::new("✒ Painting")
        .default_open(true)
        .show(ui, |ui| {
            let mut tessellation_options = self.memory().options.tessellation_options;
            tessellation_options.ui(ui);
            ui.vertical_centered(|ui| reset_button(ui, &mut tessellation_options));
            self.memory().options.tessellation_options = tessellation_options;
        });
}
/// Show a ui with introspection info: pointer/keyboard state, hovered layer,
/// layout-cache size and paint stats. Useful for debugging an egui app.
pub fn inspection_ui(&self, ui: &mut Ui) {
    use crate::containers::*;
    crate::trace!(ui);
    ui.label(format!("Is using pointer: {}", self.is_using_pointer()))
        .on_hover_text(
            "Is egui currently using the pointer actively (e.g. dragging a slider)?",
        );
    ui.label(format!("Wants pointer input: {}", self.wants_pointer_input()))
        .on_hover_text("Is egui currently interested in the location of the pointer (either because it is in use, or because it is hovering over a window).");
    ui.label(format!(
        "Wants keyboard input: {}",
        self.wants_keyboard_input()
    ))
    .on_hover_text("Is egui currently listening for text input?");
    ui.label(format!(
        "Keyboard focus widget: {}",
        self.memory()
            .interaction
            .focus
            .focused()
            .as_ref()
            .map(Id::short_debug_format)
            .unwrap_or_default()
    ))
    // Fix: this hover text was copy-pasted from the label above; it should
    // describe the focused widget, not whether text input is being listened to.
    .on_hover_text("Which widget currently has keyboard focus?");
    let pointer_pos = self
        .input()
        .pointer
        .hover_pos()
        .map_or_else(String::new, |pos| format!("{:?}", pos));
    ui.label(format!("Pointer pos: {}", pointer_pos));
    let top_layer = self
        .input()
        .pointer
        .hover_pos()
        .and_then(|pos| self.layer_id_at(pos))
        .map_or_else(String::new, |layer| layer.short_debug_format());
    ui.label(format!("Top layer under mouse: {}", top_layer));
    ui.add_space(16.0);
    ui.label(format!(
        "There are {} text galleys in the layout cache",
        self.fonts().num_galleys_in_cache()
    ))
    .on_hover_text("This is approximately the number of text strings on screen");
    ui.add_space(16.0);
    CollapsingHeader::new("📥 Input")
        .default_open(false)
        .show(ui, |ui| ui.input().clone().ui(ui));
    CollapsingHeader::new("📊 Paint stats")
        .default_open(true)
        .show(ui, |ui| {
            self.paint_stats.lock().ui(ui);
        });
}
/// Show a ui for inspecting and resetting the persisted egui state
/// (areas, collapsing headers, menu bars, scroll/resize state, interaction).
pub fn memory_ui(&self, ui: &mut crate::Ui) {
    if ui
        .button("Reset all")
        .on_hover_text("Reset all egui state")
        .clicked()
    {
        *self.memory() = Default::default();
    }
    ui.horizontal(|ui| {
        ui.label(format!(
            "{} areas (panels, windows, popups, …)",
            self.memory().areas.count()
        ));
        if ui.button("Reset").clicked() {
            self.memory().areas = Default::default();
        }
    });
    ui.indent("areas", |ui| {
        ui.label("Visible areas, ordered back to front.");
        ui.label("Hover to highlight");
        // Collect the order first so we don't hold the memory lock while building ui:
        let layers_ids: Vec<LayerId> = self.memory().areas.order().to_vec();
        for layer_id in layers_ids {
            let area = self.memory().areas.get(layer_id.id).cloned();
            if let Some(area) = area {
                let is_visible = self.memory().areas.is_visible(&layer_id);
                if !is_visible {
                    continue;
                }
                let text = format!("{} - {:?}", layer_id.short_debug_format(), area.rect(),);
                // TODO: `Sense::hover_highlight()`
                // NOTE(review): `is_visible` is always true here — invisible
                // areas were skipped by the `continue` above.
                if ui
                    .add(Label::new(text).monospace().sense(Sense::click()))
                    .hovered
                    && is_visible
                {
                    ui.ctx()
                        .debug_painter()
                        .debug_rect(area.rect(), Color32::RED, "");
                }
            }
        }
    });
    ui.horizontal(|ui| {
        ui.label(format!(
            "{} collapsing headers",
            self.memory()
                .id_data
                .count::<containers::collapsing_header::State>()
        ));
        if ui.button("Reset").clicked() {
            self.memory()
                .id_data
                .remove_by_type::<containers::collapsing_header::State>();
        }
    });
    ui.horizontal(|ui| {
        ui.label(format!(
            "{} menu bars",
            self.memory().id_data_temp.count::<menu::BarState>()
        ));
        if ui.button("Reset").clicked() {
            self.memory()
                .id_data_temp
                .remove_by_type::<menu::BarState>();
        }
    });
    ui.horizontal(|ui| {
        ui.label(format!(
            "{} scroll areas",
            self.memory().id_data.count::<scroll_area::State>()
        ));
        if ui.button("Reset").clicked() {
            self.memory().id_data.remove_by_type::<scroll_area::State>();
        }
    });
    ui.horizontal(|ui| {
        ui.label(format!(
            "{} resize areas",
            self.memory().id_data.count::<resize::State>()
        ));
        if ui.button("Reset").clicked() {
            self.memory().id_data.remove_by_type::<resize::State>();
        }
    });
    ui.shrink_width_to_current(); // don't let the text below grow this window wider
    ui.label("NOTE: the position of this window cannot be reset from within itself.");
    ui.collapsing("Interaction", |ui| {
        let interaction = self.memory().interaction.clone();
        interaction.ui(ui);
    });
}
}
impl Context {
    /// Show an editor for the current [`Style`], applying any edits.
    pub fn style_ui(&self, ui: &mut Ui) {
        let mut edited_style: Style = (*self.style()).clone();
        edited_style.ui(ui);
        self.set_style(edited_style);
    }
}
| as_ref | identifier_name |
context.rs | // #![warn(missing_docs)]
use std::sync::{
atomic::{AtomicU32, Ordering::SeqCst},
Arc,
};
use crate::{
animation_manager::AnimationManager,
data::output::Output,
frame_state::FrameState,
input_state::*,
layers::GraphicLayers,
mutex::{Mutex, MutexGuard},
*,
};
use epaint::{stats::*, text::Fonts, *};
// ----------------------------------------------------------------------------
/// A wrapper around [`Arc`](std::sync::Arc)`<`[`Context`]`>`.
/// This is how you will normally create and access a [`Context`].
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
///
/// [`CtxRef`] is cheap to clone, and any clones refers to the same mutable data.
///
/// # Example:
///
/// ``` no_run
/// # fn handle_output(_: egui::Output) {}
/// # fn paint(_: Vec<egui::ClippedMesh>) {}
/// let mut ctx = egui::CtxRef::default();
///
/// // Game loop:
/// loop {
///     let raw_input = egui::RawInput::default();
///     ctx.begin_frame(raw_input);
///
///     egui::CentralPanel::default().show(&ctx, |ui| {
///         ui.label("Hello world!");
///         if ui.button("Click me").clicked() {
///             /* take some action here */
///         }
///     });
///
///     let (output, shapes) = ctx.end_frame();
///     let clipped_meshes = ctx.tessellate(shapes); // create triangles to paint
///     handle_output(output);
///     paint(clipped_meshes);
/// }
/// ```
///
#[derive(Clone)]
// Newtype over the shared context; `begin_frame` swaps in a new generation.
pub struct CtxRef(std::sync::Arc<Context>);
impl std::ops::Deref for CtxRef {
    type Target = Context;
    // Lets every `Context` method be called directly on a `CtxRef`.
    fn deref(&self) -> &Context {
        &*self.0
    }
}
impl AsRef<Context> for CtxRef {
    // Cheap borrow of the shared context.
    fn as_ref(&self) -> &Context {
        self.0.as_ref()
    }
}
impl std::borrow::Borrow<Context> for CtxRef {
    // Allows `CtxRef` keys/lookups where a `Context` borrow is expected.
    fn borrow(&self) -> &Context {
        self.0.borrow()
    }
}
impl std::cmp::PartialEq for CtxRef {
fn eq(&self, other: &CtxRef) -> bool {
Arc::ptr_eq(&self.0, &other.0)
} | }
impl Default for CtxRef {
    /// A fresh context that already has one repaint queued.
    fn default() -> Self {
        Self(Arc::new(Context {
            // Start with painting an extra frame to compensate for some widgets
            // that take two frames before they "settle":
            repaint_requests: AtomicU32::new(1),
            ..Context::default()
        }))
    }
}
impl CtxRef {
/// Call at the start of every frame. Match with a call to [`Context::end_frame`].
///
/// This will modify the internal reference to point to a new generation of [`Context`].
/// Any old clones of this [`CtxRef`] will refer to the old [`Context`], which will not get new input.
///
/// Put your widgets into a [`SidePanel`], [`TopBottomPanel`], [`CentralPanel`], [`Window`] or [`Area`].
pub fn begin_frame(&mut self, new_input: RawInput) {
    // Clone the previous generation, feed it the new input, then swap it in:
    let mut self_: Context = (*self.0).clone();
    self_.begin_frame_mut(new_input);
    *self = Self(Arc::new(self_));
}
// ---------------------------------------------------------------------
/// If the given [`Id`] is not unique, an error will be printed at the given position.
/// Call this for [`Id`]:s that need interaction or persistence.
pub(crate) fn register_interaction_id(&self, id: Id, new_rect: Rect) {
    let prev_rect = self.frame_state().used_ids.insert(id, new_rect);
    if let Some(prev_rect) = prev_rect {
        // it is ok to reuse the same ID for e.g. a frame around a widget,
        // or to check for interaction with the same widget twice:
        if prev_rect.expand(0.1).contains_rect(new_rect)
            || new_rect.expand(0.1).contains_rect(prev_rect)
        {
            return;
        }
        // Paint the clash on the debug layer; extra help text when hovered.
        let show_error = |pos: Pos2, text: String| {
            let painter = self.debug_painter();
            let rect = painter.error(pos, text);
            if let Some(pointer_pos) = self.input.pointer.hover_pos() {
                if rect.contains(pointer_pos) {
                    painter.error(
                        rect.left_bottom() + vec2(2.0, 4.0),
                        "ID clashes happens when things like Windows or CollapsingHeaders share names,\n\
                        or when things like ScrollAreas and Resize areas aren't given unique id_source:s.",
                    );
                }
            }
        };
        let id_str = id.short_debug_format();
        // Nearly-coincident rects get one message; otherwise mark both uses:
        if prev_rect.min.distance(new_rect.min) < 4.0 {
            show_error(new_rect.min, format!("Double use of ID {}", id_str));
        } else {
            show_error(prev_rect.min, format!("First use of ID {}", id_str));
            show_error(new_rect.min, format!("Second use of ID {}", id_str));
        }
    }
}
// ---------------------------------------------------------------------
/// Use `ui.interact` instead
///
/// Expands `rect` slightly (bounded by `item_spacing`, clipped to `clip_rect`)
/// to compute hover, then delegates to [`Self::interact_with_hovered`].
#[allow(clippy::too_many_arguments)]
pub(crate) fn interact(
    &self,
    clip_rect: Rect,
    item_spacing: Vec2,
    layer_id: LayerId,
    id: Id,
    rect: Rect,
    sense: Sense,
    enabled: bool,
) -> Response {
    let gap = 0.5; // Just to make sure we don't accidentally hover two things at once (a small eps should be sufficient).
    // Make it easier to click things:
    let interact_rect = rect.expand2(
        (0.5 * item_spacing - Vec2::splat(gap))
            .at_least(Vec2::splat(0.0))
            .at_most(Vec2::splat(5.0)),
    ); // make it easier to click
    let hovered = self.rect_contains_pointer(layer_id, clip_rect.intersect(interact_rect));
    self.interact_with_hovered(layer_id, id, rect, sense, enabled, hovered)
}
/// You specify if a thing is hovered, and the function gives a `Response`.
///
/// Builds the [`Response`] for a widget this frame: focus bookkeeping,
/// keyboard "click" (space/enter), and click/drag tracking driven by this
/// frame's pointer events. Statement order here is load-bearing — the
/// interaction state in `memory` is mutated as events are replayed.
pub(crate) fn interact_with_hovered(
    &self,
    layer_id: LayerId,
    id: Id,
    rect: Rect,
    sense: Sense,
    enabled: bool,
    hovered: bool,
) -> Response {
    let hovered = hovered && enabled; // can't even hover disabled widgets
    let mut response = Response {
        ctx: self.clone(),
        layer_id,
        id,
        rect,
        sense,
        enabled,
        hovered,
        clicked: Default::default(),
        double_clicked: Default::default(),
        dragged: false,
        drag_released: false,
        is_pointer_button_down_on: false,
        interact_pointer_pos: None,
        changed: false, // must be set by the widget itself
    };
    if !enabled || !sense.focusable || !layer_id.allow_interaction() {
        // Not interested or allowed input:
        self.memory().surrender_focus(id);
        return response;
    }
    // We only want to focus labels if the screen reader is on.
    let interested_in_focus =
        sense.interactive() || sense.focusable && self.memory().options.screen_reader;
    if interested_in_focus {
        self.memory().interested_in_focus(id);
    }
    if sense.click
        && response.has_focus()
        && (self.input().key_pressed(Key::Space) || self.input().key_pressed(Key::Enter))
    {
        // Space/enter works like a primary click for e.g. selected buttons
        response.clicked[PointerButton::Primary as usize] = true;
    }
    self.register_interaction_id(id, rect);
    if sense.click || sense.drag {
        // Note: this lock is held for the whole event-replay loop below.
        let mut memory = self.memory();
        memory.interaction.click_interest |= hovered && sense.click;
        memory.interaction.drag_interest |= hovered && sense.drag;
        response.dragged = memory.interaction.drag_id == Some(id);
        response.is_pointer_button_down_on =
            memory.interaction.click_id == Some(id) || response.dragged;
        for pointer_event in &self.input.pointer.pointer_events {
            match pointer_event {
                PointerEvent::Moved(_) => {}
                PointerEvent::Pressed(_) => {
                    if hovered {
                        if sense.click && memory.interaction.click_id.is_none() {
                            // potential start of a click
                            memory.interaction.click_id = Some(id);
                            response.is_pointer_button_down_on = true;
                        }
                        // HACK: windows have low priority on dragging.
                        // This is so that if you drag a slider in a window,
                        // the slider will steal the drag away from the window.
                        // This is needed because we do window interaction first (to prevent frame delay),
                        // and then do content layout.
                        if sense.drag
                            && (memory.interaction.drag_id.is_none()
                                || memory.interaction.drag_is_window)
                        {
                            // potential start of a drag
                            memory.interaction.drag_id = Some(id);
                            memory.interaction.drag_is_window = false;
                            memory.window_interaction = None; // HACK: stop moving windows (if any)
                            response.is_pointer_button_down_on = true;
                            response.dragged = true;
                        }
                    }
                }
                PointerEvent::Released(click) => {
                    response.drag_released = response.dragged;
                    response.dragged = false;
                    if hovered && response.is_pointer_button_down_on {
                        // A click only counts if press and release both happened on us:
                        if let Some(click) = click {
                            let clicked = hovered && response.is_pointer_button_down_on;
                            response.clicked[click.button as usize] = clicked;
                            response.double_clicked[click.button as usize] =
                                clicked && click.is_double();
                        }
                    }
                }
            }
        }
    }
    if response.is_pointer_button_down_on {
        response.interact_pointer_pos = self.input().pointer.interact_pos();
    }
    if self.input.pointer.any_down() {
        response.hovered &= response.is_pointer_button_down_on; // we don't hover widgets while interacting with *other* widgets
    }
    if response.has_focus() && response.clicked_elsewhere() {
        self.memory().surrender_focus(id);
    }
    response
}
/// Get a full-screen painter for a new or existing layer
pub fn layer_painter(&self, layer_id: LayerId) -> Painter {
    let full_screen = self.input.screen_rect();
    Painter::new(self.clone(), layer_id, full_screen)
}
/// Paint on top of everything else
pub fn debug_painter(&self) -> Painter {
    self.layer_painter(LayerId::debug())
}
}
// ----------------------------------------------------------------------------
/// This is the first thing you need when working with egui. Create using [`CtxRef`].
///
/// Contains the [`InputState`], [`Memory`], [`Output`], and more.
///
/// Your handle to Egui.
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
/// Multi-threaded access to a [`Context`] is behind the feature flag `multi_threaded`.
/// Normally you'd always do all ui work on one thread, or perhaps use multiple contexts,
/// but if you really want to access the same Context from multiple threads, it *SHOULD* be fine,
/// but you are likely the first person to try it.
#[derive(Default)]
pub struct Context {
    // We clone the Context each frame so we can set a new `input`.
    // This is so we can avoid a mutex lock to access the `InputState`.
    // This means everything else needs to be behind an Arc.
    // We can probably come up with a nicer design.
    //
    /// None until first call to `begin_frame`.
    fonts: Option<Arc<Fonts>>,
    memory: Arc<Mutex<Memory>>,
    animation_manager: Arc<Mutex<AnimationManager>>,
    /// Not behind a mutex — see the cloning note above.
    input: InputState,
    /// State that is collected during a frame and then cleared
    frame_state: Arc<Mutex<FrameState>>,
    // The output of a frame:
    graphics: Arc<Mutex<GraphicLayers>>,
    output: Arc<Mutex<Output>>,
    paint_stats: Arc<Mutex<PaintStats>>,
    /// While positive, keep requesting repaints. Decrement at the end of each frame.
    /// Atomic so `request_repaint` can work through `&self`.
    repaint_requests: AtomicU32,
}
impl Clone for Context {
    // Manual impl: `AtomicU32` is not `Clone`, so we snapshot its value.
    fn clone(&self) -> Self {
        Context {
            fonts: self.fonts.clone(),
            memory: self.memory.clone(),
            animation_manager: self.animation_manager.clone(),
            input: self.input.clone(),
            frame_state: self.frame_state.clone(),
            graphics: self.graphics.clone(),
            output: self.output.clone(),
            paint_stats: self.paint_stats.clone(),
            // `u32 -> AtomicU32` conversion via `From`:
            repaint_requests: self.repaint_requests.load(SeqCst).into(),
        }
    }
}
impl Context {
/// How much space is still available after panels has been added.
/// This is the "background" area, what egui doesn't cover with panels (but may cover with windows).
/// This is also the area to which windows are constrained.
pub fn available_rect(&self) -> Rect {
    self.frame_state().available_rect()
}
/// Stores all the egui state.
/// If you want to store/restore egui, serialize this.
pub fn memory(&self) -> MutexGuard<'_, Memory> {
    // Guard unlocks on drop; keep borrows short.
    self.memory.lock()
}
/// The per-layer shape lists widgets paint into during the frame.
pub(crate) fn graphics(&self) -> MutexGuard<'_, GraphicLayers> {
    self.graphics.lock()
}
/// What egui outputs each frame.
/// Emptied (via `std::mem::take`) by `end_frame`.
pub fn output(&self) -> MutexGuard<'_, Output> {
    self.output.lock()
}
/// Per-frame scratch state; re-initialized at the start of each frame.
pub(crate) fn frame_state(&self) -> MutexGuard<'_, FrameState> {
    self.frame_state.lock()
}
/// Call this if there is need to repaint the UI, i.e. if you are showing an animation.
/// If this is called at least once in a frame, then there will be another frame right after this.
/// Call as many times as you wish, only one repaint will be issued.
pub fn request_repaint(&self) {
    // Two frames of repaint, to cover some corner cases (frame delays):
    const REPAINT_FRAMES: u32 = 2;
    self.repaint_requests.store(REPAINT_FRAMES, SeqCst);
}
#[inline(always)]
/// The [`InputState`] of the current frame (plain field access — no lock).
pub fn input(&self) -> &InputState {
    &self.input
}
/// Not valid until the first call to [`CtxRef::begin_frame()`],
/// since the proper `pixels_per_point` is unknown before then.
pub fn fonts(&self) -> &Fonts {
    self.fonts
        .as_deref()
        .expect("No fonts available until first call to CtxRef::begin_frame()")
}
/// The egui texture, containing font characters etc.
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn texture(&self) -> Arc<epaint::Texture> {
    // Panics before the first `begin_frame` (see `fonts()`).
    self.fonts().texture()
}
/// Tell `egui` which fonts to use.
///
/// The default `egui` fonts only support latin and cyrillic alphabets,
/// but you can call this to install additional fonts that support e.g. korean characters.
///
/// The new fonts will become active at the start of the next frame.
pub fn set_fonts(&self, font_definitions: FontDefinitions) {
    if let Some(current_fonts) = &self.fonts {
        // NOTE: this comparison is expensive since it checks TTF data for equality
        if current_fonts.definitions() == &font_definitions {
            return; // no change - save us from reloading font textures
        }
    }
    // Picked up by `begin_frame_mut` at the start of the next frame.
    self.memory().new_font_definitions = Some(font_definitions);
}
/// The [`Style`] used by all subsequent windows, panels etc.
/// Cheap: this clones an `Arc`, not the `Style` data.
pub fn style(&self) -> Arc<Style> {
    self.memory().options.style.clone()
}
/// The [`Style`] used by all new windows, panels etc.
///
/// You can also use [`Ui::style_mut`] to change the style of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// let mut style: egui::Style = (*ctx.style()).clone();
/// style.spacing.item_spacing = egui::vec2(10.0, 20.0);
/// ctx.set_style(style);
/// ```
pub fn set_style(&self, style: impl Into<Arc<Style>>) {
    self.memory().options.style = style.into();
}
/// The [`Visuals`] used by all subsequent windows, panels etc.
///
/// You can also use [`Ui::visuals_mut`] to change the visuals of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// ctx.set_visuals(egui::Visuals::light()); // Switch to light mode
/// ```
pub fn set_visuals(&self, visuals: crate::Visuals) {
    let mut memory = self.memory();
    // Clone-on-write via `Arc::make_mut`, then overwrite just the visuals:
    std::sync::Arc::make_mut(&mut memory.options.style).visuals = visuals;
}
/// The number of physical pixels for each logical point.
#[inline(always)]
pub fn pixels_per_point(&self) -> f32 {
self.input.pixels_per_point()
}
/// Set the number of physical pixels for each logical point.
/// Will become active at the start of the next frame.
///
/// Note that this may be overwritten by input from the integration via [`RawInput::pixels_per_point`].
/// For instance, when using `egui_web` the browsers native zoom level will always be used.
pub fn set_pixels_per_point(&self, pixels_per_point: f32) {
self.memory().new_pixels_per_point = Some(pixels_per_point);
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_to_pixel(&self, point: f32) -> f32 {
let pixels_per_point = self.pixels_per_point();
(point * pixels_per_point).round() / pixels_per_point
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_pos_to_pixels(&self, pos: Pos2) -> Pos2 {
pos2(self.round_to_pixel(pos.x), self.round_to_pixel(pos.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_vec_to_pixels(&self, vec: Vec2) -> Vec2 {
vec2(self.round_to_pixel(vec.x), self.round_to_pixel(vec.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_rect_to_pixels(&self, rect: Rect) -> Rect {
Rect {
min: self.round_pos_to_pixels(rect.min),
max: self.round_pos_to_pixels(rect.max),
}
}
// ---------------------------------------------------------------------
/// Constrain the position of a window/area so it fits within the provided boundary.
///
/// If area is `None`, will constrain to [`Self::available_rect`].
pub(crate) fn constrain_window_rect_to_area(&self, window: Rect, area: Option<Rect>) -> Rect {
let mut area = area.unwrap_or_else(|| self.available_rect());
if window.width() > area.width() {
// Allow overlapping side bars.
// This is important for small screens, e.g. mobiles running the web demo.
area.max.x = self.input().screen_rect().max.x;
area.min.x = self.input().screen_rect().min.x;
}
if window.height() > area.height() {
// Allow overlapping top/bottom bars:
area.max.y = self.input().screen_rect().max.y;
area.min.y = self.input().screen_rect().min.y;
}
let mut pos = window.min;
// Constrain to screen, unless window is too large to fit:
let margin_x = (window.width() - area.width()).at_least(0.0);
let margin_y = (window.height() - area.height()).at_least(0.0);
pos.x = pos.x.at_most(area.right() + margin_x - window.width()); // move left if needed
pos.x = pos.x.at_least(area.left() - margin_x); // move right if needed
pos.y = pos.y.at_most(area.bottom() + margin_y - window.height()); // move right if needed
pos.y = pos.y.at_least(area.top() - margin_y); // move down if needed
pos = self.round_pos_to_pixels(pos);
Rect::from_min_size(pos, window.size())
}
// ---------------------------------------------------------------------
fn begin_frame_mut(&mut self, new_raw_input: RawInput) {
self.memory().begin_frame(&self.input, &new_raw_input);
let mut input = std::mem::take(&mut self.input);
if let Some(new_pixels_per_point) = self.memory().new_pixels_per_point.take() {
input.pixels_per_point = new_pixels_per_point;
}
self.input = input.begin_frame(new_raw_input);
self.frame_state.lock().begin_frame(&self.input);
{
// Load new fonts if required:
let new_font_definitions = self.memory().new_font_definitions.take();
let pixels_per_point = self.input.pixels_per_point();
let pixels_per_point_changed = match &self.fonts {
None => true,
Some(current_fonts) => {
(current_fonts.pixels_per_point() - pixels_per_point).abs() > 1e-3
}
};
if self.fonts.is_none() || new_font_definitions.is_some() || pixels_per_point_changed {
self.fonts = Some(Arc::new(Fonts::new(
pixels_per_point,
new_font_definitions.unwrap_or_else(|| {
self.fonts
.as_ref()
.map(|font| font.definitions().clone())
.unwrap_or_default()
}),
)));
}
}
// Ensure we register the background area so panels and background ui can catch clicks:
let screen_rect = self.input.screen_rect();
self.memory().areas.set_state(
LayerId::background(),
containers::area::State {
pos: screen_rect.min,
size: screen_rect.size(),
interactable: true,
},
);
}
/// Call at the end of each frame.
/// Returns what has happened this frame [`crate::Output`] as well as what you need to paint.
/// You can transform the returned shapes into triangles with a call to [`Context::tessellate`].
#[must_use]
pub fn end_frame(&self) -> (Output, Vec<ClippedShape>) {
if self.input.wants_repaint() {
self.request_repaint();
}
self.memory()
.end_frame(&self.input, &self.frame_state().used_ids);
self.fonts().end_frame();
let mut output: Output = std::mem::take(&mut self.output());
if self.repaint_requests.load(SeqCst) > 0 {
self.repaint_requests.fetch_sub(1, SeqCst);
output.needs_repaint = true;
}
let shapes = self.drain_paint_lists();
(output, shapes)
}
fn drain_paint_lists(&self) -> Vec<ClippedShape> {
let memory = self.memory();
self.graphics().drain(memory.areas.order()).collect()
}
/// Tessellate the given shapes into triangle meshes.
pub fn tessellate(&self, shapes: Vec<ClippedShape>) -> Vec<ClippedMesh> {
// A tempting optimization is to reuse the tessellation from last frame if the
// shapes are the same, but just comparing the shapes takes about 50% of the time
// it takes to tessellate them, so it is not a worth optimization.
let mut tessellation_options = self.memory().options.tessellation_options;
tessellation_options.pixels_per_point = self.pixels_per_point();
tessellation_options.aa_size = 1.0 / self.pixels_per_point();
let paint_stats = PaintStats::from_shapes(&shapes);
let clipped_meshes = tessellator::tessellate_shapes(
shapes,
tessellation_options,
self.fonts().texture().size(),
);
*self.paint_stats.lock() = paint_stats.with_clipped_meshes(&clipped_meshes);
clipped_meshes
}
// ---------------------------------------------------------------------
/// How much space is used by panels and windows.
pub fn used_rect(&self) -> Rect {
let mut used = self.frame_state().used_by_panels;
for window in self.memory().areas.visible_windows() {
used = used.union(window.rect());
}
used
}
/// How much space is used by panels and windows.
/// You can shrink your egui area to this size and still fit all egui components.
pub fn used_size(&self) -> Vec2 {
self.used_rect().max - Pos2::new(0.0, 0.0)
}
// ---------------------------------------------------------------------
/// Is the pointer (mouse/touch) over any egui area?
pub fn is_pointer_over_area(&self) -> bool {
if let Some(pointer_pos) = self.input.pointer.interact_pos() {
if let Some(layer) = self.layer_id_at(pointer_pos) {
if layer.order == Order::Background {
!self.frame_state().unused_rect.contains(pointer_pos)
} else {
true
}
} else {
false
}
} else {
false
}
}
/// True if egui is currently interested in the pointer (mouse or touch).
/// Could be the pointer is hovering over a [`Window`] or the user is dragging a widget.
/// If `false`, the pointer is outside of any egui area and so
/// you may be interested in what it is doing (e.g. controlling your game).
/// Returns `false` if a drag started outside of egui and then moved over an egui area.
pub fn wants_pointer_input(&self) -> bool {
self.is_using_pointer() || (self.is_pointer_over_area() && !self.input().pointer.any_down())
}
/// Is egui currently using the pointer position (e.g. dragging a slider).
/// NOTE: this will return `false` if the pointer is just hovering over an egui area.
pub fn is_using_pointer(&self) -> bool {
self.memory().interaction.is_using_pointer()
}
/// If `true`, egui is currently listening on text input (e.g. typing text in a [`TextEdit`]).
pub fn wants_keyboard_input(&self) -> bool {
self.memory().interaction.focus.focused().is_some()
}
// ---------------------------------------------------------------------
/// Move all the graphics at the given layer.
/// Can be used to implement drag-and-drop (see relevant demo).
pub fn translate_layer(&self, layer_id: LayerId, delta: Vec2) {
if delta != Vec2::ZERO {
self.graphics().list(layer_id).lock().translate(delta);
}
}
/// Top-most layer at the given position.
pub fn layer_id_at(&self, pos: Pos2) -> Option<LayerId> {
let resize_grab_radius_side = self.style().interaction.resize_grab_radius_side;
self.memory().layer_id_at(pos, resize_grab_radius_side)
}
pub(crate) fn rect_contains_pointer(&self, layer_id: LayerId, rect: Rect) -> bool {
if let Some(pointer_pos) = self.input.pointer.interact_pos() {
rect.contains(pointer_pos) && self.layer_id_at(pointer_pos) == Some(layer_id)
} else {
false
}
}
// ---------------------------------------------------------------------
/// Wether or not to debug widget layout on hover.
pub fn debug_on_hover(&self) -> bool {
self.memory().options.style.debug.debug_on_hover
}
/// Turn on/off wether or not to debug widget layout on hover.
pub fn set_debug_on_hover(&self, debug_on_hover: bool) {
let mut style = (*self.memory().options.style).clone();
style.debug.debug_on_hover = debug_on_hover;
self.set_style(style);
}
}
/// ## Animation
impl Context {
/// Returns a value in the range [0, 1], to indicate "how on" this thing is.
///
/// The first time called it will return `if value { 1.0 } else { 0.0 }`
/// Calling this with `value = true` will always yield a number larger than zero, quickly going towards one.
/// Calling this with `value = false` will always yield a number less than one, quickly going towards zero.
///
/// The function will call [`Self::request_repaint()`] when appropriate.
pub fn animate_bool(&self, id: Id, value: bool) -> f32 {
let animation_time = self.style().animation_time;
let animated_value =
self.animation_manager
.lock()
.animate_bool(&self.input, animation_time, id, value);
let animation_in_progress = 0.0 < animated_value && animated_value < 1.0;
if animation_in_progress {
self.request_repaint();
}
animated_value
}
/// Clear memory of any animations.
pub fn clear_animations(&self) {
*self.animation_manager.lock() = Default::default();
}
}
impl Context {
pub fn settings_ui(&self, ui: &mut Ui) {
use crate::containers::*;
CollapsingHeader::new("🎑 Style")
.default_open(true)
.show(ui, |ui| {
self.style_ui(ui);
});
CollapsingHeader::new("🔠 Fonts")
.default_open(false)
.show(ui, |ui| {
let mut font_definitions = self.fonts().definitions().clone();
font_definitions.ui(ui);
self.fonts().texture().ui(ui);
self.set_fonts(font_definitions);
});
CollapsingHeader::new("✒ Painting")
.default_open(true)
.show(ui, |ui| {
let mut tessellation_options = self.memory().options.tessellation_options;
tessellation_options.ui(ui);
ui.vertical_centered(|ui| reset_button(ui, &mut tessellation_options));
self.memory().options.tessellation_options = tessellation_options;
});
}
pub fn inspection_ui(&self, ui: &mut Ui) {
use crate::containers::*;
crate::trace!(ui);
ui.label(format!("Is using pointer: {}", self.is_using_pointer()))
.on_hover_text(
"Is egui currently using the pointer actively (e.g. dragging a slider)?",
);
ui.label(format!("Wants pointer input: {}", self.wants_pointer_input()))
.on_hover_text("Is egui currently interested in the location of the pointer (either because it is in use, or because it is hovering over a window).");
ui.label(format!(
"Wants keyboard input: {}",
self.wants_keyboard_input()
))
.on_hover_text("Is egui currently listening for text input?");
ui.label(format!(
"Keyboard focus widget: {}",
self.memory()
.interaction
.focus
.focused()
.as_ref()
.map(Id::short_debug_format)
.unwrap_or_default()
))
.on_hover_text("Is egui currently listening for text input?");
let pointer_pos = self
.input()
.pointer
.hover_pos()
.map_or_else(String::new, |pos| format!("{:?}", pos));
ui.label(format!("Pointer pos: {}", pointer_pos));
let top_layer = self
.input()
.pointer
.hover_pos()
.and_then(|pos| self.layer_id_at(pos))
.map_or_else(String::new, |layer| layer.short_debug_format());
ui.label(format!("Top layer under mouse: {}", top_layer));
ui.add_space(16.0);
ui.label(format!(
"There are {} text galleys in the layout cache",
self.fonts().num_galleys_in_cache()
))
.on_hover_text("This is approximately the number of text strings on screen");
ui.add_space(16.0);
CollapsingHeader::new("📥 Input")
.default_open(false)
.show(ui, |ui| ui.input().clone().ui(ui));
CollapsingHeader::new("📊 Paint stats")
.default_open(true)
.show(ui, |ui| {
self.paint_stats.lock().ui(ui);
});
}
pub fn memory_ui(&self, ui: &mut crate::Ui) {
if ui
.button("Reset all")
.on_hover_text("Reset all egui state")
.clicked()
{
*self.memory() = Default::default();
}
ui.horizontal(|ui| {
ui.label(format!(
"{} areas (panels, windows, popups, …)",
self.memory().areas.count()
));
if ui.button("Reset").clicked() {
self.memory().areas = Default::default();
}
});
ui.indent("areas", |ui| {
ui.label("Visible areas, ordered back to front.");
ui.label("Hover to highlight");
let layers_ids: Vec<LayerId> = self.memory().areas.order().to_vec();
for layer_id in layers_ids {
let area = self.memory().areas.get(layer_id.id).cloned();
if let Some(area) = area {
let is_visible = self.memory().areas.is_visible(&layer_id);
if !is_visible {
continue;
}
let text = format!("{} - {:?}", layer_id.short_debug_format(), area.rect(),);
// TODO: `Sense::hover_highlight()`
if ui
.add(Label::new(text).monospace().sense(Sense::click()))
.hovered
&& is_visible
{
ui.ctx()
.debug_painter()
.debug_rect(area.rect(), Color32::RED, "");
}
}
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} collapsing headers",
self.memory()
.id_data
.count::<containers::collapsing_header::State>()
));
if ui.button("Reset").clicked() {
self.memory()
.id_data
.remove_by_type::<containers::collapsing_header::State>();
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} menu bars",
self.memory().id_data_temp.count::<menu::BarState>()
));
if ui.button("Reset").clicked() {
self.memory()
.id_data_temp
.remove_by_type::<menu::BarState>();
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} scroll areas",
self.memory().id_data.count::<scroll_area::State>()
));
if ui.button("Reset").clicked() {
self.memory().id_data.remove_by_type::<scroll_area::State>();
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} resize areas",
self.memory().id_data.count::<resize::State>()
));
if ui.button("Reset").clicked() {
self.memory().id_data.remove_by_type::<resize::State>();
}
});
ui.shrink_width_to_current(); // don't let the text below grow this window wider
ui.label("NOTE: the position of this window cannot be reset from within itself.");
ui.collapsing("Interaction", |ui| {
let interaction = self.memory().interaction.clone();
interaction.ui(ui);
});
}
}
impl Context {
pub fn style_ui(&self, ui: &mut Ui) {
let mut style: Style = (*self.style()).clone();
style.ui(ui);
self.set_style(style);
}
} | random_line_split | |
context.rs | // #![warn(missing_docs)]
use std::sync::{
atomic::{AtomicU32, Ordering::SeqCst},
Arc,
};
use crate::{
animation_manager::AnimationManager,
data::output::Output,
frame_state::FrameState,
input_state::*,
layers::GraphicLayers,
mutex::{Mutex, MutexGuard},
*,
};
use epaint::{stats::*, text::Fonts, *};
// ----------------------------------------------------------------------------
/// A wrapper around [`Arc`](std::sync::Arc)`<`[`Context`]`>`.
/// This is how you will normally create and access a [`Context`].
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
///
/// [`CtxRef`] is cheap to clone, and any clones refers to the same mutable data.
///
/// # Example:
///
/// ``` no_run
/// # fn handle_output(_: egui::Output) {}
/// # fn paint(_: Vec<egui::ClippedMesh>) {}
/// let mut ctx = egui::CtxRef::default();
///
/// // Game loop:
/// loop {
/// let raw_input = egui::RawInput::default();
/// ctx.begin_frame(raw_input);
///
/// egui::CentralPanel::default().show(&ctx, |ui| {
/// ui.label("Hello world!");
/// if ui.button("Click me").clicked() {
/// /* take some action here */
/// }
/// });
///
/// let (output, shapes) = ctx.end_frame();
/// let clipped_meshes = ctx.tessellate(shapes); // create triangles to paint
/// handle_output(output);
/// paint(clipped_meshes);
/// }
/// ```
///
#[derive(Clone)]
pub struct CtxRef(std::sync::Arc<Context>);
impl std::ops::Deref for CtxRef {
type Target = Context;
fn deref(&self) -> &Context {
&*self.0
}
}
impl AsRef<Context> for CtxRef {
fn as_ref(&self) -> &Context |
}
impl std::borrow::Borrow<Context> for CtxRef {
fn borrow(&self) -> &Context {
self.0.borrow()
}
}
impl std::cmp::PartialEq for CtxRef {
fn eq(&self, other: &CtxRef) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
impl Default for CtxRef {
fn default() -> Self {
Self(Arc::new(Context {
// Start with painting an extra frame to compensate for some widgets
// that take two frames before they "settle":
repaint_requests: AtomicU32::new(1),
..Context::default()
}))
}
}
impl CtxRef {
/// Call at the start of every frame. Match with a call to [`Context::end_frame`].
///
/// This will modify the internal reference to point to a new generation of [`Context`].
/// Any old clones of this [`CtxRef`] will refer to the old [`Context`], which will not get new input.
///
/// Put your widgets into a [`SidePanel`], [`TopBottomPanel`], [`CentralPanel`], [`Window`] or [`Area`].
pub fn begin_frame(&mut self, new_input: RawInput) {
let mut self_: Context = (*self.0).clone();
self_.begin_frame_mut(new_input);
*self = Self(Arc::new(self_));
}
// ---------------------------------------------------------------------
/// If the given [`Id`] is not unique, an error will be printed at the given position.
/// Call this for [`Id`]:s that need interaction or persistence.
pub(crate) fn register_interaction_id(&self, id: Id, new_rect: Rect) {
let prev_rect = self.frame_state().used_ids.insert(id, new_rect);
if let Some(prev_rect) = prev_rect {
// it is ok to reuse the same ID for e.g. a frame around a widget,
// or to check for interaction with the same widget twice:
if prev_rect.expand(0.1).contains_rect(new_rect)
|| new_rect.expand(0.1).contains_rect(prev_rect)
{
return;
}
let show_error = |pos: Pos2, text: String| {
let painter = self.debug_painter();
let rect = painter.error(pos, text);
if let Some(pointer_pos) = self.input.pointer.hover_pos() {
if rect.contains(pointer_pos) {
painter.error(
rect.left_bottom() + vec2(2.0, 4.0),
"ID clashes happens when things like Windows or CollapsingHeaders share names,\n\
or when things like ScrollAreas and Resize areas aren't given unique id_source:s.",
);
}
}
};
let id_str = id.short_debug_format();
if prev_rect.min.distance(new_rect.min) < 4.0 {
show_error(new_rect.min, format!("Double use of ID {}", id_str));
} else {
show_error(prev_rect.min, format!("First use of ID {}", id_str));
show_error(new_rect.min, format!("Second use of ID {}", id_str));
}
}
}
// ---------------------------------------------------------------------
/// Use `ui.interact` instead
#[allow(clippy::too_many_arguments)]
pub(crate) fn interact(
&self,
clip_rect: Rect,
item_spacing: Vec2,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
) -> Response {
let gap = 0.5; // Just to make sure we don't accidentally hover two things at once (a small eps should be sufficient).
// Make it easier to click things:
let interact_rect = rect.expand2(
(0.5 * item_spacing - Vec2::splat(gap))
.at_least(Vec2::splat(0.0))
.at_most(Vec2::splat(5.0)),
); // make it easier to click
let hovered = self.rect_contains_pointer(layer_id, clip_rect.intersect(interact_rect));
self.interact_with_hovered(layer_id, id, rect, sense, enabled, hovered)
}
/// You specify if a thing is hovered, and the function gives a `Response`.
pub(crate) fn interact_with_hovered(
&self,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
hovered: bool,
) -> Response {
let hovered = hovered && enabled; // can't even hover disabled widgets
let mut response = Response {
ctx: self.clone(),
layer_id,
id,
rect,
sense,
enabled,
hovered,
clicked: Default::default(),
double_clicked: Default::default(),
dragged: false,
drag_released: false,
is_pointer_button_down_on: false,
interact_pointer_pos: None,
changed: false, // must be set by the widget itself
};
if !enabled || !sense.focusable || !layer_id.allow_interaction() {
// Not interested or allowed input:
self.memory().surrender_focus(id);
return response;
}
// We only want to focus labels if the screen reader is on.
let interested_in_focus =
sense.interactive() || sense.focusable && self.memory().options.screen_reader;
if interested_in_focus {
self.memory().interested_in_focus(id);
}
if sense.click
&& response.has_focus()
&& (self.input().key_pressed(Key::Space) || self.input().key_pressed(Key::Enter))
{
// Space/enter works like a primary click for e.g. selected buttons
response.clicked[PointerButton::Primary as usize] = true;
}
self.register_interaction_id(id, rect);
if sense.click || sense.drag {
let mut memory = self.memory();
memory.interaction.click_interest |= hovered && sense.click;
memory.interaction.drag_interest |= hovered && sense.drag;
response.dragged = memory.interaction.drag_id == Some(id);
response.is_pointer_button_down_on =
memory.interaction.click_id == Some(id) || response.dragged;
for pointer_event in &self.input.pointer.pointer_events {
match pointer_event {
PointerEvent::Moved(_) => {}
PointerEvent::Pressed(_) => {
if hovered {
if sense.click && memory.interaction.click_id.is_none() {
// potential start of a click
memory.interaction.click_id = Some(id);
response.is_pointer_button_down_on = true;
}
// HACK: windows have low priority on dragging.
// This is so that if you drag a slider in a window,
// the slider will steal the drag away from the window.
// This is needed because we do window interaction first (to prevent frame delay),
// and then do content layout.
if sense.drag
&& (memory.interaction.drag_id.is_none()
|| memory.interaction.drag_is_window)
{
// potential start of a drag
memory.interaction.drag_id = Some(id);
memory.interaction.drag_is_window = false;
memory.window_interaction = None; // HACK: stop moving windows (if any)
response.is_pointer_button_down_on = true;
response.dragged = true;
}
}
}
PointerEvent::Released(click) => {
response.drag_released = response.dragged;
response.dragged = false;
if hovered && response.is_pointer_button_down_on {
if let Some(click) = click {
let clicked = hovered && response.is_pointer_button_down_on;
response.clicked[click.button as usize] = clicked;
response.double_clicked[click.button as usize] =
clicked && click.is_double();
}
}
}
}
}
}
if response.is_pointer_button_down_on {
response.interact_pointer_pos = self.input().pointer.interact_pos();
}
if self.input.pointer.any_down() {
response.hovered &= response.is_pointer_button_down_on; // we don't hover widgets while interacting with *other* widgets
}
if response.has_focus() && response.clicked_elsewhere() {
self.memory().surrender_focus(id);
}
response
}
/// Get a full-screen painter for a new or existing layer
pub fn layer_painter(&self, layer_id: LayerId) -> Painter {
Painter::new(self.clone(), layer_id, self.input.screen_rect())
}
/// Paint on top of everything else
pub fn debug_painter(&self) -> Painter {
Self::layer_painter(self, LayerId::debug())
}
}
// ----------------------------------------------------------------------------
/// This is the first thing you need when working with egui. Create using [`CtxRef`].
///
/// Contains the [`InputState`], [`Memory`], [`Output`], and more.
///
/// Your handle to Egui.
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
/// Multi-threaded access to a [`Context`] is behind the feature flag `multi_threaded`.
/// Normally you'd always do all ui work on one thread, or perhaps use multiple contexts,
/// but if you really want to access the same Context from multiple threads, it *SHOULD* be fine,
/// but you are likely the first person to try it.
#[derive(Default)]
pub struct Context {
// We clone the Context each frame so we can set a new `input`.
// This is so we can avoid a mutex lock to access the `InputState`.
// This means everything else needs to be behind an Arc.
// We can probably come up with a nicer design.
//
/// None until first call to `begin_frame`.
fonts: Option<Arc<Fonts>>,
memory: Arc<Mutex<Memory>>,
animation_manager: Arc<Mutex<AnimationManager>>,
input: InputState,
/// State that is collected during a frame and then cleared
frame_state: Arc<Mutex<FrameState>>,
// The output of a frame:
graphics: Arc<Mutex<GraphicLayers>>,
output: Arc<Mutex<Output>>,
paint_stats: Arc<Mutex<PaintStats>>,
/// While positive, keep requesting repaints. Decrement at the end of each frame.
repaint_requests: AtomicU32,
}
impl Clone for Context {
fn clone(&self) -> Self {
Context {
fonts: self.fonts.clone(),
memory: self.memory.clone(),
animation_manager: self.animation_manager.clone(),
input: self.input.clone(),
frame_state: self.frame_state.clone(),
graphics: self.graphics.clone(),
output: self.output.clone(),
paint_stats: self.paint_stats.clone(),
repaint_requests: self.repaint_requests.load(SeqCst).into(),
}
}
}
impl Context {
/// How much space is still available after panels has been added.
/// This is the "background" area, what egui doesn't cover with panels (but may cover with windows).
/// This is also the area to which windows are constrained.
pub fn available_rect(&self) -> Rect {
self.frame_state.lock().available_rect()
}
/// Stores all the egui state.
/// If you want to store/restore egui, serialize this.
pub fn memory(&self) -> MutexGuard<'_, Memory> {
self.memory.lock()
}
pub(crate) fn graphics(&self) -> MutexGuard<'_, GraphicLayers> {
self.graphics.lock()
}
/// What egui outputs each frame.
pub fn output(&self) -> MutexGuard<'_, Output> {
self.output.lock()
}
pub(crate) fn frame_state(&self) -> MutexGuard<'_, FrameState> {
self.frame_state.lock()
}
/// Call this if there is need to repaint the UI, i.e. if you are showing an animation.
/// If this is called at least once in a frame, then there will be another frame right after this.
/// Call as many times as you wish, only one repaint will be issued.
pub fn request_repaint(&self) {
// request two frames of repaint, just to cover some corner cases (frame delays):
let times_to_repaint = 2;
self.repaint_requests.store(times_to_repaint, SeqCst);
}
#[inline(always)]
pub fn input(&self) -> &InputState {
&self.input
}
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn fonts(&self) -> &Fonts {
&*self
.fonts
.as_ref()
.expect("No fonts available until first call to CtxRef::begin_frame()")
}
/// The egui texture, containing font characters etc.
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn texture(&self) -> Arc<epaint::Texture> {
self.fonts().texture()
}
/// Tell `egui` which fonts to use.
///
/// The default `egui` fonts only support latin and cyrillic alphabets,
/// but you can call this to install additional fonts that support e.g. korean characters.
///
/// The new fonts will become active at the start of the next frame.
pub fn set_fonts(&self, font_definitions: FontDefinitions) {
if let Some(current_fonts) = &self.fonts {
// NOTE: this comparison is expensive since it checks TTF data for equality
if current_fonts.definitions() == &font_definitions {
return; // no change - save us from reloading font textures
}
}
self.memory().new_font_definitions = Some(font_definitions);
}
/// The [`Style`] used by all subsequent windows, panels etc.
pub fn style(&self) -> Arc<Style> {
self.memory().options.style.clone()
}
/// The [`Style`] used by all new windows, panels etc.
///
/// You can also use [`Ui::style_mut`] to change the style of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// let mut style: egui::Style = (*ctx.style()).clone();
/// style.spacing.item_spacing = egui::vec2(10.0, 20.0);
/// ctx.set_style(style);
/// ```
pub fn set_style(&self, style: impl Into<Arc<Style>>) {
self.memory().options.style = style.into();
}
/// The [`Visuals`] used by all subsequent windows, panels etc.
///
/// You can also use [`Ui::visuals_mut`] to change the visuals of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// ctx.set_visuals(egui::Visuals::light()); // Switch to light mode
/// ```
pub fn set_visuals(&self, visuals: crate::Visuals) {
std::sync::Arc::make_mut(&mut self.memory().options.style).visuals = visuals;
}
/// The number of physical pixels for each logical point.
#[inline(always)]
pub fn pixels_per_point(&self) -> f32 {
self.input.pixels_per_point()
}
/// Set the number of physical pixels for each logical point.
/// Will become active at the start of the next frame.
///
/// Note that this may be overwritten by input from the integration via [`RawInput::pixels_per_point`].
/// For instance, when using `egui_web` the browsers native zoom level will always be used.
pub fn set_pixels_per_point(&self, pixels_per_point: f32) {
self.memory().new_pixels_per_point = Some(pixels_per_point);
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_to_pixel(&self, point: f32) -> f32 {
let pixels_per_point = self.pixels_per_point();
(point * pixels_per_point).round() / pixels_per_point
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_pos_to_pixels(&self, pos: Pos2) -> Pos2 {
pos2(self.round_to_pixel(pos.x), self.round_to_pixel(pos.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_vec_to_pixels(&self, vec: Vec2) -> Vec2 {
vec2(self.round_to_pixel(vec.x), self.round_to_pixel(vec.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_rect_to_pixels(&self, rect: Rect) -> Rect {
Rect {
min: self.round_pos_to_pixels(rect.min),
max: self.round_pos_to_pixels(rect.max),
}
}
// ---------------------------------------------------------------------
/// Constrain the position of a window/area so it fits within the provided boundary.
///
/// If area is `None`, will constrain to [`Self::available_rect`].
pub(crate) fn constrain_window_rect_to_area(&self, window: Rect, area: Option<Rect>) -> Rect {
let mut area = area.unwrap_or_else(|| self.available_rect());
if window.width() > area.width() {
// Allow overlapping side bars.
// This is important for small screens, e.g. mobiles running the web demo.
area.max.x = self.input().screen_rect().max.x;
area.min.x = self.input().screen_rect().min.x;
}
if window.height() > area.height() {
// Allow overlapping top/bottom bars:
area.max.y = self.input().screen_rect().max.y;
area.min.y = self.input().screen_rect().min.y;
}
let mut pos = window.min;
// Constrain to screen, unless window is too large to fit:
let margin_x = (window.width() - area.width()).at_least(0.0);
let margin_y = (window.height() - area.height()).at_least(0.0);
pos.x = pos.x.at_most(area.right() + margin_x - window.width()); // move left if needed
pos.x = pos.x.at_least(area.left() - margin_x); // move right if needed
pos.y = pos.y.at_most(area.bottom() + margin_y - window.height()); // move right if needed
pos.y = pos.y.at_least(area.top() - margin_y); // move down if needed
pos = self.round_pos_to_pixels(pos);
Rect::from_min_size(pos, window.size())
}
// ---------------------------------------------------------------------
fn begin_frame_mut(&mut self, new_raw_input: RawInput) {
self.memory().begin_frame(&self.input, &new_raw_input);
let mut input = std::mem::take(&mut self.input);
if let Some(new_pixels_per_point) = self.memory().new_pixels_per_point.take() {
input.pixels_per_point = new_pixels_per_point;
}
self.input = input.begin_frame(new_raw_input);
self.frame_state.lock().begin_frame(&self.input);
{
// Load new fonts if required:
let new_font_definitions = self.memory().new_font_definitions.take();
let pixels_per_point = self.input.pixels_per_point();
let pixels_per_point_changed = match &self.fonts {
None => true,
Some(current_fonts) => {
(current_fonts.pixels_per_point() - pixels_per_point).abs() > 1e-3
}
};
if self.fonts.is_none() || new_font_definitions.is_some() || pixels_per_point_changed {
self.fonts = Some(Arc::new(Fonts::new(
pixels_per_point,
new_font_definitions.unwrap_or_else(|| {
self.fonts
.as_ref()
.map(|font| font.definitions().clone())
.unwrap_or_default()
}),
)));
}
}
// Ensure we register the background area so panels and background ui can catch clicks:
let screen_rect = self.input.screen_rect();
self.memory().areas.set_state(
LayerId::background(),
containers::area::State {
pos: screen_rect.min,
size: screen_rect.size(),
interactable: true,
},
);
}
/// Call at the end of each frame.
/// Returns what has happened this frame [`crate::Output`] as well as what you need to paint.
/// You can transform the returned shapes into triangles with a call to [`Context::tessellate`].
#[must_use]
pub fn end_frame(&self) -> (Output, Vec<ClippedShape>) {
if self.input.wants_repaint() {
self.request_repaint();
}
self.memory()
.end_frame(&self.input, &self.frame_state().used_ids);
self.fonts().end_frame();
let mut output: Output = std::mem::take(&mut self.output());
if self.repaint_requests.load(SeqCst) > 0 {
self.repaint_requests.fetch_sub(1, SeqCst);
output.needs_repaint = true;
}
let shapes = self.drain_paint_lists();
(output, shapes)
}
fn drain_paint_lists(&self) -> Vec<ClippedShape> {
let memory = self.memory();
self.graphics().drain(memory.areas.order()).collect()
}
/// Tessellate the given shapes into triangle meshes.
pub fn tessellate(&self, shapes: Vec<ClippedShape>) -> Vec<ClippedMesh> {
// A tempting optimization is to reuse the tessellation from last frame if the
// shapes are the same, but just comparing the shapes takes about 50% of the time
// it takes to tessellate them, so it is not a worth optimization.
let mut tessellation_options = self.memory().options.tessellation_options;
tessellation_options.pixels_per_point = self.pixels_per_point();
tessellation_options.aa_size = 1.0 / self.pixels_per_point();
let paint_stats = PaintStats::from_shapes(&shapes);
let clipped_meshes = tessellator::tessellate_shapes(
shapes,
tessellation_options,
self.fonts().texture().size(),
);
*self.paint_stats.lock() = paint_stats.with_clipped_meshes(&clipped_meshes);
clipped_meshes
}
// ---------------------------------------------------------------------
/// How much space is used by panels and windows.
pub fn used_rect(&self) -> Rect {
let mut used = self.frame_state().used_by_panels;
for window in self.memory().areas.visible_windows() {
used = used.union(window.rect());
}
used
}
/// How much space is used by panels and windows.
/// You can shrink your egui area to this size and still fit all egui components.
pub fn used_size(&self) -> Vec2 {
self.used_rect().max - Pos2::new(0.0, 0.0)
}
// ---------------------------------------------------------------------
/// Is the pointer (mouse/touch) over any egui area?
pub fn is_pointer_over_area(&self) -> bool {
if let Some(pointer_pos) = self.input.pointer.interact_pos() {
if let Some(layer) = self.layer_id_at(pointer_pos) {
if layer.order == Order::Background {
!self.frame_state().unused_rect.contains(pointer_pos)
} else {
true
}
} else {
false
}
} else {
false
}
}
/// True if egui is currently interested in the pointer (mouse or touch).
/// Could be the pointer is hovering over a [`Window`] or the user is dragging a widget.
/// If `false`, the pointer is outside of any egui area and so
/// you may be interested in what it is doing (e.g. controlling your game).
/// Returns `false` if a drag started outside of egui and then moved over an egui area.
pub fn wants_pointer_input(&self) -> bool {
self.is_using_pointer() || (self.is_pointer_over_area() && !self.input().pointer.any_down())
}
/// Is egui currently using the pointer position (e.g. dragging a slider).
/// NOTE: this will return `false` if the pointer is just hovering over an egui area.
pub fn is_using_pointer(&self) -> bool {
self.memory().interaction.is_using_pointer()
}
/// If `true`, egui is currently listening on text input (e.g. typing text in a [`TextEdit`]).
pub fn wants_keyboard_input(&self) -> bool {
self.memory().interaction.focus.focused().is_some()
}
// ---------------------------------------------------------------------
/// Move all the graphics at the given layer.
/// Can be used to implement drag-and-drop (see relevant demo).
pub fn translate_layer(&self, layer_id: LayerId, delta: Vec2) {
if delta != Vec2::ZERO {
self.graphics().list(layer_id).lock().translate(delta);
}
}
/// Top-most layer at the given position.
pub fn layer_id_at(&self, pos: Pos2) -> Option<LayerId> {
let resize_grab_radius_side = self.style().interaction.resize_grab_radius_side;
self.memory().layer_id_at(pos, resize_grab_radius_side)
}
pub(crate) fn rect_contains_pointer(&self, layer_id: LayerId, rect: Rect) -> bool {
if let Some(pointer_pos) = self.input.pointer.interact_pos() {
rect.contains(pointer_pos) && self.layer_id_at(pointer_pos) == Some(layer_id)
} else {
false
}
}
// ---------------------------------------------------------------------
/// Wether or not to debug widget layout on hover.
pub fn debug_on_hover(&self) -> bool {
self.memory().options.style.debug.debug_on_hover
}
/// Turn on/off wether or not to debug widget layout on hover.
pub fn set_debug_on_hover(&self, debug_on_hover: bool) {
let mut style = (*self.memory().options.style).clone();
style.debug.debug_on_hover = debug_on_hover;
self.set_style(style);
}
}
/// ## Animation
impl Context {
/// Returns a value in the range [0, 1], to indicate "how on" this thing is.
///
/// The first time called it will return `if value { 1.0 } else { 0.0 }`
/// Calling this with `value = true` will always yield a number larger than zero, quickly going towards one.
/// Calling this with `value = false` will always yield a number less than one, quickly going towards zero.
///
/// The function will call [`Self::request_repaint()`] when appropriate.
pub fn animate_bool(&self, id: Id, value: bool) -> f32 {
let animation_time = self.style().animation_time;
let animated_value =
self.animation_manager
.lock()
.animate_bool(&self.input, animation_time, id, value);
let animation_in_progress = 0.0 < animated_value && animated_value < 1.0;
if animation_in_progress {
self.request_repaint();
}
animated_value
}
/// Clear memory of any animations.
pub fn clear_animations(&self) {
*self.animation_manager.lock() = Default::default();
}
}
impl Context {
pub fn settings_ui(&self, ui: &mut Ui) {
use crate::containers::*;
CollapsingHeader::new("🎑 Style")
.default_open(true)
.show(ui, |ui| {
self.style_ui(ui);
});
CollapsingHeader::new("🔠 Fonts")
.default_open(false)
.show(ui, |ui| {
let mut font_definitions = self.fonts().definitions().clone();
font_definitions.ui(ui);
self.fonts().texture().ui(ui);
self.set_fonts(font_definitions);
});
CollapsingHeader::new("✒ Painting")
.default_open(true)
.show(ui, |ui| {
let mut tessellation_options = self.memory().options.tessellation_options;
tessellation_options.ui(ui);
ui.vertical_centered(|ui| reset_button(ui, &mut tessellation_options));
self.memory().options.tessellation_options = tessellation_options;
});
}
pub fn inspection_ui(&self, ui: &mut Ui) {
use crate::containers::*;
crate::trace!(ui);
ui.label(format!("Is using pointer: {}", self.is_using_pointer()))
.on_hover_text(
"Is egui currently using the pointer actively (e.g. dragging a slider)?",
);
ui.label(format!("Wants pointer input: {}", self.wants_pointer_input()))
.on_hover_text("Is egui currently interested in the location of the pointer (either because it is in use, or because it is hovering over a window).");
ui.label(format!(
"Wants keyboard input: {}",
self.wants_keyboard_input()
))
.on_hover_text("Is egui currently listening for text input?");
ui.label(format!(
"Keyboard focus widget: {}",
self.memory()
.interaction
.focus
.focused()
.as_ref()
.map(Id::short_debug_format)
.unwrap_or_default()
))
.on_hover_text("Is egui currently listening for text input?");
let pointer_pos = self
.input()
.pointer
.hover_pos()
.map_or_else(String::new, |pos| format!("{:?}", pos));
ui.label(format!("Pointer pos: {}", pointer_pos));
let top_layer = self
.input()
.pointer
.hover_pos()
.and_then(|pos| self.layer_id_at(pos))
.map_or_else(String::new, |layer| layer.short_debug_format());
ui.label(format!("Top layer under mouse: {}", top_layer));
ui.add_space(16.0);
ui.label(format!(
"There are {} text galleys in the layout cache",
self.fonts().num_galleys_in_cache()
))
.on_hover_text("This is approximately the number of text strings on screen");
ui.add_space(16.0);
CollapsingHeader::new("📥 Input")
.default_open(false)
.show(ui, |ui| ui.input().clone().ui(ui));
CollapsingHeader::new("📊 Paint stats")
.default_open(true)
.show(ui, |ui| {
self.paint_stats.lock().ui(ui);
});
}
pub fn memory_ui(&self, ui: &mut crate::Ui) {
if ui
.button("Reset all")
.on_hover_text("Reset all egui state")
.clicked()
{
*self.memory() = Default::default();
}
ui.horizontal(|ui| {
ui.label(format!(
"{} areas (panels, windows, popups, …)",
self.memory().areas.count()
));
if ui.button("Reset").clicked() {
self.memory().areas = Default::default();
}
});
ui.indent("areas", |ui| {
ui.label("Visible areas, ordered back to front.");
ui.label("Hover to highlight");
let layers_ids: Vec<LayerId> = self.memory().areas.order().to_vec();
for layer_id in layers_ids {
let area = self.memory().areas.get(layer_id.id).cloned();
if let Some(area) = area {
let is_visible = self.memory().areas.is_visible(&layer_id);
if !is_visible {
continue;
}
let text = format!("{} - {:?}", layer_id.short_debug_format(), area.rect(),);
// TODO: `Sense::hover_highlight()`
if ui
.add(Label::new(text).monospace().sense(Sense::click()))
.hovered
&& is_visible
{
ui.ctx()
.debug_painter()
.debug_rect(area.rect(), Color32::RED, "");
}
}
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} collapsing headers",
self.memory()
.id_data
.count::<containers::collapsing_header::State>()
));
if ui.button("Reset").clicked() {
self.memory()
.id_data
.remove_by_type::<containers::collapsing_header::State>();
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} menu bars",
self.memory().id_data_temp.count::<menu::BarState>()
));
if ui.button("Reset").clicked() {
self.memory()
.id_data_temp
.remove_by_type::<menu::BarState>();
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} scroll areas",
self.memory().id_data.count::<scroll_area::State>()
));
if ui.button("Reset").clicked() {
self.memory().id_data.remove_by_type::<scroll_area::State>();
}
});
ui.horizontal(|ui| {
ui.label(format!(
"{} resize areas",
self.memory().id_data.count::<resize::State>()
));
if ui.button("Reset").clicked() {
self.memory().id_data.remove_by_type::<resize::State>();
}
});
ui.shrink_width_to_current(); // don't let the text below grow this window wider
ui.label("NOTE: the position of this window cannot be reset from within itself.");
ui.collapsing("Interaction", |ui| {
let interaction = self.memory().interaction.clone();
interaction.ui(ui);
});
}
}
impl Context {
pub fn style_ui(&self, ui: &mut Ui) {
let mut style: Style = (*self.style()).clone();
style.ui(ui);
self.set_style(style);
}
}
| {
self.0.as_ref()
} | identifier_body |
flash.go | //
// Copyright (c) 2014-2019 Cesanta Software Limited
// All rights reserved
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package flasher
import (
"crypto/md5"
"encoding/hex"
"io/ioutil"
"math/bits"
"sort"
"strings"
"time"
"github.com/juju/errors"
moscommon "github.com/mongoose-os/mos/cli/common"
"github.com/mongoose-os/mos/cli/flash/common"
"github.com/mongoose-os/mos/cli/flash/esp"
"github.com/mongoose-os/mos/cli/flash/esp32"
"github.com/mongoose-os/mos/common/fwbundle"
glog "k8s.io/klog/v2"
)
const (
flashSectorSize = 0x1000
flashBlockSize = 0x10000
// Pre-3.0 SDK, location of sys_params is hard-coded.
sysParamsPartType = "sys_params"
// 3.0+ SDK control placement of sys_params through partition table,
// no need for special handling.
sysParams3PartType = "sys_params3"
sysParamsAreaSize = 4 * flashSectorSize
espImageMagicByte = 0xe9
)
type image struct {
Name string
Type string
Addr uint32
Data []byte
ESP32Encrypt bool
}
type imagesByAddr []*image
func (pp imagesByAddr) Len() int { return len(pp) }
func (pp imagesByAddr) Swap(i, j int) { pp[i], pp[j] = pp[j], pp[i] }
func (pp imagesByAddr) Less(i, j int) bool {
return pp[i].Addr < pp[j].Addr
}
func enDis(enabled bool) string {
if enabled {
return "enabled"
}
return "disabled"
}
func Flash(ct esp.ChipType, fw *fwbundle.FirmwareBundle, opts *esp.FlashOpts) error {
if opts.KeepFS && opts.EraseChip {
return errors.Errorf("--keep-fs and --esp-erase-chip are incompatible")
}
cfr, err := ConnectToFlasherClient(ct, opts)
if err != nil {
return errors.Trace(err)
}
defer cfr.rc.Disconnect()
if ct == esp.ChipESP8266 {
// Based on our knowledge of flash size, adjust type=sys_params image.
adjustSysParamsLocation(fw, cfr.flashParams.Size())
}
// Sort images by address
var images []*image
for _, p := range fw.Parts {
if p.Type == fwbundle.FSPartType && opts.KeepFS {
continue
}
// For ESP32, resolve partition name to address
if p.ESP32PartitionName != "" {
pti, err := esp32.GetPartitionInfo(fw, p.ESP32PartitionName)
if err != nil {
return errors.Annotatef(err, "%s: failed to get respolve partition %q", p.Name, p.ESP32PartitionName)
}
// If partition is specified, addr can be optionally used to specify offset within the partition.
// The exception is app partition - these had both set fro compatibility since Feb 2018
// (https://github.com/cesanta/mongoose-os/commit/b8960587f4d564542c903f854e4fe1cef7bbde33)
// It's been removed in Oct 2021
// (https://github.com/cesanta/mongoose-os/commit/8d9a53f76898d736dcac96594bd4eae0cb6b83a0)
newAddr, newSize := p.Addr, p.Size
if p.Type != "app" {
newAddr = pti.Pos.Offset + p.Addr
} else {
newAddr = pti.Pos.Offset
}
if p.Size == 0 { // size = 0 -> until the end of the partition.
newSize = pti.Pos.Offset + pti.Pos.Size - newAddr
}
glog.V(1).Infof("%s: %s 0x%x %d -> 0x%x %d", p.Name, p.ESP32PartitionName, p.Addr, p.Size, newAddr, newSize)
p.Addr, p.Size = newAddr, newSize
}
data, err := fw.GetPartData(p.Name)
if err != nil {
return errors.Annotatef(err, "%s: failed to get data", p.Name)
}
im := &image{
Name: p.Name,
Type: p.Type,
Addr: p.Addr,
Data: data,
ESP32Encrypt: p.ESP32Encrypt,
}
images = append(images, im)
}
return errors.Trace(writeImages(ct, cfr, images, opts, true))
}
func writeImages(ct esp.ChipType, cfr *cfResult, images []*image, opts *esp.FlashOpts, sanityCheck bool) error {
var err error
common.Reportf("Flash size: %d, params: %s", cfr.flashParams.Size(), cfr.flashParams)
encryptionEnabled := false
secureBootEnabled := false
var esp32EncryptionKey []byte
var fusesByName map[string]*esp32.Fuse
kcs := esp32.KeyEncodingSchemeNone
if ct == esp.ChipESP32 { // TODO(rojer): Flash encryption support for ESP32-C3
_, _, fusesByName, err = esp32.ReadFuses(cfr.fc)
if err == nil {
if fcnt, err := fusesByName[esp32.FlashCryptCntFuseName].Value(true /* withDiffs */); err == nil {
encryptionEnabled = (bits.OnesCount64(fcnt.Uint64())%2 != 0)
kcs = esp32.GetKeyEncodingScheme(fusesByName)
common.Reportf("Flash encryption: %s, scheme: %s", enDis(encryptionEnabled), kcs)
}
if abs0, err := fusesByName[esp32.AbstractDone0FuseName].Value(true /* withDiffs */); err == nil {
secureBootEnabled = (abs0.Int64() != 0)
common.Reportf("Secure boot: %s", enDis(secureBootEnabled))
}
} else {
// Some boards (ARDUINO NANO 33 IOT) do not support memory reading commands to read efuses.
// Allow to proceed anyway.
common.Reportf("Failed to read eFuses, assuming no flash encryption")
}
}
for _, im := range images {
if im.Addr == 0 || im.Addr == 0x1000 && len(im.Data) >= 4 && im.Data[0] == 0xe9 {
im.Data[2], im.Data[3] = cfr.flashParams.Bytes()
}
if ct == esp.ChipESP32 && im.ESP32Encrypt && encryptionEnabled {
if esp32EncryptionKey == nil {
if opts.ESP32EncryptionKeyFile != "" {
mac := strings.ToUpper(strings.Replace(fusesByName[esp32.MACAddressFuseName].MACAddressString(), ":", "", -1))
ekf := moscommon.ExpandPlaceholders(opts.ESP32EncryptionKeyFile, "?", mac)
common.Reportf("Flash encryption key: %s", ekf)
esp32EncryptionKey, err = ioutil.ReadFile(ekf)
if err != nil {
return errors.Annotatef(err, "failed to read encryption key")
}
} else {
return errors.Errorf("flash encryption is enabled but encryption key is not provided")
}
}
encrKey := esp32EncryptionKey[:]
switch kcs {
case esp32.KeyEncodingSchemeNone:
if len(esp32EncryptionKey) != 32 {
return errors.Errorf("encryption key must be 32 bytes, got %d", len(esp32EncryptionKey))
}
case esp32.KeyEncodingScheme34:
if len(esp32EncryptionKey) != 24 {
return errors.Errorf("encryption key must be 24 bytes, got %d", len(esp32EncryptionKey))
}
// Extend the key, per 3/4 encoding scheme.
encrKey = append(encrKey, encrKey[8:16]...)
}
encData, err := esp32.ESP32EncryptImageData(
im.Data, encrKey, im.Addr, opts.ESP32FlashCryptConf)
if err != nil {
return errors.Annotatef(err, "%s: failed to encrypt", im.Name)
}
im.Data = encData
}
}
sort.Sort(imagesByAddr(images))
if sanityCheck {
err = sanityCheckImages(ct, images, cfr.flashParams.Size(), flashSectorSize)
if err != nil {
return errors.Trace(err)
}
}
imagesToWrite := images
if opts.EraseChip {
common.Reportf("Erasing chip...")
if err = cfr.fc.EraseChip(); err != nil {
return errors.Annotatef(err, "failed to erase chip")
}
} else if opts.MinimizeWrites {
common.Reportf("Deduping...")
imagesToWrite, err = dedupImages(cfr.fc, images)
if err != nil {
return errors.Annotatef(err, "failed to dedup images")
}
}
if len(imagesToWrite) > 0 {
common.Reportf("Writing...")
start := time.Now()
totalBytesWritten := 0
for _, im := range imagesToWrite {
data := im.Data
numAttempts := 3
imageBytesWritten := 0
addr := im.Addr
if len(data)%flashSectorSize != 0 {
newData := make([]byte, len(data))
copy(newData, data)
paddingLen := flashSectorSize - len(data)%flashSectorSize
for i := 0; i < paddingLen; i++ {
newData = append(newData, 0xff)
}
data = newData
}
for i := 1; imageBytesWritten < len(im.Data); i++ {
common.Reportf(" %7d @ 0x%x", len(data), addr)
bytesWritten, err := cfr.fc.Write(addr, data, true /* erase */, opts.EnableCompression)
if err != nil {
if bytesWritten >= flashSectorSize {
// We made progress, restart the retry counter.
i = 1
}
err = errors.Annotatef(err, "write error (attempt %d/%d)", i, numAttempts)
if i >= numAttempts {
return errors.Annotatef(err, "%s: failed to write", im.Name)
}
glog.Warningf("%s", err)
if err := cfr.fc.Sync(); err != nil {
return errors.Annotatef(err, "lost connection with the flasher")
}
// Round down to sector boundary
bytesWritten = bytesWritten - (bytesWritten % flashSectorSize)
data = data[bytesWritten:]
}
imageBytesWritten += bytesWritten
addr += uint32(bytesWritten)
}
totalBytesWritten += len(im.Data)
}
seconds := time.Since(start).Seconds()
bytesPerSecond := float64(totalBytesWritten) / seconds
common.Reportf("Wrote %d bytes in %.2f seconds (%.2f KBit/sec)", totalBytesWritten, seconds, bytesPerSecond*8/1024)
}
if !opts.NoVerify {
common.Reportf("Verifying...")
numBytes := 0
start := time.Now()
for _, im := range images {
numBytes += len(im.Data)
common.Reportf(" %7d @ 0x%x", len(im.Data), im.Addr)
addr, done := im.Addr, 0
for done < len(im.Data) {
size := len(im.Data) - done
if size > 0x100000 {
size = 0x100000
}
data := im.Data[done : done+size]
digest, err := cfr.fc.Digest(addr, uint32(size), 0 /* blockSize */)
if err != nil {
return errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, size, addr)
}
if len(digest) != 1 || len(digest[0]) != 16 {
return errors.Errorf("unexpected digest packetresult %+v", digest)
}
digestHex := strings.ToLower(hex.EncodeToString(digest[0]))
expectedDigest := md5.Sum(data)
expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
if digestHex != expectedDigestHex {
return errors.Errorf("%d @ 0x%x: digest mismatch: expected %s, got %s", size, addr, expectedDigestHex, digestHex)
}
addr += uint32(size)
done += size
}
}
elapsed := time.Since(start)
glog.Infof("Verified %d bytes in %s, %.2f Kbit/sec", numBytes, elapsed, float64(numBytes*8)/elapsed.Seconds()/1000)
}
if opts.BootFirmware {
common.Reportf("Booting firmware...")
if err = cfr.fc.BootFirmware(); err != nil {
return errors.Annotatef(err, "failed to reboot into firmware")
}
}
return nil
}
func adjustSysParamsLocation(fw *fwbundle.FirmwareBundle, flashSize int) {
sysParamsAddr := uint32(flashSize - sysParamsAreaSize)
for _, p := range fw.Parts {
if p.Type != sysParamsPartType {
continue
}
if p.Addr != sysParamsAddr {
glog.Infof("Sys params image moved from 0x%x to 0x%x", p.Addr, sysParamsAddr)
p.Addr = sysParamsAddr
}
}
}
func sanityCheckImages(ct esp.ChipType, images []*image, flashSize, flashSectorSize int) error {
// Note: we require that images are sorted by address.
sort.Sort(imagesByAddr(images))
esp8266CheckSysParams := true
for _, im := range images {
if im.Type == sysParams3PartType {
// No need to check, firmware controls palcement of sys_params.
esp8266CheckSysParams = false
}
}
for i, im := range images {
imageBegin := int(im.Addr)
imageEnd := imageBegin + len(im.Data)
if imageBegin >= flashSize || imageEnd > flashSize {
return errors.Errorf(
"Image %d @ 0x%x will not fit in flash (size %d)", len(im.Data), imageBegin, flashSize)
}
if imageBegin%flashSectorSize != 0 {
return errors.Errorf("Image starting address (0x%x) is not on flash sector boundary (sector size %d)",
imageBegin,
flashSectorSize)
}
if imageBegin == 0 && len(im.Data) > 0 {
if im.Data[0] != espImageMagicByte {
return errors.Errorf("Invalid magic byte in the first image")
}
}
if ct == esp.ChipESP8266 && esp8266CheckSysParams {
sysParamsBegin := flashSize - sysParamsAreaSize
if imageBegin == sysParamsBegin && im.Type == sysParamsPartType {
// Ok, a sys_params image.
} else if imageEnd > sysParamsBegin {
return errors.Errorf("Image 0x%x overlaps with system params area (%d @ 0x%x)",
imageBegin, sysParamsAreaSize, sysParamsBegin)
}
}
if i > 0 {
prevImageBegin := int(images[i-1].Addr)
prevImageEnd := prevImageBegin + len(images[i-1].Data)
// We traverse the list in order, so a simple check will suffice.
if prevImageEnd > imageBegin |
}
}
return nil
}
func dedupImages(fc *FlasherClient, images []*image) ([]*image, error) {
var dedupedImages []*image
for _, im := range images {
glog.V(2).Infof("%d @ 0x%x", len(im.Data), im.Addr)
imAddr := int(im.Addr)
digests, err := fc.Digest(im.Addr, uint32(len(im.Data)), flashSectorSize)
if err != nil {
return nil, errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, len(im.Data), im.Addr)
}
i, offset := 0, 0
var newImages []*image
newAddr, newLen, newTotalLen := imAddr, 0, 0
for offset < len(im.Data) {
blockLen := flashSectorSize
if offset+blockLen > len(im.Data) {
blockLen = len(im.Data) - offset
}
digestHex := strings.ToLower(hex.EncodeToString(digests[i]))
expectedDigest := md5.Sum(im.Data[offset : offset+blockLen])
expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
glog.V(2).Infof("0x%06x %4d %s %s %t", imAddr+offset, blockLen, expectedDigestHex, digestHex, expectedDigestHex == digestHex)
if expectedDigestHex == digestHex {
// Found a matching sector. If we've been building an image, commit it.
if newLen > 0 {
nim := &image{
Name: im.Name,
Type: im.Type,
Addr: uint32(newAddr),
Data: im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
ESP32Encrypt: im.ESP32Encrypt,
}
glog.V(2).Infof("%d @ 0x%x", len(nim.Data), nim.Addr)
newImages = append(newImages, nim)
newTotalLen += newLen
newAddr, newLen = 0, 0
}
} else {
// Found a sector that needs to be written. Start a new image or continue the existing one.
if newLen == 0 {
newAddr = imAddr + offset
}
newLen += blockLen
}
offset += blockLen
i++
}
if newLen > 0 {
nim := &image{
Name: im.Name,
Type: im.Type,
Addr: uint32(newAddr),
Data: im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
ESP32Encrypt: im.ESP32Encrypt,
}
newImages = append(newImages, nim)
glog.V(2).Infof("%d @ %x", len(nim.Data), nim.Addr)
newTotalLen += newLen
newAddr, newLen = 0, 0
}
glog.V(2).Infof("%d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
// There's a price for fragmenting a large image: erasing many individual
// sectors is slower than erasing a whole block. So unless the difference
// is substantial, don't bother.
if newTotalLen < len(im.Data) && (newTotalLen < flashBlockSize || len(im.Data)-newTotalLen >= flashBlockSize) {
dedupedImages = append(dedupedImages, newImages...)
common.Reportf(" %7d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
} else {
dedupedImages = append(dedupedImages, im)
}
}
return dedupedImages, nil
}
| {
return errors.Errorf("Images 0x%x and 0x%x overlap", prevImageBegin, imageBegin)
} | conditional_block |
flash.go | //
// Copyright (c) 2014-2019 Cesanta Software Limited
// All rights reserved
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package flasher
import (
"crypto/md5"
"encoding/hex"
"io/ioutil"
"math/bits"
"sort"
"strings"
"time"
"github.com/juju/errors"
moscommon "github.com/mongoose-os/mos/cli/common"
"github.com/mongoose-os/mos/cli/flash/common"
"github.com/mongoose-os/mos/cli/flash/esp"
"github.com/mongoose-os/mos/cli/flash/esp32"
"github.com/mongoose-os/mos/common/fwbundle"
glog "k8s.io/klog/v2"
)
const (
flashSectorSize = 0x1000
flashBlockSize = 0x10000
// Pre-3.0 SDK, location of sys_params is hard-coded.
sysParamsPartType = "sys_params"
// 3.0+ SDK control placement of sys_params through partition table,
// no need for special handling.
sysParams3PartType = "sys_params3"
sysParamsAreaSize = 4 * flashSectorSize
espImageMagicByte = 0xe9
)
type image struct {
Name string
Type string
Addr uint32
Data []byte
ESP32Encrypt bool
}
type imagesByAddr []*image
func (pp imagesByAddr) Len() int { return len(pp) }
func (pp imagesByAddr) Swap(i, j int) { pp[i], pp[j] = pp[j], pp[i] }
func (pp imagesByAddr) Less(i, j int) bool {
return pp[i].Addr < pp[j].Addr
}
func enDis(enabled bool) string {
if enabled {
return "enabled"
}
return "disabled"
}
func | (ct esp.ChipType, fw *fwbundle.FirmwareBundle, opts *esp.FlashOpts) error {
if opts.KeepFS && opts.EraseChip {
return errors.Errorf("--keep-fs and --esp-erase-chip are incompatible")
}
cfr, err := ConnectToFlasherClient(ct, opts)
if err != nil {
return errors.Trace(err)
}
defer cfr.rc.Disconnect()
if ct == esp.ChipESP8266 {
// Based on our knowledge of flash size, adjust type=sys_params image.
adjustSysParamsLocation(fw, cfr.flashParams.Size())
}
// Sort images by address
var images []*image
for _, p := range fw.Parts {
if p.Type == fwbundle.FSPartType && opts.KeepFS {
continue
}
// For ESP32, resolve partition name to address
if p.ESP32PartitionName != "" {
pti, err := esp32.GetPartitionInfo(fw, p.ESP32PartitionName)
if err != nil {
return errors.Annotatef(err, "%s: failed to get respolve partition %q", p.Name, p.ESP32PartitionName)
}
// If partition is specified, addr can be optionally used to specify offset within the partition.
// The exception is app partition - these had both set fro compatibility since Feb 2018
// (https://github.com/cesanta/mongoose-os/commit/b8960587f4d564542c903f854e4fe1cef7bbde33)
// It's been removed in Oct 2021
// (https://github.com/cesanta/mongoose-os/commit/8d9a53f76898d736dcac96594bd4eae0cb6b83a0)
newAddr, newSize := p.Addr, p.Size
if p.Type != "app" {
newAddr = pti.Pos.Offset + p.Addr
} else {
newAddr = pti.Pos.Offset
}
if p.Size == 0 { // size = 0 -> until the end of the partition.
newSize = pti.Pos.Offset + pti.Pos.Size - newAddr
}
glog.V(1).Infof("%s: %s 0x%x %d -> 0x%x %d", p.Name, p.ESP32PartitionName, p.Addr, p.Size, newAddr, newSize)
p.Addr, p.Size = newAddr, newSize
}
data, err := fw.GetPartData(p.Name)
if err != nil {
return errors.Annotatef(err, "%s: failed to get data", p.Name)
}
im := &image{
Name: p.Name,
Type: p.Type,
Addr: p.Addr,
Data: data,
ESP32Encrypt: p.ESP32Encrypt,
}
images = append(images, im)
}
return errors.Trace(writeImages(ct, cfr, images, opts, true))
}
func writeImages(ct esp.ChipType, cfr *cfResult, images []*image, opts *esp.FlashOpts, sanityCheck bool) error {
var err error
common.Reportf("Flash size: %d, params: %s", cfr.flashParams.Size(), cfr.flashParams)
encryptionEnabled := false
secureBootEnabled := false
var esp32EncryptionKey []byte
var fusesByName map[string]*esp32.Fuse
kcs := esp32.KeyEncodingSchemeNone
if ct == esp.ChipESP32 { // TODO(rojer): Flash encryption support for ESP32-C3
_, _, fusesByName, err = esp32.ReadFuses(cfr.fc)
if err == nil {
if fcnt, err := fusesByName[esp32.FlashCryptCntFuseName].Value(true /* withDiffs */); err == nil {
encryptionEnabled = (bits.OnesCount64(fcnt.Uint64())%2 != 0)
kcs = esp32.GetKeyEncodingScheme(fusesByName)
common.Reportf("Flash encryption: %s, scheme: %s", enDis(encryptionEnabled), kcs)
}
if abs0, err := fusesByName[esp32.AbstractDone0FuseName].Value(true /* withDiffs */); err == nil {
secureBootEnabled = (abs0.Int64() != 0)
common.Reportf("Secure boot: %s", enDis(secureBootEnabled))
}
} else {
// Some boards (ARDUINO NANO 33 IOT) do not support memory reading commands to read efuses.
// Allow to proceed anyway.
common.Reportf("Failed to read eFuses, assuming no flash encryption")
}
}
for _, im := range images {
if im.Addr == 0 || im.Addr == 0x1000 && len(im.Data) >= 4 && im.Data[0] == 0xe9 {
im.Data[2], im.Data[3] = cfr.flashParams.Bytes()
}
if ct == esp.ChipESP32 && im.ESP32Encrypt && encryptionEnabled {
if esp32EncryptionKey == nil {
if opts.ESP32EncryptionKeyFile != "" {
mac := strings.ToUpper(strings.Replace(fusesByName[esp32.MACAddressFuseName].MACAddressString(), ":", "", -1))
ekf := moscommon.ExpandPlaceholders(opts.ESP32EncryptionKeyFile, "?", mac)
common.Reportf("Flash encryption key: %s", ekf)
esp32EncryptionKey, err = ioutil.ReadFile(ekf)
if err != nil {
return errors.Annotatef(err, "failed to read encryption key")
}
} else {
return errors.Errorf("flash encryption is enabled but encryption key is not provided")
}
}
encrKey := esp32EncryptionKey[:]
switch kcs {
case esp32.KeyEncodingSchemeNone:
if len(esp32EncryptionKey) != 32 {
return errors.Errorf("encryption key must be 32 bytes, got %d", len(esp32EncryptionKey))
}
case esp32.KeyEncodingScheme34:
if len(esp32EncryptionKey) != 24 {
return errors.Errorf("encryption key must be 24 bytes, got %d", len(esp32EncryptionKey))
}
// Extend the key, per 3/4 encoding scheme.
encrKey = append(encrKey, encrKey[8:16]...)
}
encData, err := esp32.ESP32EncryptImageData(
im.Data, encrKey, im.Addr, opts.ESP32FlashCryptConf)
if err != nil {
return errors.Annotatef(err, "%s: failed to encrypt", im.Name)
}
im.Data = encData
}
}
sort.Sort(imagesByAddr(images))
if sanityCheck {
err = sanityCheckImages(ct, images, cfr.flashParams.Size(), flashSectorSize)
if err != nil {
return errors.Trace(err)
}
}
imagesToWrite := images
if opts.EraseChip {
common.Reportf("Erasing chip...")
if err = cfr.fc.EraseChip(); err != nil {
return errors.Annotatef(err, "failed to erase chip")
}
} else if opts.MinimizeWrites {
common.Reportf("Deduping...")
imagesToWrite, err = dedupImages(cfr.fc, images)
if err != nil {
return errors.Annotatef(err, "failed to dedup images")
}
}
if len(imagesToWrite) > 0 {
common.Reportf("Writing...")
start := time.Now()
totalBytesWritten := 0
for _, im := range imagesToWrite {
data := im.Data
numAttempts := 3
imageBytesWritten := 0
addr := im.Addr
if len(data)%flashSectorSize != 0 {
newData := make([]byte, len(data))
copy(newData, data)
paddingLen := flashSectorSize - len(data)%flashSectorSize
for i := 0; i < paddingLen; i++ {
newData = append(newData, 0xff)
}
data = newData
}
for i := 1; imageBytesWritten < len(im.Data); i++ {
common.Reportf(" %7d @ 0x%x", len(data), addr)
bytesWritten, err := cfr.fc.Write(addr, data, true /* erase */, opts.EnableCompression)
if err != nil {
if bytesWritten >= flashSectorSize {
// We made progress, restart the retry counter.
i = 1
}
err = errors.Annotatef(err, "write error (attempt %d/%d)", i, numAttempts)
if i >= numAttempts {
return errors.Annotatef(err, "%s: failed to write", im.Name)
}
glog.Warningf("%s", err)
if err := cfr.fc.Sync(); err != nil {
return errors.Annotatef(err, "lost connection with the flasher")
}
// Round down to sector boundary
bytesWritten = bytesWritten - (bytesWritten % flashSectorSize)
data = data[bytesWritten:]
}
imageBytesWritten += bytesWritten
addr += uint32(bytesWritten)
}
totalBytesWritten += len(im.Data)
}
seconds := time.Since(start).Seconds()
bytesPerSecond := float64(totalBytesWritten) / seconds
common.Reportf("Wrote %d bytes in %.2f seconds (%.2f KBit/sec)", totalBytesWritten, seconds, bytesPerSecond*8/1024)
}
if !opts.NoVerify {
common.Reportf("Verifying...")
numBytes := 0
start := time.Now()
for _, im := range images {
numBytes += len(im.Data)
common.Reportf(" %7d @ 0x%x", len(im.Data), im.Addr)
addr, done := im.Addr, 0
for done < len(im.Data) {
size := len(im.Data) - done
if size > 0x100000 {
size = 0x100000
}
data := im.Data[done : done+size]
digest, err := cfr.fc.Digest(addr, uint32(size), 0 /* blockSize */)
if err != nil {
return errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, size, addr)
}
if len(digest) != 1 || len(digest[0]) != 16 {
return errors.Errorf("unexpected digest packetresult %+v", digest)
}
digestHex := strings.ToLower(hex.EncodeToString(digest[0]))
expectedDigest := md5.Sum(data)
expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
if digestHex != expectedDigestHex {
return errors.Errorf("%d @ 0x%x: digest mismatch: expected %s, got %s", size, addr, expectedDigestHex, digestHex)
}
addr += uint32(size)
done += size
}
}
elapsed := time.Since(start)
glog.Infof("Verified %d bytes in %s, %.2f Kbit/sec", numBytes, elapsed, float64(numBytes*8)/elapsed.Seconds()/1000)
}
if opts.BootFirmware {
common.Reportf("Booting firmware...")
if err = cfr.fc.BootFirmware(); err != nil {
return errors.Annotatef(err, "failed to reboot into firmware")
}
}
return nil
}
// adjustSysParamsLocation relocates any sys_params parts to the reserved area
// at the very end of flash, where the pre-3.0 ESP8266 SDK expects to find them.
// The target address depends on the actual flash size, which is only known
// after connecting to the device.
func adjustSysParamsLocation(fw *fwbundle.FirmwareBundle, flashSize int) {
	wantAddr := uint32(flashSize - sysParamsAreaSize)
	for _, part := range fw.Parts {
		if part.Type != sysParamsPartType {
			continue
		}
		if part.Addr == wantAddr {
			continue
		}
		glog.Infof("Sys params image moved from 0x%x to 0x%x", part.Addr, wantAddr)
		part.Addr = wantAddr
	}
}
// sanityCheckImages validates image placement before flashing: every image
// must fit within the flash, start on a sector boundary and not overlap its
// neighbors; on ESP8266 (pre-3.0 SDK layout) images must also stay clear of
// the system params area at the end of flash. Sorts images by address in
// place as a side effect. Returns an error describing the first violation.
func sanityCheckImages(ct esp.ChipType, images []*image, flashSize, flashSectorSize int) error {
	// Note: we require that images are sorted by address.
	sort.Sort(imagesByAddr(images))
	// If a sys_params3 part is present, the 3.0+ SDK controls placement via
	// the partition table, so the fixed end-of-flash area check is skipped.
	esp8266CheckSysParams := true
	for _, im := range images {
		if im.Type == sysParams3PartType {
			// No need to check, firmware controls placement of sys_params.
			esp8266CheckSysParams = false
		}
	}
	for i, im := range images {
		imageBegin := int(im.Addr)
		imageEnd := imageBegin + len(im.Data)
		if imageBegin >= flashSize || imageEnd > flashSize {
			return errors.Errorf(
				"Image %d @ 0x%x will not fit in flash (size %d)", len(im.Data), imageBegin, flashSize)
		}
		if imageBegin%flashSectorSize != 0 {
			return errors.Errorf("Image starting address (0x%x) is not on flash sector boundary (sector size %d)",
				imageBegin,
				flashSectorSize)
		}
		// The image at address 0 is the boot image and must begin with the
		// ESP image magic byte (0xe9).
		if imageBegin == 0 && len(im.Data) > 0 {
			if im.Data[0] != espImageMagicByte {
				return errors.Errorf("Invalid magic byte in the first image")
			}
		}
		if ct == esp.ChipESP8266 && esp8266CheckSysParams {
			sysParamsBegin := flashSize - sysParamsAreaSize
			if imageBegin == sysParamsBegin && im.Type == sysParamsPartType {
				// Ok, a sys_params image.
			} else if imageEnd > sysParamsBegin {
				return errors.Errorf("Image 0x%x overlaps with system params area (%d @ 0x%x)",
					imageBegin, sysParamsAreaSize, sysParamsBegin)
			}
		}
		if i > 0 {
			prevImageBegin := int(images[i-1].Addr)
			prevImageEnd := prevImageBegin + len(images[i-1].Data)
			// We traverse the list in order, so a simple check will suffice.
			if prevImageEnd > imageBegin {
				return errors.Errorf("Images 0x%x and 0x%x overlap", prevImageBegin, imageBegin)
			}
		}
	}
	return nil
}
// dedupImages compares each image against the current flash contents using
// per-sector MD5 digests computed on the device, and returns a new image list
// containing only the byte ranges that actually differ. Fully matching images
// are dropped; partially matching ones are split into sub-images covering
// just the mismatching sectors. If fragmenting would not save enough (see the
// flashBlockSize heuristic below), the original image is kept whole.
func dedupImages(fc *FlasherClient, images []*image) ([]*image, error) {
	var dedupedImages []*image
	for _, im := range images {
		glog.V(2).Infof("%d @ 0x%x", len(im.Data), im.Addr)
		imAddr := int(im.Addr)
		// Ask the flasher stub for sector-granularity digests over the
		// image's address range.
		digests, err := fc.Digest(im.Addr, uint32(len(im.Data)), flashSectorSize)
		if err != nil {
			return nil, errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, len(im.Data), im.Addr)
		}
		// i indexes the device digests; offset walks the image data.
		i, offset := 0, 0
		var newImages []*image
		// newAddr/newLen track the sub-image currently being accumulated;
		// newTotalLen is the total byte count that needs rewriting.
		newAddr, newLen, newTotalLen := imAddr, 0, 0
		for offset < len(im.Data) {
			blockLen := flashSectorSize
			if offset+blockLen > len(im.Data) {
				// Final sector may be short.
				blockLen = len(im.Data) - offset
			}
			digestHex := strings.ToLower(hex.EncodeToString(digests[i]))
			expectedDigest := md5.Sum(im.Data[offset : offset+blockLen])
			expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
			glog.V(2).Infof("0x%06x %4d %s %s %t", imAddr+offset, blockLen, expectedDigestHex, digestHex, expectedDigestHex == digestHex)
			if expectedDigestHex == digestHex {
				// Found a matching sector. If we've been building an image, commit it.
				if newLen > 0 {
					nim := &image{
						Name:         im.Name,
						Type:         im.Type,
						Addr:         uint32(newAddr),
						Data:         im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
						ESP32Encrypt: im.ESP32Encrypt,
					}
					glog.V(2).Infof("%d @ 0x%x", len(nim.Data), nim.Addr)
					newImages = append(newImages, nim)
					newTotalLen += newLen
					newAddr, newLen = 0, 0
				}
			} else {
				// Found a sector that needs to be written. Start a new image or continue the existing one.
				if newLen == 0 {
					newAddr = imAddr + offset
				}
				newLen += blockLen
			}
			offset += blockLen
			i++
		}
		// Commit the trailing sub-image, if one is still open.
		if newLen > 0 {
			nim := &image{
				Name:         im.Name,
				Type:         im.Type,
				Addr:         uint32(newAddr),
				Data:         im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
				ESP32Encrypt: im.ESP32Encrypt,
			}
			newImages = append(newImages, nim)
			glog.V(2).Infof("%d @ %x", len(nim.Data), nim.Addr)
			newTotalLen += newLen
			newAddr, newLen = 0, 0
		}
		glog.V(2).Infof("%d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
		// There's a price for fragmenting a large image: erasing many individual
		// sectors is slower than erasing a whole block. So unless the difference
		// is substantial, don't bother.
		if newTotalLen < len(im.Data) && (newTotalLen < flashBlockSize || len(im.Data)-newTotalLen >= flashBlockSize) {
			dedupedImages = append(dedupedImages, newImages...)
			common.Reportf(" %7d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
		} else {
			dedupedImages = append(dedupedImages, im)
		}
	}
	return dedupedImages, nil
}
| Flash | identifier_name |
flash.go | //
// Copyright (c) 2014-2019 Cesanta Software Limited
// All rights reserved
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package flasher
import (
"crypto/md5"
"encoding/hex"
"io/ioutil"
"math/bits"
"sort"
"strings"
"time"
"github.com/juju/errors"
moscommon "github.com/mongoose-os/mos/cli/common"
"github.com/mongoose-os/mos/cli/flash/common"
"github.com/mongoose-os/mos/cli/flash/esp"
"github.com/mongoose-os/mos/cli/flash/esp32"
"github.com/mongoose-os/mos/common/fwbundle"
glog "k8s.io/klog/v2"
)
const (
	// Flash geometry shared by the supported ESP chips.
	flashSectorSize = 0x1000  // 4 KB: smallest erasable/writable unit.
	flashBlockSize  = 0x10000 // 64 KB: larger erase unit, cheaper per byte.
	// Pre-3.0 SDK, location of sys_params is hard-coded.
	sysParamsPartType = "sys_params"
	// 3.0+ SDK control placement of sys_params through partition table,
	// no need for special handling.
	sysParams3PartType = "sys_params3"
	// The reserved system params area occupies the last 4 sectors of flash.
	sysParamsAreaSize = 4 * flashSectorSize
	// First byte of a valid ESP firmware image header.
	espImageMagicByte = 0xe9
)
// image is a single contiguous chunk of data to be written to flash at a
// fixed address.
type image struct {
	Name         string // Part name, used in progress and error messages.
	Type         string // Part type, e.g. "app", "sys_params", FS.
	Addr         uint32 // Flash address to write the data at.
	Data         []byte // Raw image contents.
	ESP32Encrypt bool   // Encrypt this image when ESP32 flash encryption is on.
}
// imagesByAddr implements sort.Interface, ordering images by ascending flash
// address.
type imagesByAddr []*image

func (pp imagesByAddr) Len() int           { return len(pp) }
func (pp imagesByAddr) Swap(i, j int)      { pp[j], pp[i] = pp[i], pp[j] }
func (pp imagesByAddr) Less(i, j int) bool { return pp[i].Addr < pp[j].Addr }
// enDis renders a boolean feature flag as "enabled" or "disabled" for
// human-readable reports.
func enDis(enabled bool) string {
	result := "disabled"
	if enabled {
		result = "enabled"
	}
	return result
}
// Flash writes the firmware bundle fw to a device of chip type ct.
// It connects to the flasher, adjusts sys_params placement on ESP8266,
// resolves ESP32 partition references to absolute addresses, collects the
// parts into images (optionally keeping the existing filesystem), and hands
// them to writeImages with sanity checking enabled.
// Returns an error if options conflict, connection fails, a partition cannot
// be resolved, or writing fails.
func Flash(ct esp.ChipType, fw *fwbundle.FirmwareBundle, opts *esp.FlashOpts) error {
	if opts.KeepFS && opts.EraseChip {
		return errors.Errorf("--keep-fs and --esp-erase-chip are incompatible")
	}
	cfr, err := ConnectToFlasherClient(ct, opts)
	if err != nil {
		return errors.Trace(err)
	}
	defer cfr.rc.Disconnect()
	if ct == esp.ChipESP8266 {
		// Based on our knowledge of flash size, adjust type=sys_params image.
		adjustSysParamsLocation(fw, cfr.flashParams.Size())
	}
	// Collect the images to be written.
	var images []*image
	for _, p := range fw.Parts {
		if p.Type == fwbundle.FSPartType && opts.KeepFS {
			continue
		}
		// For ESP32, resolve partition name to address
		if p.ESP32PartitionName != "" {
			pti, err := esp32.GetPartitionInfo(fw, p.ESP32PartitionName)
			if err != nil {
				// Fixed: error message used to read "failed to get respolve partition".
				return errors.Annotatef(err, "%s: failed to resolve partition %q", p.Name, p.ESP32PartitionName)
			}
			// If partition is specified, addr can be optionally used to specify offset within the partition.
			// The exception is app partition - these had both set for compatibility since Feb 2018
			// (https://github.com/cesanta/mongoose-os/commit/b8960587f4d564542c903f854e4fe1cef7bbde33)
			// It's been removed in Oct 2021
			// (https://github.com/cesanta/mongoose-os/commit/8d9a53f76898d736dcac96594bd4eae0cb6b83a0)
			newAddr, newSize := p.Addr, p.Size
			if p.Type != "app" {
				newAddr = pti.Pos.Offset + p.Addr
			} else {
				newAddr = pti.Pos.Offset
			}
			if p.Size == 0 { // size = 0 -> until the end of the partition.
				newSize = pti.Pos.Offset + pti.Pos.Size - newAddr
			}
			glog.V(1).Infof("%s: %s 0x%x %d -> 0x%x %d", p.Name, p.ESP32PartitionName, p.Addr, p.Size, newAddr, newSize)
			p.Addr, p.Size = newAddr, newSize
		}
		data, err := fw.GetPartData(p.Name)
		if err != nil {
			return errors.Annotatef(err, "%s: failed to get data", p.Name)
		}
		im := &image{
			Name:         p.Name,
			Type:         p.Type,
			Addr:         p.Addr,
			Data:         data,
			ESP32Encrypt: p.ESP32Encrypt,
		}
		images = append(images, im)
	}
	return errors.Trace(writeImages(ct, cfr, images, opts, true))
}
// writeImages writes the given images to flash. The pipeline is:
//  1. On ESP32, read eFuses to detect flash encryption / secure boot state.
//  2. Patch flash params into the boot image header; encrypt images if needed.
//  3. Optionally sanity-check placement, then either erase the whole chip or
//     dedup against current flash contents.
//  4. Write with per-image retry (up to 3 attempts without progress).
//  5. Unless disabled, verify each image via on-device MD5 digests.
//  6. Optionally reboot into the freshly written firmware.
// Returns an error on any unrecoverable failure.
func writeImages(ct esp.ChipType, cfr *cfResult, images []*image, opts *esp.FlashOpts, sanityCheck bool) error {
	var err error
	common.Reportf("Flash size: %d, params: %s", cfr.flashParams.Size(), cfr.flashParams)
	encryptionEnabled := false
	secureBootEnabled := false
	var esp32EncryptionKey []byte
	var fusesByName map[string]*esp32.Fuse
	kcs := esp32.KeyEncodingSchemeNone
	if ct == esp.ChipESP32 { // TODO(rojer): Flash encryption support for ESP32-C3
		_, _, fusesByName, err = esp32.ReadFuses(cfr.fc)
		if err == nil {
			// Flash encryption is active when FLASH_CRYPT_CNT has an odd
			// number of bits set. Note: fcnt/abs0 errs shadow the outer err.
			if fcnt, err := fusesByName[esp32.FlashCryptCntFuseName].Value(true /* withDiffs */); err == nil {
				encryptionEnabled = (bits.OnesCount64(fcnt.Uint64())%2 != 0)
				kcs = esp32.GetKeyEncodingScheme(fusesByName)
				common.Reportf("Flash encryption: %s, scheme: %s", enDis(encryptionEnabled), kcs)
			}
			if abs0, err := fusesByName[esp32.AbstractDone0FuseName].Value(true /* withDiffs */); err == nil {
				secureBootEnabled = (abs0.Int64() != 0)
				common.Reportf("Secure boot: %s", enDis(secureBootEnabled))
			}
		} else {
			// Some boards (ARDUINO NANO 33 IOT) do not support memory reading commands to read efuses.
			// Allow to proceed anyway.
			common.Reportf("Failed to read eFuses, assuming no flash encryption")
		}
	}
	for _, im := range images {
		// Patch the flash size/mode/speed bytes into the boot image header.
		// NOTE(review): && binds tighter than ||, so the magic-byte guard only
		// applies to the 0x1000 case; Addr==0 patches unconditionally — confirm
		// this is intended.
		if im.Addr == 0 || im.Addr == 0x1000 && len(im.Data) >= 4 && im.Data[0] == 0xe9 {
			im.Data[2], im.Data[3] = cfr.flashParams.Bytes()
		}
		if ct == esp.ChipESP32 && im.ESP32Encrypt && encryptionEnabled {
			// Lazily load the encryption key on first use.
			if esp32EncryptionKey == nil {
				if opts.ESP32EncryptionKeyFile != "" {
					// "?" in the key file name expands to the device MAC (hex, no colons).
					mac := strings.ToUpper(strings.Replace(fusesByName[esp32.MACAddressFuseName].MACAddressString(), ":", "", -1))
					ekf := moscommon.ExpandPlaceholders(opts.ESP32EncryptionKeyFile, "?", mac)
					common.Reportf("Flash encryption key: %s", ekf)
					esp32EncryptionKey, err = ioutil.ReadFile(ekf)
					if err != nil {
						return errors.Annotatef(err, "failed to read encryption key")
					}
				} else {
					return errors.Errorf("flash encryption is enabled but encryption key is not provided")
				}
			}
			encrKey := esp32EncryptionKey[:]
			switch kcs {
			case esp32.KeyEncodingSchemeNone:
				if len(esp32EncryptionKey) != 32 {
					return errors.Errorf("encryption key must be 32 bytes, got %d", len(esp32EncryptionKey))
				}
			case esp32.KeyEncodingScheme34:
				if len(esp32EncryptionKey) != 24 {
					return errors.Errorf("encryption key must be 24 bytes, got %d", len(esp32EncryptionKey))
				}
				// Extend the key, per 3/4 encoding scheme.
				encrKey = append(encrKey, encrKey[8:16]...)
			}
			encData, err := esp32.ESP32EncryptImageData(
				im.Data, encrKey, im.Addr, opts.ESP32FlashCryptConf)
			if err != nil {
				return errors.Annotatef(err, "%s: failed to encrypt", im.Name)
			}
			im.Data = encData
		}
	}
	sort.Sort(imagesByAddr(images))
	if sanityCheck {
		err = sanityCheckImages(ct, images, cfr.flashParams.Size(), flashSectorSize)
		if err != nil {
			return errors.Trace(err)
		}
	}
	imagesToWrite := images
	if opts.EraseChip {
		common.Reportf("Erasing chip...")
		if err = cfr.fc.EraseChip(); err != nil {
			return errors.Annotatef(err, "failed to erase chip")
		}
	} else if opts.MinimizeWrites {
		// Skip sectors whose contents already match.
		common.Reportf("Deduping...")
		imagesToWrite, err = dedupImages(cfr.fc, images)
		if err != nil {
			return errors.Annotatef(err, "failed to dedup images")
		}
	}
	if len(imagesToWrite) > 0 {
		common.Reportf("Writing...")
		start := time.Now()
		totalBytesWritten := 0
		for _, im := range imagesToWrite {
			data := im.Data
			numAttempts := 3
			imageBytesWritten := 0
			addr := im.Addr
			// Pad the image with 0xff up to a whole number of sectors.
			if len(data)%flashSectorSize != 0 {
				newData := make([]byte, len(data))
				copy(newData, data)
				paddingLen := flashSectorSize - len(data)%flashSectorSize
				for i := 0; i < paddingLen; i++ {
					newData = append(newData, 0xff)
				}
				data = newData
			}
			// i counts write attempts since the last successful progress.
			for i := 1; imageBytesWritten < len(im.Data); i++ {
				common.Reportf(" %7d @ 0x%x", len(data), addr)
				bytesWritten, err := cfr.fc.Write(addr, data, true /* erase */, opts.EnableCompression)
				if err != nil {
					if bytesWritten >= flashSectorSize {
						// We made progress, restart the retry counter.
						i = 1
					}
					err = errors.Annotatef(err, "write error (attempt %d/%d)", i, numAttempts)
					if i >= numAttempts {
						return errors.Annotatef(err, "%s: failed to write", im.Name)
					}
					glog.Warningf("%s", err)
					// Re-sync with the flasher stub before retrying.
					if err := cfr.fc.Sync(); err != nil {
						return errors.Annotatef(err, "lost connection with the flasher")
					}
					// Round down to sector boundary
					bytesWritten = bytesWritten - (bytesWritten % flashSectorSize)
					data = data[bytesWritten:]
				}
				imageBytesWritten += bytesWritten
				addr += uint32(bytesWritten)
			}
			totalBytesWritten += len(im.Data)
		}
		seconds := time.Since(start).Seconds()
		bytesPerSecond := float64(totalBytesWritten) / seconds
		common.Reportf("Wrote %d bytes in %.2f seconds (%.2f KBit/sec)", totalBytesWritten, seconds, bytesPerSecond*8/1024)
	}
	if !opts.NoVerify {
		common.Reportf("Verifying...")
		numBytes := 0
		start := time.Now()
		for _, im := range images {
			numBytes += len(im.Data)
			common.Reportf(" %7d @ 0x%x", len(im.Data), im.Addr)
			addr, done := im.Addr, 0
			for done < len(im.Data) {
				// Digest at most 1 MB (0x100000 bytes) per request.
				size := len(im.Data) - done
				if size > 0x100000 {
					size = 0x100000
				}
				data := im.Data[done : done+size]
				digest, err := cfr.fc.Digest(addr, uint32(size), 0 /* blockSize */)
				if err != nil {
					return errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, size, addr)
				}
				// blockSize 0 -> expect a single 16-byte (MD5) digest back.
				if len(digest) != 1 || len(digest[0]) != 16 {
					return errors.Errorf("unexpected digest packetresult %+v", digest)
				}
				digestHex := strings.ToLower(hex.EncodeToString(digest[0]))
				expectedDigest := md5.Sum(data)
				expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
				if digestHex != expectedDigestHex {
					return errors.Errorf("%d @ 0x%x: digest mismatch: expected %s, got %s", size, addr, expectedDigestHex, digestHex)
				}
				addr += uint32(size)
				done += size
			}
		}
		elapsed := time.Since(start)
		glog.Infof("Verified %d bytes in %s, %.2f Kbit/sec", numBytes, elapsed, float64(numBytes*8)/elapsed.Seconds()/1000)
	}
	if opts.BootFirmware {
		common.Reportf("Booting firmware...")
		if err = cfr.fc.BootFirmware(); err != nil {
			return errors.Annotatef(err, "failed to reboot into firmware")
		}
	}
	return nil
}
func adjustSysParamsLocation(fw *fwbundle.FirmwareBundle, flashSize int) |
// sanityCheckImages validates image placement before flashing: every image
// must fit within the flash, start on a sector boundary and not overlap its
// neighbors; on ESP8266 (pre-3.0 SDK layout) images must also stay clear of
// the system params area at the end of flash. Sorts images by address in
// place as a side effect. Returns an error describing the first violation.
func sanityCheckImages(ct esp.ChipType, images []*image, flashSize, flashSectorSize int) error {
	// Note: we require that images are sorted by address.
	sort.Sort(imagesByAddr(images))
	// If a sys_params3 part is present, the 3.0+ SDK controls placement via
	// the partition table, so the fixed end-of-flash area check is skipped.
	esp8266CheckSysParams := true
	for _, im := range images {
		if im.Type == sysParams3PartType {
			// No need to check, firmware controls placement of sys_params.
			esp8266CheckSysParams = false
		}
	}
	for i, im := range images {
		imageBegin := int(im.Addr)
		imageEnd := imageBegin + len(im.Data)
		if imageBegin >= flashSize || imageEnd > flashSize {
			return errors.Errorf(
				"Image %d @ 0x%x will not fit in flash (size %d)", len(im.Data), imageBegin, flashSize)
		}
		if imageBegin%flashSectorSize != 0 {
			return errors.Errorf("Image starting address (0x%x) is not on flash sector boundary (sector size %d)",
				imageBegin,
				flashSectorSize)
		}
		// The image at address 0 is the boot image and must begin with the
		// ESP image magic byte (0xe9).
		if imageBegin == 0 && len(im.Data) > 0 {
			if im.Data[0] != espImageMagicByte {
				return errors.Errorf("Invalid magic byte in the first image")
			}
		}
		if ct == esp.ChipESP8266 && esp8266CheckSysParams {
			sysParamsBegin := flashSize - sysParamsAreaSize
			if imageBegin == sysParamsBegin && im.Type == sysParamsPartType {
				// Ok, a sys_params image.
			} else if imageEnd > sysParamsBegin {
				return errors.Errorf("Image 0x%x overlaps with system params area (%d @ 0x%x)",
					imageBegin, sysParamsAreaSize, sysParamsBegin)
			}
		}
		if i > 0 {
			prevImageBegin := int(images[i-1].Addr)
			prevImageEnd := prevImageBegin + len(images[i-1].Data)
			// We traverse the list in order, so a simple check will suffice.
			if prevImageEnd > imageBegin {
				return errors.Errorf("Images 0x%x and 0x%x overlap", prevImageBegin, imageBegin)
			}
		}
	}
	return nil
}
// dedupImages compares each image against the current flash contents using
// per-sector MD5 digests computed on the device, and returns a new image list
// containing only the byte ranges that actually differ. Fully matching images
// are dropped; partially matching ones are split into sub-images covering
// just the mismatching sectors. If fragmenting would not save enough (see the
// flashBlockSize heuristic below), the original image is kept whole.
func dedupImages(fc *FlasherClient, images []*image) ([]*image, error) {
	var dedupedImages []*image
	for _, im := range images {
		glog.V(2).Infof("%d @ 0x%x", len(im.Data), im.Addr)
		imAddr := int(im.Addr)
		// Ask the flasher stub for sector-granularity digests over the
		// image's address range.
		digests, err := fc.Digest(im.Addr, uint32(len(im.Data)), flashSectorSize)
		if err != nil {
			return nil, errors.Annotatef(err, "%s: failed to compute digest %d @ 0x%x", im.Name, len(im.Data), im.Addr)
		}
		// i indexes the device digests; offset walks the image data.
		i, offset := 0, 0
		var newImages []*image
		// newAddr/newLen track the sub-image currently being accumulated;
		// newTotalLen is the total byte count that needs rewriting.
		newAddr, newLen, newTotalLen := imAddr, 0, 0
		for offset < len(im.Data) {
			blockLen := flashSectorSize
			if offset+blockLen > len(im.Data) {
				// Final sector may be short.
				blockLen = len(im.Data) - offset
			}
			digestHex := strings.ToLower(hex.EncodeToString(digests[i]))
			expectedDigest := md5.Sum(im.Data[offset : offset+blockLen])
			expectedDigestHex := strings.ToLower(hex.EncodeToString(expectedDigest[:]))
			glog.V(2).Infof("0x%06x %4d %s %s %t", imAddr+offset, blockLen, expectedDigestHex, digestHex, expectedDigestHex == digestHex)
			if expectedDigestHex == digestHex {
				// Found a matching sector. If we've been building an image, commit it.
				if newLen > 0 {
					nim := &image{
						Name:         im.Name,
						Type:         im.Type,
						Addr:         uint32(newAddr),
						Data:         im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
						ESP32Encrypt: im.ESP32Encrypt,
					}
					glog.V(2).Infof("%d @ 0x%x", len(nim.Data), nim.Addr)
					newImages = append(newImages, nim)
					newTotalLen += newLen
					newAddr, newLen = 0, 0
				}
			} else {
				// Found a sector that needs to be written. Start a new image or continue the existing one.
				if newLen == 0 {
					newAddr = imAddr + offset
				}
				newLen += blockLen
			}
			offset += blockLen
			i++
		}
		// Commit the trailing sub-image, if one is still open.
		if newLen > 0 {
			nim := &image{
				Name:         im.Name,
				Type:         im.Type,
				Addr:         uint32(newAddr),
				Data:         im.Data[newAddr-imAddr : newAddr-imAddr+newLen],
				ESP32Encrypt: im.ESP32Encrypt,
			}
			newImages = append(newImages, nim)
			glog.V(2).Infof("%d @ %x", len(nim.Data), nim.Addr)
			newTotalLen += newLen
			newAddr, newLen = 0, 0
		}
		glog.V(2).Infof("%d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
		// There's a price for fragmenting a large image: erasing many individual
		// sectors is slower than erasing a whole block. So unless the difference
		// is substantial, don't bother.
		if newTotalLen < len(im.Data) && (newTotalLen < flashBlockSize || len(im.Data)-newTotalLen >= flashBlockSize) {
			dedupedImages = append(dedupedImages, newImages...)
			common.Reportf(" %7d @ 0x%x -> %d", len(im.Data), im.Addr, newTotalLen)
		} else {
			dedupedImages = append(dedupedImages, im)
		}
	}
	return dedupedImages, nil
}
| {
sysParamsAddr := uint32(flashSize - sysParamsAreaSize)
for _, p := range fw.Parts {
if p.Type != sysParamsPartType {
continue
}
if p.Addr != sysParamsAddr {
glog.Infof("Sys params image moved from 0x%x to 0x%x", p.Addr, sysParamsAddr)
p.Addr = sysParamsAddr
}
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.