file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
serde_snapshot.rs | genesis_config::ClusterType,
genesis_config::GenesisConfig,
hard_forks::HardForks,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
},
std::{
collections::{HashMap, HashSet},
io::{BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
result::Result,
sync::{atomic::Ordering, Arc, RwLock},
time::Instant,
},
};
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::IgnoreAsHelper;
mod common;
mod future;
mod tests;
mod utils;
use future::Context as TypeContextFuture;
#[allow(unused_imports)]
use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple};
// a number of test cases in accounts_db use this
#[cfg(test)]
pub(crate) use self::tests::reconstruct_accounts_db_via_serialization;
pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
NEWER,
}
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
struct AccountsDbFields<T>(HashMap<Slot, Vec<T>>, u64, Slot, BankHashInfo);
trait TypeContext<'a> {
type SerializableAccountStorageEntry: Serialize
+ DeserializeOwned
+ From<&'a AccountStorageEntry>
+ Into<AccountStorageEntry>;
fn serialize_bank_and_storage<S: serde::ser::Serializer>(
serializer: S,
serializable_bank: &SerializableBankAndStorage<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
serializable_db: &SerializableAccountsDB<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn deserialize_bank_fields<R>(
stream: &mut BufReader<R>,
) -> Result<
(
BankFieldsToDeserialize,
AccountsDbFields<Self::SerializableAccountStorageEntry>,
),
Error,
>
where
R: Read;
fn deserialize_accounts_db_fields<R>(
stream: &mut BufReader<R>,
) -> Result<AccountsDbFields<Self::SerializableAccountStorageEntry>, Error>
where
R: Read;
}
fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
where
R: Read,
T: DeserializeOwned,
{
bincode::options()
.with_limit(MAX_STREAM_SIZE)
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from::<R, T>(reader)
}
pub(crate) fn bank_from_stream<R, P>(
serde_style: SerdeStyle,
stream: &mut BufReader<R>,
append_vecs_path: P,
account_paths: &[PathBuf],
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> std::result::Result<Bank, Error>
where
R: Read,
P: AsRef<Path>,
{
macro_rules! INTO {
($x:ident) => {{
let (bank_fields, accounts_db_fields) = $x::deserialize_bank_fields(stream)?;
let bank = reconstruct_bank_from_fields(
bank_fields,
accounts_db_fields,
genesis_config,
frozen_account_pubkeys,
account_paths,
append_vecs_path,
debug_keys,
additional_builtins,
)?;
Ok(bank)
}};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_from_stream error: {:?}", err);
err
})
}
pub(crate) fn bank_to_stream<W>(
serde_style: SerdeStyle,
stream: &mut BufWriter<W>,
bank: &Bank,
snapshot_storages: &[SnapshotStorage],
) -> Result<(), Error>
where
W: Write,
{
macro_rules! INTO {
($x:ident) => {
bincode::serialize_into(
stream,
&SerializableBankAndStorage::<$x> {
bank,
snapshot_storages,
phantom: std::marker::PhantomData::default(),
},
)
};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_to_stream error: {:?}", err);
err
})
}
struct SerializableBankAndStorage<'a, C> {
bank: &'a Bank,
snapshot_storages: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
fn | <S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_bank_and_storage(serializer, self)
}
}
struct SerializableAccountsDB<'a, C> {
accounts_db: &'a AccountsDB,
slot: Slot,
account_storage_entries: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_accounts_db_fields(serializer, self)
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}
fn reconstruct_bank_from_fields<E, P>(
bank_fields: BankFieldsToDeserialize,
accounts_db_fields: AccountsDbFields<E>,
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
account_paths: &[PathBuf],
append_vecs_path: P,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> Result<Bank, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = reconstruct_accountsdb_from_fields(
accounts_db_fields,
account_paths,
append_vecs_path,
&genesis_config.cluster_type,
)?;
accounts_db.freeze_accounts(&bank_fields.ancestors, frozen_account_pubkeys);
let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot);
let bank = Bank::new_from_fields(
bank_rc,
genesis_config,
bank_fields,
debug_keys,
additional_builtins,
);
Ok(bank)
}
fn reconstruct_accountsdb_from_fields<E, P>(
accounts_db_fields: AccountsDbFields<E>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
cluster_type: &ClusterType,
) -> Result<AccountsDB, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = AccountsDB::new(account_paths.to_vec(), cluster_type);
let AccountsDbFields(storage, version, slot, bank_hash_info) = accounts_db_fields;
// convert to two level map of slot -> id -> account storage entry
let storage = {
let mut map = HashMap::new();
for (slot, entries) in storage.into_iter() {
let sub_map = map.entry(slot).or_insert_with(HashMap::new);
for entry in entries.into_iter() {
let entry: AccountStorageEntry = entry.into();
entry.slot.store(slot, Ordering::Relaxed);
sub_map.insert(entry.append_vec_id(), Arc::new(entry));
}
}
map
};
let mut last_log_update = Instant::now();
let mut remaining_slots_to_process = storage.len();
// Remap the deserialized AppendVec paths to point to correct local paths
let mut storage = storage
.into_iter()
.map(|(slot, mut slot_storage)| {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 10 {
info!("{} slots remaining...", remaining_slots_to_process);
last_log_update = now;
}
remaining_slots_to_process -= 1;
let mut new_slot_storage = HashMap::new();
for (id, storage_entry) in slot_storage.drain() {
let path_index = thread_rng().gen_range(0, accounts_db.paths.len());
let local_dir = &accounts_db.paths[path_index];
std::fs::create_dir_all(local_dir).expect("Create directory failed");
// Move the corresponding AppendVec from the snapshot into the directory pointed
// at by `local_dir`
let append_vec_relative_path =
AppendVec::new_relative_path(slot, storage_entry.append_vec_id());
let append_vec_abs_path = stream_append_vecs_path
.as_ref()
.join | serialize | identifier_name |
serde_snapshot.rs | genesis_config::ClusterType,
genesis_config::GenesisConfig,
hard_forks::HardForks,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
},
std::{
collections::{HashMap, HashSet},
io::{BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
result::Result,
sync::{atomic::Ordering, Arc, RwLock},
time::Instant,
},
};
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::IgnoreAsHelper;
mod common;
mod future;
mod tests;
mod utils;
use future::Context as TypeContextFuture;
#[allow(unused_imports)]
use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple};
// a number of test cases in accounts_db use this
#[cfg(test)]
pub(crate) use self::tests::reconstruct_accounts_db_via_serialization;
pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
NEWER,
}
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
struct AccountsDbFields<T>(HashMap<Slot, Vec<T>>, u64, Slot, BankHashInfo);
trait TypeContext<'a> {
type SerializableAccountStorageEntry: Serialize
+ DeserializeOwned
+ From<&'a AccountStorageEntry>
+ Into<AccountStorageEntry>;
fn serialize_bank_and_storage<S: serde::ser::Serializer>(
serializer: S,
serializable_bank: &SerializableBankAndStorage<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
serializable_db: &SerializableAccountsDB<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn deserialize_bank_fields<R>(
stream: &mut BufReader<R>,
) -> Result<
(
BankFieldsToDeserialize,
AccountsDbFields<Self::SerializableAccountStorageEntry>,
),
Error,
>
where
R: Read;
fn deserialize_accounts_db_fields<R>(
stream: &mut BufReader<R>,
) -> Result<AccountsDbFields<Self::SerializableAccountStorageEntry>, Error>
where
R: Read;
}
fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
where
R: Read,
T: DeserializeOwned,
{
bincode::options()
.with_limit(MAX_STREAM_SIZE)
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from::<R, T>(reader)
}
pub(crate) fn bank_from_stream<R, P>(
serde_style: SerdeStyle,
stream: &mut BufReader<R>,
append_vecs_path: P,
account_paths: &[PathBuf],
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> std::result::Result<Bank, Error>
where
R: Read,
P: AsRef<Path>,
{
macro_rules! INTO {
($x:ident) => {{
let (bank_fields, accounts_db_fields) = $x::deserialize_bank_fields(stream)?;
let bank = reconstruct_bank_from_fields(
bank_fields,
accounts_db_fields,
genesis_config,
frozen_account_pubkeys,
account_paths,
append_vecs_path,
debug_keys,
additional_builtins,
)?;
Ok(bank)
}};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_from_stream error: {:?}", err);
err
})
}
pub(crate) fn bank_to_stream<W>(
serde_style: SerdeStyle,
stream: &mut BufWriter<W>,
bank: &Bank,
snapshot_storages: &[SnapshotStorage],
) -> Result<(), Error>
where
W: Write,
{
macro_rules! INTO {
($x:ident) => {
bincode::serialize_into(
stream,
&SerializableBankAndStorage::<$x> {
bank,
snapshot_storages,
phantom: std::marker::PhantomData::default(),
},
)
};
}
match serde_style {
SerdeStyle::NEWER => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_to_stream error: {:?}", err);
err
})
}
struct SerializableBankAndStorage<'a, C> {
bank: &'a Bank,
snapshot_storages: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_bank_and_storage(serializer, self)
}
}
struct SerializableAccountsDB<'a, C> {
accounts_db: &'a AccountsDB,
slot: Slot,
account_storage_entries: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_accounts_db_fields(serializer, self)
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}
fn reconstruct_bank_from_fields<E, P>(
bank_fields: BankFieldsToDeserialize,
accounts_db_fields: AccountsDbFields<E>,
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
account_paths: &[PathBuf],
append_vecs_path: P,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
) -> Result<Bank, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
{
let mut accounts_db = reconstruct_accountsdb_from_fields(
accounts_db_fields,
account_paths,
append_vecs_path,
&genesis_config.cluster_type,
)?;
accounts_db.freeze_accounts(&bank_fields.ancestors, frozen_account_pubkeys);
let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot);
let bank = Bank::new_from_fields(
bank_rc,
genesis_config,
bank_fields,
debug_keys,
additional_builtins,
);
Ok(bank)
}
fn reconstruct_accountsdb_from_fields<E, P>(
accounts_db_fields: AccountsDbFields<E>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
cluster_type: &ClusterType,
) -> Result<AccountsDB, Error>
where
E: Into<AccountStorageEntry>,
P: AsRef<Path>,
| let mut remaining_slots_to_process = storage.len();
// Remap the deserialized AppendVec paths to point to correct local paths
let mut storage = storage
.into_iter()
.map(|(slot, mut slot_storage)| {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 10 {
info!("{} slots remaining...", remaining_slots_to_process);
last_log_update = now;
}
remaining_slots_to_process -= 1;
let mut new_slot_storage = HashMap::new();
for (id, storage_entry) in slot_storage.drain() {
let path_index = thread_rng().gen_range(0, accounts_db.paths.len());
let local_dir = &accounts_db.paths[path_index];
std::fs::create_dir_all(local_dir).expect("Create directory failed");
// Move the corresponding AppendVec from the snapshot into the directory pointed
// at by `local_dir`
let append_vec_relative_path =
AppendVec::new_relative_path(slot, storage_entry.append_vec_id());
let append_vec_abs_path = stream_append_vecs_path
.as_ref()
.join(& | {
let mut accounts_db = AccountsDB::new(account_paths.to_vec(), cluster_type);
let AccountsDbFields(storage, version, slot, bank_hash_info) = accounts_db_fields;
// convert to two level map of slot -> id -> account storage entry
let storage = {
let mut map = HashMap::new();
for (slot, entries) in storage.into_iter() {
let sub_map = map.entry(slot).or_insert_with(HashMap::new);
for entry in entries.into_iter() {
let entry: AccountStorageEntry = entry.into();
entry.slot.store(slot, Ordering::Relaxed);
sub_map.insert(entry.append_vec_id(), Arc::new(entry));
}
}
map
};
let mut last_log_update = Instant::now(); | identifier_body |
control.go | MsgSendertoSMO: MsgSendertoSMO,
//subscriber: subscriber,
AccessDbagent: AcessDbAgent,
Endpoint: endpoint,
}
return c
}
func (c *Control) ReadyCB(data interface{}) {
if c.MsgSendertoSMO == nil {
}
}
func (c *Control) CreateAndRunMsgServer (grpcAddr string) {
svc := service.NewMsgService(c)
ep := endpoint.NewMsgServiceEndpoint(svc)
s := transport.NewMsgServer(ep)
// The gRPC listener mounts the Go kit gRPC server we created.
grpcListener, err := net.Listen("tcp", grpcAddr)
if err != nil {
xapp.Logger.Info("transport", "gRPC", "during", "Listen", "err", err)
os.Exit(1)
}
xapp.Logger.Info("transport", "gRPC", "addr", grpcAddr)
// we add the Go Kit gRPC Interceptor to our gRPC service as it is used by
// the here demonstrated zipkin tracing middleware.
baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor))
msgx.RegisterMsgServiceServer(baseServer, s)
baseServer.Serve(grpcListener)
}
func (c *Control) Run(grpcAddr string) {
//xapp.SetReadyCB(c.ReadyCB, nil)
//xapp.Run(c,grpcAddr)
c.CreateAndRunMsgServer(grpcAddr)
}
func (c *Control) Consume(msg *xapp.MsgParams) (err error) {
xapp.Logger.Debug("Received message type: %s", xapp.RicMessageTypeToName[msg.Mtype])
if c.MsgSendertoSMO == nil {
err = fmt.Errorf("Msg object nil can handle %s", msg.String())
xapp.Logger.Error("%s", err.Error())
return
}
c.CntRecvMsg++
switch msg.Mtype {
case xapp.RIC_O1_REGISTER:
go c.handleXappRegisterRequest(msg)
case xapp.RIC_O1_INIT:
go c.handleSMOInit(msg)
case xapp.RIC_O1_ENABLE:
go c.handleSMOEnableOrDisable(msg)
case xapp.RIC_O1_DISABLE:
go c.handleSMOEnableOrDisable(msg)
default:
xapp.Logger.Info("Unknown Message Type '%d', discarding", msg.Mtype)
}
return
}
//分配xappID
//XappID 合法值 (1...65535),0 为 非法值
func (c *Control) allocXappID(RegMsg msgx.XappRegMsg) (uint32,error, bool){
isRegistered := false
resp,err := c.AccessDbagent.Client.MOITableReadAll(context.Background(),&db.MOITableReadAllRequest{Api: "1",})
if err != nil {
xapp.Logger.Error(err.Error())
return 0 ,err,isRegistered
}
//第一个注册的xapp,直接分配 xappID = 1
if len(resp.MoiTables) == 0 {
return 1,nil,isRegistered
}
for _,r := range resp.MoiTables{
//重复注册,走正常返回流程
if r.XappName == RegMsg.XappName {
isRegistered = true
return r.XappID,nil,isRegistered
}
}
//新注册 ,先按从小到大排序(只有1个表项也可以排序,不会返回错误),然后找到最小的可用xappID
ps := resp.MoiTables
sort.SliceStable(ps, func(i, j int) bool {
return ps[i].XappID < ps[j].XappID
})
var i uint32
//正常的分配为: ps[0].XappID = 1 ,ps[1].XappID = 2 ,ps[2].XappID = 3 ,
//如果出现ps[1].XappID = 3 ,说明原来的XappID = 2 表项已经释放,可以被再次分配使用
for i = 1; i <= 65535;i++ {
if i != ps[i-1].XappID {
// i 值未被使用,可以被分配
return i,nil,isRegistered
}
}
return 0 , fmt.Errorf("分配失败"),isRegistered
}
//新增该xApp的管理对象实例(xApp MOI)表项
func (c *Control) addXappMOI(XappID uint32,RegMsg msgx.XappRegMsg) error {
r := &db.MOITableInsertRequest{}
r.Api = "1"
m := &db.MOITable{}
m.XappID = XappID
m.XappName = RegMsg.XappName
m.XappVer = RegMsg.XappVer
m.Functions = RegMsg.XappFunctions
m.RunningStatus = "inactive"
m.IsReady = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册 | .Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
RicServices["nricdbagent"] = &RicDbagent
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
|
return
}
xapp.Logger | identifier_name |
control.go | //subscriber: subscriber,
AccessDbagent: AcessDbAgent,
Endpoint: endpoint,
}
return c
}
func (c *Control) ReadyCB(data interface{}) {
if c.MsgSendertoSMO == nil {
}
}
func (c *Control) CreateAndRunMsgServer (grpcAddr string) {
svc := service.NewMsgService(c)
ep := endpoint.NewMsgServiceEndpoint(svc)
s := transport.NewMsgServer(ep)
// The gRPC listener mounts the Go kit gRPC server we created.
grpcListener, err := net.Listen("tcp", grpcAddr)
if err != nil {
xapp.Logger.Info("transport", "gRPC", "during", "Listen", "err", err)
os.Exit(1)
}
xapp.Logger.Info("transport", "gRPC", "addr", grpcAddr)
// we add the Go Kit gRPC Interceptor to our gRPC service as it is used by
// the here demonstrated zipkin tracing middleware.
baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor))
msgx.RegisterMsgServiceServer(baseServer, s)
baseServer.Serve(grpcListener)
}
func (c *Control) Run(grpcAddr string) {
//xapp.SetReadyCB(c.ReadyCB, nil)
//xapp.Run(c,grpcAddr)
c.CreateAndRunMsgServer(grpcAddr)
}
func (c *Control) Consume(msg *xapp.MsgParams) (err error) {
xapp.Logger.Debug("Received message type: %s", xapp.RicMessageTypeToName[msg.Mtype])
if c.MsgSendertoSMO == nil {
err = fmt.Errorf("Msg object nil can handle %s", msg.String())
xapp.Logger.Error("%s", err.Error())
return
}
c.CntRecvMsg++
switch msg.Mtype {
case xapp.RIC_O1_REGISTER:
go c.handleXappRegisterRequest(msg)
case xapp.RIC_O1_INIT:
go c.handleSMOInit(msg)
case xapp.RIC_O1_ENABLE:
go c.handleSMOEnableOrDisable(msg)
case xapp.RIC_O1_DISABLE:
go c.handleSMOEnableOrDisable(msg)
default:
xapp.Logger.Info("Unknown Message Type '%d', discarding", msg.Mtype)
}
return
}
//分配xappID
//XappID 合法值 (1...65535),0 为 非法值
func (c *Control) allocXappID(RegMsg msgx.XappRegMsg) (uint32,error, bool){
isRegistered := false
resp,err := c.AccessDbagent.Client.MOITableReadAll(context.Background(),&db.MOITableReadAllRequest{Api: "1",})
if err != nil {
xapp.Logger.Error(err.Error())
return 0 ,err,isRegistered
}
//第一个注册的xapp,直接分配 xappID = 1
if len(resp.MoiTables) == 0 {
return 1,nil,isRegistered
}
for _,r := range resp.MoiTables{
//重复注册,走正常返回流程
if r.XappName == RegMsg.XappName {
isRegistered = true
return r.XappID,nil,isRegistered
}
}
//新注册 ,先按从小到大排序(只有1个表项也可以排序,不会返回错误),然后找到最小的可用xappID
ps := resp.MoiTables
sort.SliceStable(ps, func(i, j int) bool {
return ps[i].XappID < ps[j].XappID
})
var i uint32
//正常的分配为: ps[0].XappID = 1 ,ps[1].XappID = 2 ,ps[2].XappID = 3 ,
//如果出现ps[1].XappID = 3 ,说明原来的XappID = 2 表项已经释放,可以被再次分配使用
for i = 1; i <= 65535;i++ {
if i != ps[i-1].XappID {
// i 值未被使用,可以被分配
return i,nil,isRegistered
}
}
return 0 , fmt.Errorf("分配失败"),isRegistered
}
//新增该xApp的管理对象实例(xApp MOI)表项
func (c *Control) addXappMOI(XappID uint32,RegMsg msgx.XappRegMsg) error {
r := &db.MOITableInsertRequest{}
r.Api = "1"
m := &db.MOITable{}
m.XappID = XappID
m.XappName = RegMsg.XappName
m.XappVer = RegMsg.XappVer
m.Functions = RegMsg.XappFunctions
m.RunningStatus = "inactive"
m.IsReady = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册
return
}
xapp.Logger.Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
RicServices["nricdbagent"] = &RicDbagent
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader | MsgSendertoSMO: MsgSendertoSMO, | random_line_split | |
control.go | Ready = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册
return
}
xapp.Logger.Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
RicServices["nricdbagent"] = &RicDbagent
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_RESP,
MsgVer: 1,
XappRequestID: &msgx.XAPPRequestID{
XappID: XappID, //返回分配的XappID
XappInstanceID: RegMsg.Header.XappRequestID.XappInstanceID,
},
},
RicServices: RicServices,
Topic:Topic,
KafkaURL: internal.KafkaURL,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) SendRegisterFailureResp(Client2Xapp *msgx.MsgSender,Cause string,params *xapp.MsgParams) {
xapp.Logger.Info("Send RegisterFailureResp Msg to Xapp: %s\n",xapp.RicMessageTypeToName[params.Mtype])
XappRegResp := msgx.XappRegResp{
Header: &msgx.RICMsgHeader{
MsgType: xapp.RIC_O1_REGISTER_FAILURE,
MsgVer: 1,
},
Cause: Cause,
}
pbXappRegResp,err := proto.Marshal(&XappRegResp)
if err != nil {
xapp.Logger.Error("Marshal XappRegResp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
params.Mtype = xapp.RIC_O1_REGISTER_RESP
params.Payload = pbXappRegResp
params.PayloadLen = len(pbXappRegResp)
err = Client2Xapp.SendMsg(params)
if err != nil {
xapp.Logger.Error("Send RIC_O1_REGISTER_RESP to Xapp failed! %s",err.Error())
// 释放MOI表项,释放XappID
c.AccessDbagent.Client.MOITableDelete(context.Background(),&db.MOITableDeleteRequest{XappID: XappID,Api: "1"})
return
}
}
//-------------------------------------------------------------------
// handle from SMO Init Request
//------------------------------------------------------------------
func (c *Control) handleSMOInit(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOInitMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Init msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
//-------------------------------------------------------------------
// handle from SMO Enable or Disable Xapp Request
//------------------------------------------------------------------
func (c *Control) handleSMOEnableOrDisable(params *xapp.MsgParams) {
xapp.Logger.Info("Recv Msg From SMO: %s\n",xapp.RicMessageTypeToName[params.Mtype])
M := &msgx.SMOEnableMsg{}
err := proto.Unmarshal(params.Payload,M)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
//send Enable or Disable msg to Xapp
if e, ok := c.Endpoint[M.Header.XappRequestID.XappID]; ok {
err = e.SendMsg(params)
if err != nil {
xapp.Logger.Error(err.Error())
return
}
}else{
xapp.Logger.Error("Endpoint is nil ,M.Header.XappRequestID.XappID = %d",M.Header.XappRequestID.XappID)
return
}
}
| conditional_block | ||
control.go |
func (c *Control) ReadyCB(data interface{}) {
if c.MsgSendertoSMO == nil {
}
}
func (c *Control) CreateAndRunMsgServer (grpcAddr string) {
svc := service.NewMsgService(c)
ep := endpoint.NewMsgServiceEndpoint(svc)
s := transport.NewMsgServer(ep)
// The gRPC listener mounts the Go kit gRPC server we created.
grpcListener, err := net.Listen("tcp", grpcAddr)
if err != nil {
xapp.Logger.Info("transport", "gRPC", "during", "Listen", "err", err)
os.Exit(1)
}
xapp.Logger.Info("transport", "gRPC", "addr", grpcAddr)
// we add the Go Kit gRPC Interceptor to our gRPC service as it is used by
// the here demonstrated zipkin tracing middleware.
baseServer := grpc.NewServer(grpc.UnaryInterceptor(kitgrpc.Interceptor))
msgx.RegisterMsgServiceServer(baseServer, s)
baseServer.Serve(grpcListener)
}
func (c *Control) Run(grpcAddr string) {
//xapp.SetReadyCB(c.ReadyCB, nil)
//xapp.Run(c,grpcAddr)
c.CreateAndRunMsgServer(grpcAddr)
}
func (c *Control) Consume(msg *xapp.MsgParams) (err error) {
xapp.Logger.Debug("Received message type: %s", xapp.RicMessageTypeToName[msg.Mtype])
if c.MsgSendertoSMO == nil {
err = fmt.Errorf("Msg object nil can handle %s", msg.String())
xapp.Logger.Error("%s", err.Error())
return
}
c.CntRecvMsg++
switch msg.Mtype {
case xapp.RIC_O1_REGISTER:
go c.handleXappRegisterRequest(msg)
case xapp.RIC_O1_INIT:
go c.handleSMOInit(msg)
case xapp.RIC_O1_ENABLE:
go c.handleSMOEnableOrDisable(msg)
case xapp.RIC_O1_DISABLE:
go c.handleSMOEnableOrDisable(msg)
default:
xapp.Logger.Info("Unknown Message Type '%d', discarding", msg.Mtype)
}
return
}
//分配xappID
//XappID 合法值 (1...65535),0 为 非法值
func (c *Control) allocXappID(RegMsg msgx.XappRegMsg) (uint32,error, bool){
isRegistered := false
resp,err := c.AccessDbagent.Client.MOITableReadAll(context.Background(),&db.MOITableReadAllRequest{Api: "1",})
if err != nil {
xapp.Logger.Error(err.Error())
return 0 ,err,isRegistered
}
//第一个注册的xapp,直接分配 xappID = 1
if len(resp.MoiTables) == 0 {
return 1,nil,isRegistered
}
for _,r := range resp.MoiTables{
//重复注册,走正常返回流程
if r.XappName == RegMsg.XappName {
isRegistered = true
return r.XappID,nil,isRegistered
}
}
//新注册 ,先按从小到大排序(只有1个表项也可以排序,不会返回错误),然后找到最小的可用xappID
ps := resp.MoiTables
sort.SliceStable(ps, func(i, j int) bool {
return ps[i].XappID < ps[j].XappID
})
var i uint32
//正常的分配为: ps[0].XappID = 1 ,ps[1].XappID = 2 ,ps[2].XappID = 3 ,
//如果出现ps[1].XappID = 3 ,说明原来的XappID = 2 表项已经释放,可以被再次分配使用
for i = 1; i <= 65535;i++ {
if i != ps[i-1].XappID {
// i 值未被使用,可以被分配
return i,nil,isRegistered
}
}
return 0 , fmt.Errorf("分配失败"),isRegistered
}
//新增该xApp的管理对象实例(xApp MOI)表项
func (c *Control) addXappMOI(XappID uint32,RegMsg msgx.XappRegMsg) error {
r := &db.MOITableInsertRequest{}
r.Api = "1"
m := &db.MOITable{}
m.XappID = XappID
m.XappName = RegMsg.XappName
m.XappVer = RegMsg.XappVer
m.Functions = RegMsg.XappFunctions
m.RunningStatus = "inactive"
m.IsReady = "false"
m.Topic = "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
r.MoiTable = m
_, err := c.AccessDbagent.Client.MOITableInsert(context.Background(),r)
return err
}
//通知网管该xApp在nRT RIC平台上的部署
func (c *Control) Register2SMO (RegMsg *msgx.XappRegMsg,params *xapp.MsgParams){
RICO1RegMsg,err := proto.Marshal(RegMsg)
if err != nil {
xapp.Logger.Error("Marshal RICO1RegMsg failed! %s",err.Error())
return
}
params.Payload = RICO1RegMsg
params.PayloadLen = len(RICO1RegMsg)
//确保消息发送成功,否则每隔 5 秒再次发送
for {
err := c.MsgSendertoSMO.SendMsg(params)
if err == nil {
break
}
xapp.Logger.Error("Register2SMO:",err.Error())
time.Sleep( 5 * time.Second)
}
}
//-------------------------------------------------------------------
// handle from XAPP Register Request
//------------------------------------------------------------------
func (c *Control) handleXappRegisterRequest(params *xapp.MsgParams) {
xapp.Logger.Info("Register MSG from XAPP: %s", params.String())
var RegMsg msgx.XappRegMsg
err := proto.Unmarshal(params.Payload,&RegMsg)
if err != nil {
xapp.Logger.Error("Unmarshal XappRegMsg failed! %s",err.Error())
//(解析不到xapp的ip,无法返回响应消息)xapp 接收不到注册成功响应消息,会继续发起注册
return
}
xapp.Logger.Info("XappName = %s,XappRequestID = %d,Token = %s /n",
RegMsg.XappName,RegMsg.Header.XappRequestID,RegMsg.Header.Token)
//第一个消息,xapp还没获取到topic,需要通过grpc来返回注册响应消息
Client2Xapp := msgx.NewMsgSender(RegMsg.XappIpaddr,RegMsg.XappPort)
//分配xappID ; 并判断是否重复注册
XappID,err,isRegistered := c.allocXappID(RegMsg)
if err != nil {
xapp.Logger.Error("Alloc XappID failed! %s",err.Error())
return
}
//非重复注册,新增该xApp的管理对象实例(xApp MOI)表项
if !isRegistered {
err = c.addXappMOI(XappID ,RegMsg)
if err != nil {
xapp.Logger.Error("Add Xapp MOI failed! %s",err.Error())
return
}
}
//通知网管该xApp在nRT RIC平台上的部署
RegMsg.Header.XappRequestID.XappID = XappID
go c.Register2SMO(&RegMsg,params)
//response: 携带xApp所需服务(如数据库、冲突解决功能)的信息(服务名称、版本、详细信息等)
Topic := "Xapp_"+strconv.Itoa(int(XappID))+"_topic"
//除第一个RegisterResp消息外,第二个及以后的消息返回xapp,都通过xapp专有的kafka消息通道返回
Endpoint := msgx.NewKafkaMsgSender(Topic)
c.Endpoint[XappID] = Endpoint
//
var RicServices map [string]*msgx.RICService
RicServices = make(map[string]*msgx.RICService)
RicSubsmgr := msgx.RICService{Name:"nricsubs",ServiceVer: 1,IpAddr: internal.SubmgrHost,Port: internal.DefaultGRPCPort}
RicServices["nricsubs"] = &RicSubsmgr
RicCflmgr := msgx.RICService{Name:"nriccflm",ServiceVer: 1,IpAddr: internal.NriccflmHost,Port: internal.DefaultGRPCPort}
RicServices["nriccflm"] = &RicCflmgr
RicDbagent := msgx.RICService{Name:"nricdbagent",ServiceVer: 1,IpAddr: internal.DbagentHost,Port: internal.DefaultGRPCPort}
| {
endpoint := make(map[uint32]*msgx.KafkaMsgSender)
c := &Control{
//MsgClientToXapp: MsgClientToXapp,
MsgSendertoSMO: MsgSendertoSMO,
//subscriber: subscriber,
AccessDbagent: AcessDbAgent,
Endpoint: endpoint,
}
return c
} | identifier_body | |
daemon.go | a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.RunPeerMgrKey),
node.Unset(new(*peermgr.PeerMgr)),
),
)
if err != nil {
return xerrors.Errorf("initializing node: %w", err)
}
if cctx.String("import-key") != "" {
if err := importKey(ctx, api, cctx.String("import-key")); err != nil {
log.Errorf("importing key failed: %+v", err)
}
}
endpoint, err := r.APIEndpoint()
if err != nil {
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := []jsonrpc.ServerOption{jsonrpc.WithServerErrors(lapi.RPCErrors)}
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
},
}
func importKey(ctx context.Context, api lapi.FullNode, f string) error {
f, err := homedir.Expand(f)
if err != nil {
return err
}
hexdata, err := os.ReadFile(f)
if err != nil {
return err
}
data, err := hex.DecodeString(strings.TrimSpace(string(hexdata)))
if err != nil {
return err
}
var ki types.KeyInfo
if err := json.Unmarshal(data, &ki); err != nil {
return err
}
addr, err := api.WalletImport(ctx, &ki)
if err != nil {
return err
}
if err := api.WalletSetDefault(ctx, addr); err != nil {
return err
}
log.Infof("successfully imported key for %s", addr)
return nil
}
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
rrd, err := httpreader.NewResumableReader(ctx, fname)
if err != nil {
return xerrors.Errorf("fetching chain CAR failed: setting up resumable reader: %w", err)
}
rd = rrd
l = rrd.ContentLength()
} else {
fname, err = homedir.Expand(fname)
if err != nil {
return err
}
fi, err := os.Open(fname)
if err != nil {
return err
}
defer fi.Close() //nolint:errcheck
st, err := os.Stat(fname)
if err != nil {
return err
}
rd = fi
l = st.Size()
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
mds, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournal(lr, journal.EnvDisabledEvents())
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, filcns.Weight, j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
bufr := bufio.NewReaderSize(rd, 1<<20)
header, err := bufr.Peek(4)
if err != nil {
return xerrors.Errorf("peek header: %w", err)
}
bar := pb.New64(l)
br := bar.NewProxyReader(bufr)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
var ir io.Reader = br
if string(header[1:]) == "\xB5\x2F\xFD" { // zstd
zr := zstd.NewReader(br)
defer func() {
if err := zr.Close(); err != nil {
log.Errorw("closing zstd reader", "error", err)
}
}()
ir = zr
}
bar.Start()
ts, err := cst.Import(ctx, ir)
bar.Finish()
if err != nil {
return xerrors.Errorf("importing chain failed: %w", err)
}
if err := cst.FlushValidationCache(ctx); err != nil {
return xerrors.Errorf("flushing validation cache failed: %w", err)
}
gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
if err != nil {
return err
}
err = cst.SetGenesis(ctx, gb.Blocks()[0])
if err != nil {
return err
}
if !snapshot {
shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
if err != nil {
return xerrors.Errorf("failed to construct beacon schedule: %w", err)
}
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
if err != nil {
return err
}
log.Infof("validating imported chain...")
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
}
}
log.Infof("accepting %s as new head", ts.Cids())
if err := cst.ForceHeadSilent(ctx, ts); err != nil {
return err
}
// populate the message index if user has EnableMsgIndex enabled
//
c, err := lr.Config()
if err != nil {
return err
}
cfg, ok := c.(*config.FullNode)
if !ok {
return xerrors.Errorf("invalid config for repo, got: %T", c)
}
if cfg.Index.EnableMsgIndex {
log.Info("populating message index...")
if err := index.PopulateAfterSnapshot(ctx, path.Join(lr.Path(), "sqlite"), cst); err != nil {
return err
}
log.Info("populating message index done")
}
return nil
}
func | removeExistingChain | identifier_name | |
daemon.go | return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
}
if err := restore(cctx, r); err != nil {
return xerrors.Errorf("restoring from backup: %w", err)
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
fmt.Println("Chain import complete, halting as requested...")
return nil
}
}
genesis := node.Options()
if len(genBytes) > 0 {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genBytes))
}
if cctx.String(makeGenFlag) != "" {
if cctx.String(preTemplateFlag) == "" {
return xerrors.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag)
}
genesis = node.Override(new(modules.Genesis), testing.MakeGenesis(cctx.String(makeGenFlag), cctx.String(preTemplateFlag)))
}
shutdownChan := make(chan struct{})
// If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.RunPeerMgrKey),
node.Unset(new(*peermgr.PeerMgr)),
),
)
if err != nil {
return xerrors.Errorf("initializing node: %w", err)
}
if cctx.String("import-key") != "" {
if err := importKey(ctx, api, cctx.String("import-key")); err != nil {
log.Errorf("importing key failed: %+v", err)
}
}
endpoint, err := r.APIEndpoint()
if err != nil {
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := []jsonrpc.ServerOption{jsonrpc.WithServerErrors(lapi.RPCErrors)}
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
},
}
func importKey(ctx context.Context, api lapi.FullNode, f string) error {
f, err := homedir.Expand(f)
if err != nil {
return err
}
hexdata, err := os.ReadFile(f)
if err != nil {
return err
}
data, err := hex.DecodeString(strings.TrimSpace(string(hexdata)))
if err != nil {
return err
}
var ki types.KeyInfo
if err := json.Unmarshal(data, &ki); err != nil {
return err
}
addr, err := api.WalletImport(ctx, &ki)
if err != nil {
return err
}
if err := api.WalletSetDefault(ctx, addr); err != nil {
return err
}
log.Infof("successfully imported key for %s", addr)
return nil
}
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
var rd io.Reader
var l int64
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
rrd, err := httpreader.NewResumableReader(ctx, fname)
if err != nil {
return xerrors.Errorf("fetching chain CAR failed: setting up resumable reader: %w", err)
}
rd = rrd
l = rrd.ContentLength()
} else {
fname, err = homedir.Expand(fname)
if err != nil {
return err
}
fi, err := os.Open(fname)
if err != nil {
return err
}
defer fi.Close() //nolint:errcheck
st, err := os.Stat(fname)
if err != nil {
return err
}
rd = fi
l = st.Size()
}
lr, err := r.Lock(repo.FullNode)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return xerrors.Errorf("failed to open blockstore: %w", err)
}
mds, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournal(lr, journal.EnvDisabledEvents())
if err != nil {
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, filcns.Weight, j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
bufr := bufio.NewReaderSize(rd, 1<<20)
header, err := bufr.Peek(4)
if err != nil {
return xerrors.Errorf("peek header: %w", err)
}
bar := pb.New64(l)
br := bar.NewProxyReader(bufr)
bar.ShowTimeLeft = true
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
var ir io.Reader = br
if string(header[1:]) == "\xB5\x2F\xFD" { // zstd
zr := zstd.NewReader(br)
defer func() {
if err := zr.Close(); err != nil | {
log.Errorw("closing zstd reader", "error", err)
} | conditional_block | |
daemon.go | ,
},
&cli.StringFlag{
Name: "import-chain",
Usage: "on first run, load chain from given file or url and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file or url",
},
&cli.BoolFlag{
Name: "remove-existing-chain",
Usage: "remove existing chain and splitstore data on a snapshot-import",
},
&cli.BoolFlag{
Name: "halt-after-import",
Usage: "halt the process after importing chain from file",
},
&cli.BoolFlag{
Name: "lite",
Usage: "start lotus in lite mode",
},
&cli.StringFlag{
Name: "pprof",
Usage: "specify name of file for writing cpu profile to",
},
&cli.StringFlag{
Name: "profile",
Usage: "specify type of node",
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringFlag{
Name: "config",
Usage: "specify path of config file to use",
},
// FIXME: This is not the correct place to put this configuration
// option. Ideally it would be part of `config.toml` but at the
// moment that only applies to the node configuration and not outside
// components like the RPC server.
&cli.IntFlag{
Name: "api-max-req-size",
Usage: "maximum API request size accepted by the JSON RPC server",
},
&cli.PathFlag{
Name: "restore",
Usage: "restore from backup file",
},
&cli.PathFlag{
Name: "restore-config",
Usage: "config file to use when restoring from backup",
},
},
Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite")
err := runmetrics.Enable(runmetrics.RunMetricOptions{
EnableCPU: true,
EnableMemory: true,
})
if err != nil {
return xerrors.Errorf("enabling runtime metrics: %w", err)
}
if cctx.Bool("manage-fdlimit") {
if _, _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
if prof := cctx.String("pprof"); prof != "" {
profile, err := os.Create(prof)
if err != nil {
return err
}
if err := pprof.StartCPUProfile(profile); err != nil {
return err
}
defer pprof.StopCPUProfile()
}
var isBootstrapper dtypes.Bootstrapper
switch profile := cctx.String("profile"); profile {
case "bootstrapper":
isBootstrapper = true
case "":
// do nothing
default:
return fmt.Errorf("unrecognized profile type: %q", profile)
}
ctx, _ := tag.New(context.Background(),
tag.Insert(metrics.Version, build.BuildVersion),
tag.Insert(metrics.Commit, build.CurrentCommit),
tag.Insert(metrics.NodeType, "chain"),
)
// Register all metric views
if err = view.Register(
metrics.ChainNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1)) |
{
dir, err := homedir.Expand(cctx.String("repo"))
if err != nil {
log.Warnw("could not expand repo location", "error", err)
} else {
log.Infof("lotus repo: %s", dir)
}
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
if cctx.String("config") != "" {
r.SetConfigPath(cctx.String("config"))
}
err = r.Init(repo.FullNode)
if err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
freshRepo := err != repo.ErrRepoExists
if !isLite {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
}
var genBytes []byte
if cctx.String("genesis") != "" {
genBytes, err = os.ReadFile(cctx.String("genesis"))
if err != nil {
return xerrors.Errorf("reading genesis: %w", err)
}
} else {
genBytes = build.MaybeGenesis()
}
if cctx.IsSet("restore") {
if !freshRepo {
return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
}
if err := restore(cctx, r); err != nil {
return xerrors.Errorf("restoring from backup: %w", err)
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
fmt.Println("Chain import complete, halting as requested...")
return nil
}
}
genesis := node.Options()
if len(genBytes) > 0 {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genBytes))
}
if cctx.String(makeGenFlag) != "" {
if cctx.String(preTemplateFlag) == "" {
return xerrors.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag)
}
genesis = node.Override(new(modules.Genesis), testing.MakeGenesis(cctx.String(makeGenFlag), cctx.String(preTemplateFlag)))
}
shutdownChan := make(chan struct{})
// If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.Run | random_line_split | |
daemon.go | _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
if prof := cctx.String("pprof"); prof != "" {
profile, err := os.Create(prof)
if err != nil {
return err
}
if err := pprof.StartCPUProfile(profile); err != nil {
return err
}
defer pprof.StopCPUProfile()
}
var isBootstrapper dtypes.Bootstrapper
switch profile := cctx.String("profile"); profile {
case "bootstrapper":
isBootstrapper = true
case "":
// do nothing
default:
return fmt.Errorf("unrecognized profile type: %q", profile)
}
ctx, _ := tag.New(context.Background(),
tag.Insert(metrics.Version, build.BuildVersion),
tag.Insert(metrics.Commit, build.CurrentCommit),
tag.Insert(metrics.NodeType, "chain"),
)
// Register all metric views
if err = view.Register(
metrics.ChainNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1))
{
dir, err := homedir.Expand(cctx.String("repo"))
if err != nil {
log.Warnw("could not expand repo location", "error", err)
} else {
log.Infof("lotus repo: %s", dir)
}
}
r, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("opening fs repo: %w", err)
}
if cctx.String("config") != "" {
r.SetConfigPath(cctx.String("config"))
}
err = r.Init(repo.FullNode)
if err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
freshRepo := err != repo.ErrRepoExists
if !isLite {
if err := paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), 0); err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
}
var genBytes []byte
if cctx.String("genesis") != "" {
genBytes, err = os.ReadFile(cctx.String("genesis"))
if err != nil {
return xerrors.Errorf("reading genesis: %w", err)
}
} else {
genBytes = build.MaybeGenesis()
}
if cctx.IsSet("restore") {
if !freshRepo {
return xerrors.Errorf("restoring from backup is only possible with a fresh repo!")
}
if err := restore(cctx, r); err != nil {
return xerrors.Errorf("restoring from backup: %w", err)
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
if chainfile != "" && snapshot != "" {
return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'")
}
var issnapshot bool
if chainfile == "" {
chainfile = snapshot
issnapshot = true
}
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
return err
}
if cctx.Bool("halt-after-import") {
fmt.Println("Chain import complete, halting as requested...")
return nil
}
}
genesis := node.Options()
if len(genBytes) > 0 {
genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genBytes))
}
if cctx.String(makeGenFlag) != "" {
if cctx.String(preTemplateFlag) == "" {
return xerrors.Errorf("must also pass file with genesis template to `--%s`", preTemplateFlag)
}
genesis = node.Override(new(modules.Genesis), testing.MakeGenesis(cctx.String(makeGenFlag), cctx.String(preTemplateFlag)))
}
shutdownChan := make(chan struct{})
// If the daemon is started in "lite mode", provide a Gateway
// for RPC calls
liteModeDeps := node.Options()
if isLite {
gapi, closer, err := lcli.GetGatewayAPI(cctx)
if err != nil {
return err
}
defer closer()
liteModeDeps = node.Override(new(lapi.Gateway), gapi)
}
// some libraries like ipfs/go-ds-measure and ipfs/go-ipfs-blockstore
// use ipfs/go-metrics-interface. This injects a Prometheus exporter
// for those. Metrics are exported to the default registry.
if err := metricsprom.Inject(); err != nil {
log.Warnf("unable to inject prometheus ipfs/go-metrics exporter; some metrics will be unavailable; err: %s", err)
}
var api lapi.FullNode
stop, err := node.New(ctx,
node.FullAPI(&api, node.Lite(isLite)),
node.Base(),
node.Repo(r),
node.Override(new(dtypes.Bootstrapper), isBootstrapper),
node.Override(new(dtypes.ShutdownChan), shutdownChan),
genesis,
liteModeDeps,
node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("api") },
node.Override(node.SetApiEndpointKey, func(lr repo.LockedRepo) error {
apima, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/" +
cctx.String("api"))
if err != nil {
return err
}
return lr.SetAPIEndpoint(apima)
})),
node.ApplyIf(func(s *node.Settings) bool { return !cctx.Bool("bootstrap") },
node.Unset(node.RunPeerMgrKey),
node.Unset(new(*peermgr.PeerMgr)),
),
)
if err != nil {
return xerrors.Errorf("initializing node: %w", err)
}
if cctx.String("import-key") != "" {
if err := importKey(ctx, api, cctx.String("import-key")); err != nil {
log.Errorf("importing key failed: %+v", err)
}
}
endpoint, err := r.APIEndpoint()
if err != nil {
return xerrors.Errorf("getting api endpoint: %w", err)
}
//
// Instantiate JSON-RPC endpoint.
// ----
// Populate JSON-RPC options.
serverOptions := []jsonrpc.ServerOption{jsonrpc.WithServerErrors(lapi.RPCErrors)}
if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 {
serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize)))
}
// Instantiate the full node handler.
h, err := node.FullNodeHandler(api, true, serverOptions...)
if err != nil {
return fmt.Errorf("failed to instantiate rpc handler: %s", err)
}
// Serve the RPC.
rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint)
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
node.ShutdownHandler{Component: "node", StopFunc: stop},
)
<-finishCh // fires when shutdown is complete.
// TODO: properly parse api endpoint (or make it a URL)
return nil
},
Subcommands: []*cli.Command{
daemonStopCmd,
},
}
func importKey(ctx context.Context, api lapi.FullNode, f string) error | {
f, err := homedir.Expand(f)
if err != nil {
return err
}
hexdata, err := os.ReadFile(f)
if err != nil {
return err
}
data, err := hex.DecodeString(strings.TrimSpace(string(hexdata)))
if err != nil {
return err
}
var ki types.KeyInfo
if err := json.Unmarshal(data, &ki); err != nil {
return err
} | identifier_body | |
store.rs | use core::{
cmp::Eq,
fmt::{
Binary,
Debug,
Display,
LowerHex,
UpperHex,
},
mem::size_of,
ops::{
BitAnd,
BitAndAssign,
BitOrAssign,
Not,
Shl,
ShlAssign,
Shr,
ShrAssign,
},
};
#[cfg(feature = "atomic")]
use core::sync::atomic::{
self,
Ordering::Relaxed,
};
#[cfg(not(feature = "atomic"))]
use core::cell::Cell;
/** Generalizes over the fundamental types for use in `bitvec` data structures.
This trait must only be implemented on unsigned integer primitives with full
alignment. It cannot be implemented on `u128` on any architecture, or on `u64`
on 32-bit systems.
The `Sealed` supertrait ensures that this can only be implemented locally, and
will never be implemented by downstream crates on new types.
**/
pub trait BitStore:
// Forbid external implementation
Sealed
+ Binary
// Element-wise binary manipulation
+ BitAnd<Self, Output=Self>
+ BitAndAssign<Self>
+ BitOrAssign<Self>
// Permit indexing into a generic array
+ Copy
+ Debug
+ Default
+ Display
// Permit testing a value against 1 in `get()`.
+ Eq
// Rust treats numeric literals in code as vaguely typed and does not make
// them concrete until long after trait expansion, so this enables building
// a concrete Self value from a numeric literal.
+ From<u8>
// Permit extending into a `u64`.
+ Into<u64>
+ LowerHex
+ Not<Output=Self>
+ Send
+ Shl<u8, Output=Self>
+ ShlAssign<u8>
+ Shr<u8, Output=Self>
+ ShrAssign<u8>
// Allow direct access to a concrete implementor type.
+ Sized
+ Sync
+ UpperHex
{
/// The width, in bits, of this type.
const BITS: u8 = size_of::<Self>() as u8 * 8;
/// The number of bits required to index a bit inside the type. This is
/// always log<sub>2</sub> of the type’s bit width.
const INDX: u8 = Self::BITS.trailing_zeros() as u8;
/// The bitmask to turn an arbitrary number into a bit index. Bit indices
/// are always stored in the lowest bits of an index value.
const MASK: u8 = Self::BITS - 1;
/// Name of the implementing type. This is only necessary until the compiler
/// stabilizes `type_name()`.
const TYPENAME: &'static str;
/// Shared-mutability wrapper type used to safely mutate aliased data.
///
/// Within `&/mut BitSlice` contexts, the `Nucleus` type **must** be used to
/// ensure correctly-synchronized access to memory elements that may have
/// aliased mutable access. When a codepath knows that it has full ownership
/// of a memory element of `Self`, and no other codepath may observe, much
/// less modify, it, then that codepath may skip the `Nucleus` type and use
/// plain accessors.
type Nucleus: BitAccess<Self>;
/// Sets a specific bit in an element to a given value.
///
/// # Safety
///
/// This method cannot be called from within a `&mut BitSlice` context; it
/// may only be called by construction of an `&mut Self` reference from a
/// `Self` element directly.
///
/// # Parameters
///
/// - `&mut self`
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be set according to `value`.
/// - `value`: A Boolean value, which sets the bit on `true` and clears it
/// on `false`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
#[inline(always)]
fn set<C>(&mut self, place: BitIdx<Self>, value: bool)
where C: Cursor {
let mask = *C::mask(place);
if value {
*self |= mask;
}
else {
*self &= !mask;
}
}
/// Gets a specific bit in an element.
///
/// # Safety
///
/// This method cannot be called from within a `&BitSlice` context; it may
/// only be called by construction of an `&Self` reference from a `Self`
/// element directly.
///
/// # Parameters
///
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<Self>) -> bool
where C: Cursor {
*self & *C::mask(place) != Self::from(0)
}
/// Counts how many bits in `self` are set to `1`.
///
/// This zero-extends `self` to `u64`, and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `1`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_ones(&0u8), 0);
/// assert_eq!(BitStore::count_ones(&128u8), 1);
/// assert_eq!(BitStore::count_ones(&192u8), 2);
/// assert_eq!(BitStore::count_ones(&224u8), 3);
/// assert_eq!(BitStore::count_ones(&240u8), 4);
/// assert_eq!(BitStore::count_ones(&248u8), 5);
/// assert_eq!(BitStore::count_ones(&252u8), 6);
/// assert_eq!(BitStore::count_ones(&254u8), 7);
/// assert_eq!(BitStore::count_ones(&255u8), 8);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_ones(&self) -> usize {
u64::count_ones((*self).into()) as usize
}
/// Counts how many bits in `self` are set to `0`.
///
/// This inverts `self`, so all `0` bits are `1` and all `1` bits are `0`,
/// then zero-extends `self` to `u64` and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `0`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_zeros(&0u8), 8);
/// assert_eq!(BitStore::count_zeros(&1u8), 7);
/// assert_eq!(BitStore::count_zeros(&3u8), 6);
/// assert_eq!(BitStore::count_zeros(&7u8), 5);
/// assert_eq!(BitStore::count_zeros(&15u8), 4);
/// assert_eq!(BitStore::count_zeros(&31u8), 3);
/// assert_eq!(BitStore::count_zeros(&63u8), 2);
/// assert_eq!(BitStore::count_zeros(&127u8), 1);
/// assert_eq!(BitStore::count_zeros(&255u8), 0);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_zeros(&self) -> usize {
// invert (0 becomes 1, 1 becomes 0), zero-extend, count ones | use crate::{
cursor::Cursor,
indices::BitIdx,
};
| random_line_split | |
store.rs | _ones
#[inline(always)]
fn count_zeros(&self) -> usize {
// invert (0 becomes 1, 1 becomes 0), zero-extend, count ones
u64::count_ones((!*self).into()) as usize
}
/// Extends a single bit to fill the entire element.
///
/// # Parameters
///
/// - `bit`: The bit to extend.
///
/// # Returns
///
/// An element with all bits set to the input.
#[inline]
fn bits(bit: bool) -> Self {
if bit {
!Self::from(0)
}
else {
Self::from(0)
}
}
}
/** Marker trait to seal `BitStore` against downstream implementation.
This trait is public in the module, so that other modules in the crate can use
it, but so long as it is not exported by the crate root and this module is
private, this trait effectively forbids downstream implementation of the
`BitStore` trait.
**/
#[doc(hidden)]
pub trait Sealed {}
macro_rules! store {
( $( $t:ty , $a:ty $( ; )? );* ) => { $(
impl Sealed for $t {}
impl BitStore for $t {
const TYPENAME: &'static str = stringify!($t);
#[cfg(feature = "atomic")]
type Nucleus = $a;
#[cfg(not(feature = "atomic"))]
type Nucleus = Cell<Self>;
}
)* };
}
store![
u8, atomic::AtomicU8;
u16, atomic::AtomicU16;
u32, atomic::AtomicU32;
];
#[cfg(target_pointer_width = "64")]
store![u64, atomic::AtomicU64];
/// Type alias to the CPU word element, `u32`.
#[cfg(target_pointer_width = "32")]
pub type Word = u32;
/// Type alias to the CPU word element, `u64`.
#[cfg(target_pointer_width = "64")]
pub type Word = u64;
/** Common interface for atomic and cellular shared-mutability wrappers.
`&/mut BitSlice` contexts must use the `BitStore::Nucleus` type for all
reference production, and must route through this trait in order to access the
underlying memory. In multi-threaded contexts, this trait enforces that all
access is synchronized through atomic accesses; in single-threaded contexts,
this trait solely permits modification of an aliased element.
It is implemented on the atomic type wrappers when the `atomic` feature is set,
and implemented on the `Cell` type wrapper when the feature is missing. Coupled
with the `Send` implementation on `BitSlice`
**/
pub trait BitAccess<T>: Sized
where T: BitStore {
/// Sets a specific bit in an element low.
///
/// `BitAccess::set` calls this when its `value` is `false`; it
/// unconditionally writes a `0` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn clear_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Sets a specific bit in an element high.
///
/// `BitAccess::set` calls this when its `value` is `true`; it
/// unconditionally writes a `1` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn set_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Inverts a specific bit in an element.
///
/// This is the driver of `BitStore::invert_bit`, and has the same API and
/// documented behavior.
fn invert_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Gets a specific bit in an element.
///
/// # Parameters
///
/// - `&self`: A shared reference to a maybe-mutable element. This uses the
/// trait `load` function to ensure correct reads from memory.
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<T>) -> bool
where C: Cursor {
self.load() & *C::mask(place) != T::from(0)
}
/// Sets a specific bit in an element to a given value.
///
/// This is the driver of `BitStore::set`, and has the same API and
/// documented behavior.
#[inline(always)]
fn set<C>(&self, place: BitIdx<T>, value: bool)
where C: Cursor {
if value {
self.set_bit::<C>(place);
}
else {
self.clear_bit::<C>(place);
}
}
/// Removes the shared-mutability wrapper, producing a read reference to the
/// inner type.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// A read reference to the wrapped type.
///
/// # Safety
///
/// As this removes mutability, it is strictly safe.
#[inline(always)]
fn base(&self) -> &T {
unsafe { &*(self as *const Self as *const T) }
}
/// Transforms a reference of `&[T::Nucleus]` into `&mut [T]`.
///
/// # Safety
///
/// This function is undefined when the `this` slice referent has aliasing
/// pointers. It must only ever be called when the slice referent is
/// guaranteed to have no aliases, but mutability has been removed from the
/// type system at an earlier point in the call stack.
///
/// # Parameters
///
/// - `this`: A slice reference to some shared-mutability reference type.
///
/// # Returns
///
/// A mutable reference to the wrapped interior type of the `this` referent.
#[inline(always)]
unsafe fn base_slice_mut(this: &[Self]) -> &mut [T] {
&mut *(this as *const [Self] as *const [T] as *mut [T])
}
/// Performs a synchronized load on an unsynchronized reference.
///
/// Atomic implementors must ensure that the load is well synchronized, and
/// cell implementors can just read. Each implementor must be strictly gated
/// on the `atomic` feature flag.
fn load(&self) -> T;
}
/* FIXME(myrrlyn): When the `radium` crate publishes generic traits, erase the
implementations currently in use and enable the generic implementation below:
impl<T, R> BitAccess<T> for R
where T: BitStore, R: RadiumBits<T> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
}
*/
#[cfg(feature = "atomic")] fn _atom() {
impl BitAccess<u8> for atomic::AtomicU8 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u8 {
self.load(Relaxed)
}
}
impl BitAccess<u16> for atomic::AtomicU16 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
| self.fetch_and(!*C::mask(bit), Relaxed);
}
| identifier_body | |
store.rs | "32")]
pub type Word = u32;
/// Type alias to the CPU word element, `u64`.
#[cfg(target_pointer_width = "64")]
pub type Word = u64;
/** Common interface for atomic and cellular shared-mutability wrappers.
`&/mut BitSlice` contexts must use the `BitStore::Nucleus` type for all
reference production, and must route through this trait in order to access the
underlying memory. In multi-threaded contexts, this trait enforces that all
access is synchronized through atomic accesses; in single-threaded contexts,
this trait solely permits modification of an aliased element.
It is implemented on the atomic type wrappers when the `atomic` feature is set,
and implemented on the `Cell` type wrapper when the feature is missing. Coupled
with the `Send` implementation on `BitSlice`
**/
pub trait BitAccess<T>: Sized
where T: BitStore {
/// Sets a specific bit in an element low.
///
/// `BitAccess::set` calls this when its `value` is `false`; it
/// unconditionally writes a `0` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn clear_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Sets a specific bit in an element high.
///
/// `BitAccess::set` calls this when its `value` is `true`; it
/// unconditionally writes a `1` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn set_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Inverts a specific bit in an element.
///
/// This is the driver of `BitStore::invert_bit`, and has the same API and
/// documented behavior.
fn invert_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Gets a specific bit in an element.
///
/// # Parameters
///
/// - `&self`: A shared reference to a maybe-mutable element. This uses the
/// trait `load` function to ensure correct reads from memory.
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<T>) -> bool
where C: Cursor {
self.load() & *C::mask(place) != T::from(0)
}
/// Sets a specific bit in an element to a given value.
///
/// This is the driver of `BitStore::set`, and has the same API and
/// documented behavior.
#[inline(always)]
fn set<C>(&self, place: BitIdx<T>, value: bool)
where C: Cursor {
if value {
self.set_bit::<C>(place);
}
else {
self.clear_bit::<C>(place);
}
}
/// Removes the shared-mutability wrapper, producing a read reference to the
/// inner type.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// A read reference to the wrapped type.
///
/// # Safety
///
/// As this removes mutability, it is strictly safe.
#[inline(always)]
fn base(&self) -> &T {
unsafe { &*(self as *const Self as *const T) }
}
/// Transforms a reference of `&[T::Nucleus]` into `&mut [T]`.
///
/// # Safety
///
/// This function is undefined when the `this` slice referent has aliasing
/// pointers. It must only ever be called when the slice referent is
/// guaranteed to have no aliases, but mutability has been removed from the
/// type system at an earlier point in the call stack.
///
/// # Parameters
///
/// - `this`: A slice reference to some shared-mutability reference type.
///
/// # Returns
///
/// A mutable reference to the wrapped interior type of the `this` referent.
#[inline(always)]
unsafe fn base_slice_mut(this: &[Self]) -> &mut [T] {
&mut *(this as *const [Self] as *const [T] as *mut [T])
}
/// Performs a synchronized load on an unsynchronized reference.
///
/// Atomic implementors must ensure that the load is well synchronized, and
/// cell implementors can just read. Each implementor must be strictly gated
/// on the `atomic` feature flag.
fn load(&self) -> T;
}
/* FIXME(myrrlyn): When the `radium` crate publishes generic traits, erase the
implementations currently in use and enable the generic implementation below:
impl<T, R> BitAccess<T> for R
where T: BitStore, R: RadiumBits<T> {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<T>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
}
*/
#[cfg(feature = "atomic")] fn _atom() {
impl BitAccess<u8> for atomic::AtomicU8 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u8>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u8 {
self.load(Relaxed)
}
}
impl BitAccess<u16> for atomic::AtomicU16 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u16>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u16 {
self.load(Relaxed)
}
}
impl BitAccess<u32> for atomic::AtomicU32 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u32>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn load(&self) -> u32 {
self.load(Relaxed)
}
}
#[cfg(target_pointer_width = "64")]
impl BitAccess<u64> for atomic::AtomicU64 {
#[inline(always)]
fn clear_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_and(!*C::mask(bit), Relaxed);
}
#[inline(always)]
fn set_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_or(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn invert_bit<C>(&self, bit: BitIdx<u64>)
where C: Cursor {
self.fetch_xor(*C::mask(bit), Relaxed);
}
#[inline(always)]
fn lo | ad(& | identifier_name | |
store.rs | `: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be set according to `value`.
/// - `value`: A Boolean value, which sets the bit on `true` and clears it
/// on `false`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
#[inline(always)]
fn set<C>(&mut self, place: BitIdx<Self>, value: bool)
where C: Cursor {
let mask = *C::mask(place);
if value {
| else {
*self &= !mask;
}
}
/// Gets a specific bit in an element.
///
/// # Safety
///
/// This method cannot be called from within a `&BitSlice` context; it may
/// only be called by construction of an `&Self` reference from a `Self`
/// element directly.
///
/// # Parameters
///
/// - `place`: A bit index in the element, from `0` to `Self::MASK`. The bit
/// under this index will be retrieved as a `bool`.
///
/// # Returns
///
/// The value of the bit under `place`, as a `bool`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation to translate the index into a position.
fn get<C>(&self, place: BitIdx<Self>) -> bool
where C: Cursor {
*self & *C::mask(place) != Self::from(0)
}
/// Counts how many bits in `self` are set to `1`.
///
/// This zero-extends `self` to `u64`, and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `1`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_ones(&0u8), 0);
/// assert_eq!(BitStore::count_ones(&128u8), 1);
/// assert_eq!(BitStore::count_ones(&192u8), 2);
/// assert_eq!(BitStore::count_ones(&224u8), 3);
/// assert_eq!(BitStore::count_ones(&240u8), 4);
/// assert_eq!(BitStore::count_ones(&248u8), 5);
/// assert_eq!(BitStore::count_ones(&252u8), 6);
/// assert_eq!(BitStore::count_ones(&254u8), 7);
/// assert_eq!(BitStore::count_ones(&255u8), 8);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_ones(&self) -> usize {
u64::count_ones((*self).into()) as usize
}
/// Counts how many bits in `self` are set to `0`.
///
/// This inverts `self`, so all `0` bits are `1` and all `1` bits are `0`,
/// then zero-extends `self` to `u64` and uses the [`u64::count_ones`]
/// inherent method.
///
/// # Parameters
///
/// - `&self`
///
/// # Returns
///
/// The number of bits in `self` set to `0`. This is a `usize` instead of a
/// `u32` in order to ease arithmetic throughout the crate.
///
/// # Examples
///
/// ```rust
/// use bitvec::prelude::BitStore;
/// assert_eq!(BitStore::count_zeros(&0u8), 8);
/// assert_eq!(BitStore::count_zeros(&1u8), 7);
/// assert_eq!(BitStore::count_zeros(&3u8), 6);
/// assert_eq!(BitStore::count_zeros(&7u8), 5);
/// assert_eq!(BitStore::count_zeros(&15u8), 4);
/// assert_eq!(BitStore::count_zeros(&31u8), 3);
/// assert_eq!(BitStore::count_zeros(&63u8), 2);
/// assert_eq!(BitStore::count_zeros(&127u8), 1);
/// assert_eq!(BitStore::count_zeros(&255u8), 0);
/// ```
///
/// [`u64::count_ones`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.count_ones
#[inline(always)]
fn count_zeros(&self) -> usize {
// invert (0 becomes 1, 1 becomes 0), zero-extend, count ones
u64::count_ones((!*self).into()) as usize
}
/// Extends a single bit to fill the entire element.
///
/// # Parameters
///
/// - `bit`: The bit to extend.
///
/// # Returns
///
/// An element with all bits set to the input.
#[inline]
fn bits(bit: bool) -> Self {
if bit {
!Self::from(0)
}
else {
Self::from(0)
}
}
}
/** Marker trait to seal `BitStore` against downstream implementation.
This trait is public in the module, so that other modules in the crate can use
it, but so long as it is not exported by the crate root and this module is
private, this trait effectively forbids downstream implementation of the
`BitStore` trait.
**/
#[doc(hidden)]
pub trait Sealed {}
macro_rules! store {
( $( $t:ty , $a:ty $( ; )? );* ) => { $(
impl Sealed for $t {}
impl BitStore for $t {
const TYPENAME: &'static str = stringify!($t);
#[cfg(feature = "atomic")]
type Nucleus = $a;
#[cfg(not(feature = "atomic"))]
type Nucleus = Cell<Self>;
}
)* };
}
store![
u8, atomic::AtomicU8;
u16, atomic::AtomicU16;
u32, atomic::AtomicU32;
];
#[cfg(target_pointer_width = "64")]
store![u64, atomic::AtomicU64];
/// Type alias to the CPU word element, `u32`.
#[cfg(target_pointer_width = "32")]
pub type Word = u32;
/// Type alias to the CPU word element, `u64`.
#[cfg(target_pointer_width = "64")]
pub type Word = u64;
/** Common interface for atomic and cellular shared-mutability wrappers.
`&/mut BitSlice` contexts must use the `BitStore::Nucleus` type for all
reference production, and must route through this trait in order to access the
underlying memory. In multi-threaded contexts, this trait enforces that all
access is synchronized through atomic accesses; in single-threaded contexts,
this trait solely permits modification of an aliased element.
It is implemented on the atomic type wrappers when the `atomic` feature is set,
and implemented on the `Cell` type wrapper when the feature is missing. Coupled
with the `Send` implementation on `BitSlice`
**/
pub trait BitAccess<T>: Sized
where T: BitStore {
/// Sets a specific bit in an element low.
///
/// `BitAccess::set` calls this when its `value` is `false`; it
/// unconditionally writes a `0` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn clear_bit<C>(&self, place: BitIdx<T>)
where C: Cursor;
/// Sets a specific bit in an element high.
///
/// `BitAccess::set` calls this when its `value` is `true`; it
/// unconditionally writes a `1` bit into the electrical position that
/// `place` controls according to the `Cursor` parameter `C`.
///
/// # Type Parameters
///
/// - `C`: A `Cursor` implementation which translates `place` into a usable
/// bit-mask.
///
/// # Parameters
///
/// - `&self`
/// - `place`: The semantic bit index in the `self` element.
fn | *self |= mask;
}
| conditional_block |
game.go | ObstructionProgression/2))
}
case MistLevel:
g.PrintStyled("The air seems dense on this level.", logSpecial)
g.StoryPrint("Special event: mist level")
for i := 0; i < 20; i++ {
g.PushEvent(&posEvent{Action: MistProgression},
g.Turn+DurationMistProgression+RandInt(DurationMistProgression/2))
}
case EarthquakeLevel:
g.PushEvent(&posEvent{P: gruid.Point{DungeonWidth/2 - 15 + RandInt(30), DungeonHeight/2 - 5 + RandInt(10)}, Action: Earthquake},
g.Turn+10+RandInt(50))
}
// initialize LOS
if g.Depth == 1 {
g.PrintStyled("► Press ? for help on keys or use the mouse and [buttons].", logSpecial)
}
if g.Depth == WinDepth {
g.PrintStyled("Finally! Shaedra should be imprisoned somewhere around here.", logSpecial)
} else if g.Depth == MaxDepth {
g.PrintStyled("This the bottom floor, you now have to look for the artifact.", logSpecial)
}
g.ComputeLOS()
g.MakeMonstersAware()
g.ComputeMonsterLOS()
if !Testing { // disable when testing
g.md.updateStatusInfo()
}
}
func (g *game) CleanEvents() {
g.Events.Filter(func(ev rl.Event) bool {
switch ev.(type) {
case *monsterTurnEvent, *posEvent, *monsterStatusEvent, *playerEvent:
return false
default:
// keep player statuses events
return true
}
})
// finish current turn's other effects (like status progression)
turn := g.Turn
for !g.Events.Empty() {
ev, r := g.Events.PopR()
if r == turn {
e, ok := ev.(event)
if ok {
e.Handle(g)
}
continue
}
g.Events.PushFirst(ev, r)
break
}
g.Turn++
}
func (g *game) StairsSlice() []gruid.Point {
stairs := []gruid.Point{}
it := g.Dungeon.Grid.Iterator()
for it.Next() {
c := cell(it.Cell())
if (terrain(c) != StairCell && terrain(c) != FakeStairCell) || !explored(c) {
continue
}
stairs = append(stairs, it.P())
}
return stairs
}
type descendstyle int
const (
DescendNormal descendstyle = iota
DescendJump
DescendFall
)
func (g *game) Descend(style descendstyle) bool {
g.LevelStats()
if g.Stats.DUSpotted[g.Depth] < 3 {
AchStealthNovice.Get(g)
}
if g.Depth >= 3 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 {
AchInsomniaNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 {
AchInsomniaInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DRests[g.Depth] == 0 && g.Stats.DRests[g.Depth-1] == 0 && g.Stats.DRests[g.Depth-2] == 0 &&
g.Stats.DRests[g.Depth-3] == 0 && g.Stats.DRests[g.Depth-4] == 0 && g.Stats.DRests[g.Depth-5] == 0 {
AchInsomniaMaster.Get(g)
}
}
if g.Depth >= 3 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 {
AchAntimagicNovice.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 {
AchAntimagicInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DMagaraUses[g.Depth] == 0 && g.Stats.DMagaraUses[g.Depth-1] == 0 && g.Stats.DMagaraUses[g.Depth-2] == 0 &&
g.Stats.DMagaraUses[g.Depth-3] == 0 && g.Stats.DMagaraUses[g.Depth-4] == 0 && g.Stats.DMagaraUses[g.Depth-5] == 0 {
AchAntimagicMaster.Get(g)
}
}
if g.Depth >= 5 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 {
AchStealthInitiate.Get(g)
}
}
if g.Depth >= 8 {
if g.Stats.DUSpotted[g.Depth] < 3 && g.Stats.DUSpotted[g.Depth-1] < 3 && g.Stats.DSpotted[g.Depth-2] < 3 &&
g.Stats.DSpotted[g.Depth-3] < 3 {
AchStealthMaster.Get(g)
}
}
c := g.Dungeon.Cell(g.Player.P)
if terrain(c) == StairCell && g.Objects.Stairs[g.Player.P] == WinStair {
g.StoryPrint("Escaped!")
g.ExploredLevels = g.Depth
g.Depth = -1
return true
}
if style != DescendNormal {
g.md.AbyssFallAnimation()
g.PrintStyled("You fall into the abyss. It hurts!", logDamage)
g.StoryPrint("Fell into the abyss")
} else {
g.Print("You descend deeper in the dungeon.")
g.StoryPrint("Descended stairs")
}
g.Depth++
g.DepthPlayerTurn = 0
g.InitLevel()
g.Save()
return false
}
func (g *game) EnterWizardMode() {
g.Wizard = true
g.PrintStyled("Wizard mode activated: winner status disabled.", logSpecial)
g.StoryPrint("Entered wizard mode.")
}
func (g *game) ApplyRest() {
g.Player.HP = g.Player.HPMax()
g.Player.HPbonus = 0
g.Player.MP = g.Player.MPMax()
g.Stats.Rest++
g.Stats.DRests[g.Depth]++
g.PrintStyled("You feel fresh again after eating banana and sleeping.", logStatusEnd)
g.StoryPrintf("Rested in barrel (bananas: %d)", g.Player.Bananas)
if g.Stats.Rest == 10 {
AchSleepy.Get(g)
}
}
func (g *game) AutoPlayer() bool {
switch {
case g.Resting:
const enoughRestTurns = 25
if g.RestingTurns < enoughRestTurns {
g.RestingTurns++
return true
}
if g.RestingTurns >= enoughRestTurns {
g.ApplyRest()
}
g.Resting = false
case g.Autoexploring:
switch {
case g.AutoHalt:
// stop exploring
default:
var n *gruid.Point
var finished bool
if g.AutoexploreMapRebuild {
if g.AllExplored() {
g.Print("You finished exploring.")
break
}
sources := g.AutoexploreSources()
g.BuildAutoexploreMap(sources)
}
n, finished = g.NextAuto()
if finished {
n = nil
}
if finished && g.AllExplored() {
g.Print("You finished exploring.")
} else if n == nil {
g.Print("You could not safely reach some places.")
}
if n != nil {
again, err := g.PlayerBump(*n)
if err != nil {
g.Print(err.Error())
break
}
return !again
}
}
g.Autoexploring = false
case valid(g.AutoTarget):
if g.MoveToTarget() {
return true
}
g.AutoTarget = invalidPos
case g.AutoDir != ZP:
if g.AutoToDir() {
return true
}
g.AutoDir = ZP
}
return false
}
func (g *game) Died() bool {
| if g.Player.HP <= 0 {
if g.Wizard {
g.Player.HP = g.Player.HPMax()
g.PrintStyled("You died.", logSpecial)
g.StoryPrint("You died (wizard mode)")
} else {
g.LevelStats()
return true
}
}
return false
}
| identifier_body | |
game.go | ml = RandomSmallWalkCaveUrbanised
} else if RandInt(10) == 0 {
ml = NaturalCave
}
}
g.GenRoomTunnels(ml)
}
func (g *game) InitPlayer() {
g.Player = &player{
HP: DefaultHealth,
MP: DefaultMPmax,
Bananas: 1,
}
g.Player.LOS = map[gruid.Point]bool{}
g.Player.Statuses = map[status]int{}
g.Player.Expire = map[status]int{}
g.Player.Magaras = []magara{
{},
{},
{},
{},
}
g.GeneratedMagaras = []magaraKind{}
g.Player.Magaras[0] = g.RandomStartingMagara()
g.GeneratedMagaras = append(g.GeneratedMagaras, g.Player.Magaras[0].Kind)
g.Player.Inventory.Misc = MarevorMagara
g.Player.FOV = rl.NewFOV(visionRange(g.Player.P, TreeRange))
// Testing
//g.Player.Magaras[1] = magara{Kind: DispersalMagara, Charges: 10}
//g.Player.Magaras[2] = magara{Kind: DelayedOricExplosionMagara, Charges: 10}
//g.Player.Magaras[2] = ConfusionMagara
}
type genFlavour int
const (
GenNothing genFlavour = iota
//GenWeapon
GenAmulet
GenCloak
)
func PutRandomLevels(m map[int]bool, n int) {
for i := 0; i < n; i++ {
j := 1 + RandInt(MaxDepth)
if !m[j] {
m[j] = true
} else {
i--
}
}
}
func (g *game) InitFirstLevel() {
g.Version = Version
g.Depth++ // start at 1
g.InitPlayer()
g.AutoTarget = invalidPos
g.RaysCache = rayMap{}
g.GeneratedLore = map[int]bool{}
g.Stats.KilledMons = map[monsterKind]int{}
g.Stats.UsedMagaras = map[magaraKind]int{}
g.Stats.Achievements = map[achievement]int{}
g.Stats.Lore = map[int]bool{}
g.Stats.Statuses = map[status]int{}
g.GenPlan = [MaxDepth + 1]genFlavour{
1: GenNothing,
2: GenCloak,
3: GenNothing,
4: GenAmulet,
5: GenNothing,
6: GenCloak,
7: GenNothing,
8: GenAmulet,
9: GenNothing,
10: GenCloak,
11: GenNothing,
}
g.Params.Lore = map[int]bool{}
PutRandomLevels(g.Params.Lore, 8)
g.Params.HealthPotion = map[int]bool{}
PutRandomLevels(g.Params.HealthPotion, 5)
g.Params.MappingStone = map[int]bool{}
PutRandomLevels(g.Params.MappingStone, 3)
g.Params.Blocked = map[int]bool{}
if RandInt(10) > 0 {
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
if RandInt(10) == 0 {
// a second one sometimes!
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
g.Params.Special = []specialRoom{
noSpecialRoom, // unused (depth 0)
noSpecialRoom,
noSpecialRoom,
roomMilfids,
roomCelmists,
roomVampires,
roomHarpies,
roomTreeMushrooms,
roomShaedra,
roomCelmists,
roomMirrorSpecters,
roomArtifact,
}
if RandInt(2) == 0 {
g.Params.Special[5] = roomNixes
}
if RandInt(4) == 0 |
if RandInt(4) == 0 {
if RandInt(2) == 0 {
g.Params.Special[3] = roomFrogs
} else {
g.Params.Special[7] = roomFrogs
}
}
if RandInt(4) == 0 {
g.Params.Special[10], g.Params.Special[5] = g.Params.Special[5], g.Params.Special[10]
}
if RandInt(4) == 0 {
g.Params.Special[6], g.Params.Special[7] = g.Params.Special[7], g.Params.Special[6]
}
if RandInt(4) == 0 {
g.Params.Special[3], g.Params.Special[4] = g.Params.Special[4], g.Params.Special[3]
}
g.Params.Event = map[int]specialEvent{}
for i := 0; i < 2; i++ {
g.Params.Event[2+5*i+RandInt(5)] = specialEvent(1 + RandInt(spEvMax))
}
g.Params.Event[2+RandInt(MaxDepth-1)] = NormalLevel
g.Params.FakeStair = map[int]bool{}
if RandInt(MaxDepth) > 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) == 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
}
}
}
g.Params.ExtraBanana = map[int]int{}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]++
}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]--
}
g.Params.Windows = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Holes = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Trees = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Tables = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.NoMagara = map[int]bool{}
g.Params.NoMagara[WinDepth] = true
g.Params.Stones = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
}
}
permi := RandInt(WinDepth - 1)
switch permi {
case 0, 1, 2, 3:
g.GenPlan[permi+1], g.GenPlan[permi+2] = g.GenPlan[permi+2], g.GenPlan[permi+1]
}
if RandInt(4) == 0 {
g.GenPlan[6], g.GenPlan[7] = g.GenPlan[7], g.GenPlan[6]
}
if RandInt(4) == 0 {
g.GenPlan[MaxDepth-1], g.GenPlan[MaxDepth] = g.GenPlan[MaxDepth], g.GenPlan[MaxDepth-1]
}
g.Params.CrazyImp = 2 + RandInt(MaxDepth-2)
g.PR = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
g.PRauto = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
}
func (g *game) InitLevelStructures() {
g.MonstersPosCache = make([]int, DungeonNCells)
g.Noise = map[gruid.Point]bool{}
g.TerrainKnowledge = map[gruid.Point]cell{}
g.ExclusionsMap = map[gruid.Point | {
if g.Params.Special[5] == roomNixes {
g.Params.Special[9] = roomVampires
} else {
g.Params.Special[9] = roomNixes
}
} | conditional_block |
game.go | ml = RandomSmallWalkCaveUrbanised
} else if RandInt(10) == 0 {
ml = NaturalCave
}
}
g.GenRoomTunnels(ml)
}
func (g *game) InitPlayer() {
g.Player = &player{
HP: DefaultHealth,
MP: DefaultMPmax,
Bananas: 1,
}
g.Player.LOS = map[gruid.Point]bool{}
g.Player.Statuses = map[status]int{}
g.Player.Expire = map[status]int{}
g.Player.Magaras = []magara{
{},
{},
{},
{},
}
g.GeneratedMagaras = []magaraKind{}
g.Player.Magaras[0] = g.RandomStartingMagara()
g.GeneratedMagaras = append(g.GeneratedMagaras, g.Player.Magaras[0].Kind)
g.Player.Inventory.Misc = MarevorMagara
g.Player.FOV = rl.NewFOV(visionRange(g.Player.P, TreeRange))
// Testing
//g.Player.Magaras[1] = magara{Kind: DispersalMagara, Charges: 10}
//g.Player.Magaras[2] = magara{Kind: DelayedOricExplosionMagara, Charges: 10}
//g.Player.Magaras[2] = ConfusionMagara
}
type genFlavour int
const (
GenNothing genFlavour = iota
//GenWeapon
GenAmulet
GenCloak
)
func | (m map[int]bool, n int) {
for i := 0; i < n; i++ {
j := 1 + RandInt(MaxDepth)
if !m[j] {
m[j] = true
} else {
i--
}
}
}
func (g *game) InitFirstLevel() {
g.Version = Version
g.Depth++ // start at 1
g.InitPlayer()
g.AutoTarget = invalidPos
g.RaysCache = rayMap{}
g.GeneratedLore = map[int]bool{}
g.Stats.KilledMons = map[monsterKind]int{}
g.Stats.UsedMagaras = map[magaraKind]int{}
g.Stats.Achievements = map[achievement]int{}
g.Stats.Lore = map[int]bool{}
g.Stats.Statuses = map[status]int{}
g.GenPlan = [MaxDepth + 1]genFlavour{
1: GenNothing,
2: GenCloak,
3: GenNothing,
4: GenAmulet,
5: GenNothing,
6: GenCloak,
7: GenNothing,
8: GenAmulet,
9: GenNothing,
10: GenCloak,
11: GenNothing,
}
g.Params.Lore = map[int]bool{}
PutRandomLevels(g.Params.Lore, 8)
g.Params.HealthPotion = map[int]bool{}
PutRandomLevels(g.Params.HealthPotion, 5)
g.Params.MappingStone = map[int]bool{}
PutRandomLevels(g.Params.MappingStone, 3)
g.Params.Blocked = map[int]bool{}
if RandInt(10) > 0 {
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
if RandInt(10) == 0 {
// a second one sometimes!
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
g.Params.Special = []specialRoom{
noSpecialRoom, // unused (depth 0)
noSpecialRoom,
noSpecialRoom,
roomMilfids,
roomCelmists,
roomVampires,
roomHarpies,
roomTreeMushrooms,
roomShaedra,
roomCelmists,
roomMirrorSpecters,
roomArtifact,
}
if RandInt(2) == 0 {
g.Params.Special[5] = roomNixes
}
if RandInt(4) == 0 {
if g.Params.Special[5] == roomNixes {
g.Params.Special[9] = roomVampires
} else {
g.Params.Special[9] = roomNixes
}
}
if RandInt(4) == 0 {
if RandInt(2) == 0 {
g.Params.Special[3] = roomFrogs
} else {
g.Params.Special[7] = roomFrogs
}
}
if RandInt(4) == 0 {
g.Params.Special[10], g.Params.Special[5] = g.Params.Special[5], g.Params.Special[10]
}
if RandInt(4) == 0 {
g.Params.Special[6], g.Params.Special[7] = g.Params.Special[7], g.Params.Special[6]
}
if RandInt(4) == 0 {
g.Params.Special[3], g.Params.Special[4] = g.Params.Special[4], g.Params.Special[3]
}
g.Params.Event = map[int]specialEvent{}
for i := 0; i < 2; i++ {
g.Params.Event[2+5*i+RandInt(5)] = specialEvent(1 + RandInt(spEvMax))
}
g.Params.Event[2+RandInt(MaxDepth-1)] = NormalLevel
g.Params.FakeStair = map[int]bool{}
if RandInt(MaxDepth) > 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) == 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
}
}
}
g.Params.ExtraBanana = map[int]int{}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]++
}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]--
}
g.Params.Windows = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Holes = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Trees = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Tables = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.NoMagara = map[int]bool{}
g.Params.NoMagara[WinDepth] = true
g.Params.Stones = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
}
}
permi := RandInt(WinDepth - 1)
switch permi {
case 0, 1, 2, 3:
g.GenPlan[permi+1], g.GenPlan[permi+2] = g.GenPlan[permi+2], g.GenPlan[permi+1]
}
if RandInt(4) == 0 {
g.GenPlan[6], g.GenPlan[7] = g.GenPlan[7], g.GenPlan[6]
}
if RandInt(4) == 0 {
g.GenPlan[MaxDepth-1], g.GenPlan[MaxDepth] = g.GenPlan[MaxDepth], g.GenPlan[MaxDepth-1]
}
g.Params.CrazyImp = 2 + RandInt(MaxDepth-2)
g.PR = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
g.PRauto = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
}
func (g *game) InitLevelStructures() {
g.MonstersPosCache = make([]int, DungeonNCells)
g.Noise = map[gruid.Point]bool{}
g.TerrainKnowledge = map[gruid.Point]cell{}
g.ExclusionsMap = map[gruid.Point]bool | PutRandomLevels | identifier_name |
game.go | ml = RandomSmallWalkCaveUrbanised
} else if RandInt(10) == 0 {
ml = NaturalCave
}
}
g.GenRoomTunnels(ml)
}
func (g *game) InitPlayer() {
g.Player = &player{
HP: DefaultHealth,
MP: DefaultMPmax,
Bananas: 1,
}
g.Player.LOS = map[gruid.Point]bool{}
g.Player.Statuses = map[status]int{}
g.Player.Expire = map[status]int{}
g.Player.Magaras = []magara{
{},
{},
{},
{},
}
g.GeneratedMagaras = []magaraKind{}
g.Player.Magaras[0] = g.RandomStartingMagara()
g.GeneratedMagaras = append(g.GeneratedMagaras, g.Player.Magaras[0].Kind)
g.Player.Inventory.Misc = MarevorMagara
g.Player.FOV = rl.NewFOV(visionRange(g.Player.P, TreeRange))
// Testing
//g.Player.Magaras[1] = magara{Kind: DispersalMagara, Charges: 10}
//g.Player.Magaras[2] = magara{Kind: DelayedOricExplosionMagara, Charges: 10}
//g.Player.Magaras[2] = ConfusionMagara
}
type genFlavour int
const (
GenNothing genFlavour = iota
//GenWeapon
GenAmulet
GenCloak
)
func PutRandomLevels(m map[int]bool, n int) {
for i := 0; i < n; i++ {
j := 1 + RandInt(MaxDepth)
if !m[j] {
m[j] = true
} else {
i--
}
}
}
func (g *game) InitFirstLevel() {
g.Version = Version
g.Depth++ // start at 1
g.InitPlayer()
g.AutoTarget = invalidPos
g.RaysCache = rayMap{}
g.GeneratedLore = map[int]bool{}
g.Stats.KilledMons = map[monsterKind]int{}
g.Stats.UsedMagaras = map[magaraKind]int{}
g.Stats.Achievements = map[achievement]int{}
g.Stats.Lore = map[int]bool{}
g.Stats.Statuses = map[status]int{}
g.GenPlan = [MaxDepth + 1]genFlavour{
1: GenNothing,
2: GenCloak,
3: GenNothing,
4: GenAmulet,
5: GenNothing,
6: GenCloak,
7: GenNothing,
8: GenAmulet,
9: GenNothing,
10: GenCloak,
11: GenNothing,
}
g.Params.Lore = map[int]bool{}
PutRandomLevels(g.Params.Lore, 8)
g.Params.HealthPotion = map[int]bool{}
PutRandomLevels(g.Params.HealthPotion, 5)
g.Params.MappingStone = map[int]bool{}
PutRandomLevels(g.Params.MappingStone, 3)
g.Params.Blocked = map[int]bool{}
if RandInt(10) > 0 {
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
if RandInt(10) == 0 {
// a second one sometimes!
g.Params.Blocked[2+RandInt(WinDepth-2)] = true
}
g.Params.Special = []specialRoom{
noSpecialRoom, // unused (depth 0)
noSpecialRoom,
noSpecialRoom,
roomMilfids,
roomCelmists,
roomVampires,
roomHarpies,
roomTreeMushrooms,
roomShaedra,
roomCelmists,
roomMirrorSpecters,
roomArtifact,
}
if RandInt(2) == 0 {
g.Params.Special[5] = roomNixes
}
if RandInt(4) == 0 {
if g.Params.Special[5] == roomNixes {
g.Params.Special[9] = roomVampires
} else {
g.Params.Special[9] = roomNixes
}
}
if RandInt(4) == 0 {
if RandInt(2) == 0 {
g.Params.Special[3] = roomFrogs
} else {
g.Params.Special[7] = roomFrogs
}
}
if RandInt(4) == 0 {
g.Params.Special[10], g.Params.Special[5] = g.Params.Special[5], g.Params.Special[10]
}
if RandInt(4) == 0 {
g.Params.Special[6], g.Params.Special[7] = g.Params.Special[7], g.Params.Special[6]
}
if RandInt(4) == 0 {
g.Params.Special[3], g.Params.Special[4] = g.Params.Special[4], g.Params.Special[3]
}
g.Params.Event = map[int]specialEvent{}
for i := 0; i < 2; i++ {
g.Params.Event[2+5*i+RandInt(5)] = specialEvent(1 + RandInt(spEvMax))
}
g.Params.Event[2+RandInt(MaxDepth-1)] = NormalLevel
g.Params.FakeStair = map[int]bool{}
if RandInt(MaxDepth) > 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
if RandInt(MaxDepth) == 0 {
g.Params.FakeStair[2+RandInt(MaxDepth-2)] = true
}
}
}
g.Params.ExtraBanana = map[int]int{}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]++
}
for i := 0; i < 2; i++ {
g.Params.ExtraBanana[1+5*i+RandInt(5)]--
}
g.Params.Windows = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Windows[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Holes = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Holes[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Trees = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Trees[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.Tables = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Tables[2+RandInt(MaxDepth-1)] = true
}
}
g.Params.NoMagara = map[int]bool{}
g.Params.NoMagara[WinDepth] = true
g.Params.Stones = map[int]bool{}
if RandInt(MaxDepth) > MaxDepth/2 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
if RandInt(MaxDepth) == 0 {
g.Params.Stones[2+RandInt(MaxDepth-1)] = true
}
}
permi := RandInt(WinDepth - 1)
switch permi {
case 0, 1, 2, 3:
g.GenPlan[permi+1], g.GenPlan[permi+2] = g.GenPlan[permi+2], g.GenPlan[permi+1]
}
if RandInt(4) == 0 { | }
if RandInt(4) == 0 {
g.GenPlan[MaxDepth-1], g.GenPlan[MaxDepth] = g.GenPlan[MaxDepth], g.GenPlan[MaxDepth-1]
}
g.Params.CrazyImp = 2 + RandInt(MaxDepth-2)
g.PR = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
g.PRauto = paths.NewPathRange(gruid.NewRange(0, 0, DungeonWidth, DungeonHeight))
}
func (g *game) InitLevelStructures() {
g.MonstersPosCache = make([]int, DungeonNCells)
g.Noise = map[gruid.Point]bool{}
g.TerrainKnowledge = map[gruid.Point]cell{}
g.ExclusionsMap = map[gruid.Point]bool | g.GenPlan[6], g.GenPlan[7] = g.GenPlan[7], g.GenPlan[6] | random_line_split |
gogl.go | , nil)
// todo should use gl.RED on OpenGL, gl.ALPHA on OpenGL ES
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.Enable(gl.STENCIL_TEST)
gl.StencilMask(0xFF)
gl.Clear(gl.STENCIL_BUFFER_BIT)
gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
gl.StencilFunc(gl.EQUAL, 0, 0xFF)
gl.Disable(gl.SCISSOR_TEST)
return ctx, nil
}
// GoGLBackend is a canvas backend using Go-GL
type GoGLBackend struct {
x, y, w, h int
fx, fy, fw, fh float64
*GLContext
activateFn func()
disableTextureRenderTarget func()
}
type offscreenBuffer struct {
tex uint32
w int
h int
renderStencilBuf uint32
frameBuf uint32
alpha bool
}
// New returns a new canvas backend. x, y, w, h define the target
// rectangle in the window. ctx is a GLContext created with
// NewGLContext, but can be nil for a default one. It makes sense
// to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func New(x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {
if ctx == nil {
var err error
ctx, err = NewGLContext()
if err != nil {
return nil, err
}
}
b := &GoGLBackend{
w: w,
h: h,
fw: float64(w),
fh: float64(h),
GLContext: ctx,
}
b.activateFn = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
// todo reapply clipping since another application may have used the stencil buffer
}
b.disableTextureRenderTarget = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
}
return b, nil
}
// GoGLBackendOffscreen is a canvas backend using an offscreen
// texture
type GoGLBackendOffscreen struct {
GoGLBackend
TextureID uint32
offscrBuf offscreenBuffer
offscrImg Image
}
// NewOffscreen returns a new offscreen canvas backend. w, h define
// the size of the offscreen texture. ctx is a GLContext created
// with NewGLContext, but can be nil for a default one. It makes
// sense to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func NewOffscreen(w, h int, alpha bool, ctx *GLContext) (*GoGLBackendOffscreen, error) |
return bo, nil
}
// SetBounds updates the bounds of the canvas. This would
// usually be called for example when the window is resized
func (b *GoGLBackend) SetBounds(x, y, w, h int) {
b.x, b.y = x, y
b.fx, b.fy = float64(x), float64(y)
b.w, b.h = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
}
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.sh | {
b, err := New(0, 0, w, h, ctx)
if err != nil {
return nil, err
}
bo := &GoGLBackendOffscreen{GoGLBackend: *b}
bo.offscrBuf.alpha = alpha
bo.offscrImg.flip = true
bo.activateFn = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
gl.Viewport(0, 0, int32(bo.w), int32(bo.h))
bo.offscrImg.w = bo.offscrBuf.w
bo.offscrImg.h = bo.offscrBuf.h
bo.offscrImg.tex = bo.offscrBuf.tex
bo.TextureID = bo.offscrBuf.tex
}
bo.disableTextureRenderTarget = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
} | identifier_body |
gogl.go | , nil)
// todo should use gl.RED on OpenGL, gl.ALPHA on OpenGL ES
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.Enable(gl.STENCIL_TEST)
gl.StencilMask(0xFF)
gl.Clear(gl.STENCIL_BUFFER_BIT)
gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
gl.StencilFunc(gl.EQUAL, 0, 0xFF)
gl.Disable(gl.SCISSOR_TEST)
return ctx, nil
}
// GoGLBackend is a canvas backend using Go-GL
type GoGLBackend struct {
x, y, w, h int
fx, fy, fw, fh float64
*GLContext
activateFn func()
disableTextureRenderTarget func()
}
type offscreenBuffer struct {
tex uint32
w int
h int
renderStencilBuf uint32
frameBuf uint32
alpha bool
}
// New returns a new canvas backend. x, y, w, h define the target
// rectangle in the window. ctx is a GLContext created with
// NewGLContext, but can be nil for a default one. It makes sense
// to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func New(x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {
if ctx == nil {
var err error
ctx, err = NewGLContext()
if err != nil {
return nil, err
}
}
b := &GoGLBackend{
w: w,
h: h,
fw: float64(w),
fh: float64(h),
GLContext: ctx,
}
b.activateFn = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
// todo reapply clipping since another application may have used the stencil buffer
}
b.disableTextureRenderTarget = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
}
return b, nil
}
// GoGLBackendOffscreen is a canvas backend using an offscreen
// texture
type GoGLBackendOffscreen struct {
GoGLBackend
TextureID uint32
offscrBuf offscreenBuffer
offscrImg Image
}
// NewOffscreen returns a new offscreen canvas backend. w, h define
// the size of the offscreen texture. ctx is a GLContext created
// with NewGLContext, but can be nil for a default one. It makes
// sense to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func NewOffscreen(w, h int, alpha bool, ctx *GLContext) (*GoGLBackendOffscreen, error) {
b, err := New(0, 0, w, h, ctx)
if err != nil {
return nil, err
}
bo := &GoGLBackendOffscreen{GoGLBackend: *b}
bo.offscrBuf.alpha = alpha
bo.offscrImg.flip = true
bo.activateFn = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
gl.Viewport(0, 0, int32(bo.w), int32(bo.h))
bo.offscrImg.w = bo.offscrBuf.w
bo.offscrImg.h = bo.offscrBuf.h
bo.offscrImg.tex = bo.offscrBuf.tex
bo.TextureID = bo.offscrBuf.tex
}
bo.disableTextureRenderTarget = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
}
return bo, nil
}
// SetBounds updates the bounds of the canvas. This would
// usually be called for example when the window is resized
func (b *GoGLBackend) SetBounds(x, y, w, h int) {
b.x, b.y = x, y
b.fx, b.fy = float64(x), float64(y)
b.w, b.h = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext |
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.sh | {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
} | conditional_block |
gogl.go | , nil)
// todo should use gl.RED on OpenGL, gl.ALPHA on OpenGL ES
gl.Enable(gl.BLEND)
gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
gl.Enable(gl.STENCIL_TEST)
gl.StencilMask(0xFF)
gl.Clear(gl.STENCIL_BUFFER_BIT)
gl.StencilOp(gl.KEEP, gl.KEEP, gl.KEEP)
gl.StencilFunc(gl.EQUAL, 0, 0xFF)
gl.Disable(gl.SCISSOR_TEST)
return ctx, nil
}
// GoGLBackend is a canvas backend using Go-GL
type GoGLBackend struct {
x, y, w, h int
fx, fy, fw, fh float64
*GLContext
activateFn func()
disableTextureRenderTarget func()
}
type offscreenBuffer struct {
tex uint32
w int
h int
renderStencilBuf uint32
frameBuf uint32
alpha bool
}
// New returns a new canvas backend. x, y, w, h define the target
// rectangle in the window. ctx is a GLContext created with
// NewGLContext, but can be nil for a default one. It makes sense
// to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func | (x, y, w, h int, ctx *GLContext) (*GoGLBackend, error) {
if ctx == nil {
var err error
ctx, err = NewGLContext()
if err != nil {
return nil, err
}
}
b := &GoGLBackend{
w: w,
h: h,
fw: float64(w),
fh: float64(h),
GLContext: ctx,
}
b.activateFn = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
// todo reapply clipping since another application may have used the stencil buffer
}
b.disableTextureRenderTarget = func() {
gl.BindFramebuffer(gl.FRAMEBUFFER, 0)
gl.Viewport(int32(b.x), int32(b.y), int32(b.w), int32(b.h))
}
return b, nil
}
// GoGLBackendOffscreen is a canvas backend using an offscreen
// texture
type GoGLBackendOffscreen struct {
GoGLBackend
TextureID uint32
offscrBuf offscreenBuffer
offscrImg Image
}
// NewOffscreen returns a new offscreen canvas backend. w, h define
// the size of the offscreen texture. ctx is a GLContext created
// with NewGLContext, but can be nil for a default one. It makes
// sense to pass one in when using for example an onscreen and an
// offscreen backend using the same GL context.
func NewOffscreen(w, h int, alpha bool, ctx *GLContext) (*GoGLBackendOffscreen, error) {
b, err := New(0, 0, w, h, ctx)
if err != nil {
return nil, err
}
bo := &GoGLBackendOffscreen{GoGLBackend: *b}
bo.offscrBuf.alpha = alpha
bo.offscrImg.flip = true
bo.activateFn = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
gl.Viewport(0, 0, int32(bo.w), int32(bo.h))
bo.offscrImg.w = bo.offscrBuf.w
bo.offscrImg.h = bo.offscrBuf.h
bo.offscrImg.tex = bo.offscrBuf.tex
bo.TextureID = bo.offscrBuf.tex
}
bo.disableTextureRenderTarget = func() {
bo.enableTextureRenderTarget(&bo.offscrBuf)
}
return bo, nil
}
// SetBounds updates the bounds of the canvas. This would
// usually be called for example when the window is resized
func (b *GoGLBackend) SetBounds(x, y, w, h int) {
b.x, b.y = x, y
b.fx, b.fy = float64(x), float64(y)
b.w, b.h = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
}
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.shd | New | identifier_name |
gogl.go | = w, h
b.fw, b.fh = float64(w), float64(h)
if b == activeContext {
gl.Viewport(0, 0, int32(b.w), int32(b.h))
gl.Clear(gl.STENCIL_BUFFER_BIT)
}
}
// SetSize updates the size of the offscreen texture
func (b *GoGLBackendOffscreen) SetSize(w, h int) {
b.GoGLBackend.SetBounds(0, 0, w, h)
b.offscrImg.w = b.offscrBuf.w
b.offscrImg.h = b.offscrBuf.h
}
// Size returns the size of the window or offscreen
// texture
func (b *GoGLBackend) Size() (int, int) {
return b.w, b.h
}
func glError() error {
glErr := gl.GetError()
if glErr != gl.NO_ERROR {
return fmt.Errorf("GL Error: %x", glErr)
}
return nil
}
// Activate only needs to be called if there is other
// code also using the GL state
func (b *GoGLBackend) Activate() {
b.activate()
}
var activeContext *GoGLBackend
func (b *GoGLBackend) activate() {
if activeContext != b {
activeContext = b
b.activateFn()
}
}
// Delete deletes the offscreen texture. After calling this
// the backend can no longer be used
func (b *GoGLBackendOffscreen) Delete() {
gl.DeleteTextures(1, &b.offscrBuf.tex)
gl.DeleteFramebuffers(1, &b.offscrBuf.frameBuf)
gl.DeleteRenderbuffers(1, &b.offscrBuf.renderStencilBuf)
}
// CanUseAsImage returns true if the given backend can be
// directly used by this backend to avoid a conversion.
// Used internally
func (b *GoGLBackend) CanUseAsImage(b2 backendbase.Backend) bool {
_, ok := b2.(*GoGLBackendOffscreen)
return ok
}
// AsImage returns nil, since this backend cannot be directly
// used as an image. Used internally
func (b *GoGLBackend) AsImage() backendbase.Image {
return nil
}
// AsImage returns an implementation of the Image interface
// that can be used to render this offscreen texture
// directly. Used internally
func (b *GoGLBackendOffscreen) AsImage() backendbase.Image {
return &b.offscrImg
}
func (b *GoGLBackend) useShader(style *backendbase.FillStyle, tf [9]float32, useAlpha bool, alphaTexSlot int32) (vertexLoc, alphaTexCoordLoc uint32) {
gl.UseProgram(b.shd.ID)
gl.Uniform2f(b.shd.CanvasSize, float32(b.fw), float32(b.fh))
gl.UniformMatrix3fv(b.shd.Matrix, 1, false, &tf[0])
if useAlpha {
gl.Uniform1i(b.shd.UseAlphaTex, 1)
gl.Uniform1i(b.shd.AlphaTex, alphaTexSlot)
} else {
gl.Uniform1i(b.shd.UseAlphaTex, 0)
}
gl.Uniform1f(b.shd.GlobalAlpha, float32(style.Color.A)/255)
if lg := style.LinearGradient; lg != nil {
lg := lg.(*LinearGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, lg.tex)
from := backendbase.Vec{style.Gradient.X0, style.Gradient.Y0}
to := backendbase.Vec{style.Gradient.X1, style.Gradient.Y1}
dir := to.Sub(from)
length := dir.Len()
dir = dir.Mulf(1 / length)
gl.Uniform2f(b.shd.From, float32(from[0]), float32(from[1]))
gl.Uniform2f(b.shd.Dir, float32(dir[0]), float32(dir[1]))
gl.Uniform1f(b.shd.Len, float32(length))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncLinearGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if rg := style.RadialGradient; rg != nil {
rg := rg.(*RadialGradient)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, rg.tex)
gl.Uniform2f(b.shd.From, float32(style.Gradient.X0), float32(style.Gradient.Y0))
gl.Uniform2f(b.shd.To, float32(style.Gradient.X1), float32(style.Gradient.Y1))
gl.Uniform1f(b.shd.RadFrom, float32(style.Gradient.RadFrom))
gl.Uniform1f(b.shd.RadTo, float32(style.Gradient.RadTo))
gl.Uniform1i(b.shd.Gradient, 0)
gl.Uniform1i(b.shd.Func, shdFuncRadialGradient)
return b.shd.Vertex, b.shd.TexCoord
}
if ip := style.ImagePattern; ip != nil {
ipd := ip.(*ImagePattern).data
img := ipd.Image.(*Image)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, img.tex)
gl.Uniform2f(b.shd.ImageSize, float32(img.w), float32(img.h))
gl.Uniform1i(b.shd.Image, 0)
var f32mat [9]float32
for i, v := range ipd.Transform {
f32mat[i] = float32(v)
}
gl.UniformMatrix3fv(b.shd.ImageTransform, 1, false, &f32mat[0])
switch ipd.Repeat {
case backendbase.Repeat:
gl.Uniform2f(b.shd.Repeat, 1, 1)
case backendbase.RepeatX:
gl.Uniform2f(b.shd.Repeat, 1, 0)
case backendbase.RepeatY:
gl.Uniform2f(b.shd.Repeat, 0, 1)
case backendbase.NoRepeat:
gl.Uniform2f(b.shd.Repeat, 0, 0)
}
gl.Uniform1i(b.shd.Func, shdFuncImagePattern)
return b.shd.Vertex, b.shd.TexCoord
}
cr := float32(style.Color.R) / 255
cg := float32(style.Color.G) / 255
cb := float32(style.Color.B) / 255
ca := float32(style.Color.A) / 255
gl.Uniform4f(b.shd.Color, cr, cg, cb, ca)
gl.Uniform1f(b.shd.GlobalAlpha, 1)
gl.Uniform1i(b.shd.Func, shdFuncSolid)
return b.shd.Vertex, b.shd.TexCoord
}
func (b *GoGLBackend) enableTextureRenderTarget(offscr *offscreenBuffer) {
if offscr.w == b.w && offscr.h == b.h {
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
return
}
if b.w == 0 || b.h == 0 {
return
}
if offscr.w != 0 && offscr.h != 0 {
gl.DeleteTextures(1, &offscr.tex)
gl.DeleteFramebuffers(1, &offscr.frameBuf)
gl.DeleteRenderbuffers(1, &offscr.renderStencilBuf)
}
offscr.w = b.w
offscr.h = b.h
gl.ActiveTexture(gl.TEXTURE0)
gl.GenTextures(1, &offscr.tex)
gl.BindTexture(gl.TEXTURE_2D, offscr.tex)
// todo do non-power-of-two textures work everywhere?
if offscr.alpha {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, int32(b.w), int32(b.h), 0, gl.RGBA, gl.UNSIGNED_BYTE, nil)
} else {
gl.TexImage2D(gl.TEXTURE_2D, 0, gl.RGB, int32(b.w), int32(b.h), 0, gl.RGB, gl.UNSIGNED_BYTE, nil)
}
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
gl.GenFramebuffers(1, &offscr.frameBuf)
gl.BindFramebuffer(gl.FRAMEBUFFER, offscr.frameBuf)
gl.GenRenderbuffers(1, &offscr.renderStencilBuf)
gl.BindRenderbuffer(gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.RenderbufferStorage(gl.RENDERBUFFER, gl.STENCIL_INDEX8, int32(b.w), int32(b.h))
gl.FramebufferRenderbuffer(gl.FRAMEBUFFER, gl.STENCIL_ATTACHMENT, gl.RENDERBUFFER, offscr.renderStencilBuf)
gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, offscr.tex, 0) | random_line_split | ||
main-ipc.ts | the window
*/
ipc.on('minimize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().minimize();
}
});
/**
* Open the explorer to the relevant file
*/
ipc.on('open-in-explorer', (event, fullPath: string) => {
shell.showItemInFolder(fullPath);
});
/**
* Open a URL in system's default browser
*/
ipc.on('please-open-url', (event, urlToOpen: string): void => {
shell.openExternal(urlToOpen, { activate: true });
});
/**
* Maximize the window
*/
ipc.on('maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().maximize();
}
});
/**
* Open a particular video file clicked inside Angular
*/
ipc.on('open-media-file', (event, fullFilePath) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
shell.openItem(path.normalize(fullFilePath)); // normalize because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function notifyFileDeleted(event, fileToDelete, item) |
/**
* Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
| {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item);
}
});
} | identifier_body |
main-ipc.ts | imize the window
*/
ipc.on('minimize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().minimize();
}
});
/**
* Open the explorer to the relevant file
*/
ipc.on('open-in-explorer', (event, fullPath: string) => {
shell.showItemInFolder(fullPath);
});
/**
* Open a URL in system's default browser
*/
ipc.on('please-open-url', (event, urlToOpen: string): void => {
shell.openExternal(urlToOpen, { activate: true });
});
/**
* Maximize the window
*/
ipc.on('maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().maximize();
}
});
/**
* Open a particular video file clicked inside Angular
*/
ipc.on('open-media-file', (event, fullFilePath) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
shell.openItem(path.normalize(fullFilePath)); // normalize because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function notifyFileDeleted(event, fileToDelete, item) {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item); | * Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
| }
});
}
/** | random_line_split |
main-ipc.ts | the window
*/
ipc.on('minimize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().minimize();
}
});
/**
* Open the explorer to the relevant file
*/
ipc.on('open-in-explorer', (event, fullPath: string) => {
shell.showItemInFolder(fullPath);
});
/**
* Open a URL in system's default browser
*/
ipc.on('please-open-url', (event, urlToOpen: string): void => {
shell.openExternal(urlToOpen, { activate: true });
});
/**
* Maximize the window
*/
ipc.on('maximize-window', (event) => {
if (BrowserWindow.getFocusedWindow()) {
BrowserWindow.getFocusedWindow().maximize();
}
});
/**
* Open a particular video file clicked inside Angular
*/
ipc.on('open-media-file', (event, fullFilePath) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
shell.openItem(path.normalize(fullFilePath)); // normalize because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function | (event, fileToDelete, item) {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item);
}
});
}
/**
* Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
| notifyFileDeleted | identifier_name |
main-ipc.ts | because on windows, the path sometimes is mixing `\` and `/`
// shell.openPath(path.normalize(fullFilePath)); // Electron 9
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Open a particular video file clicked inside Angular at particular timestamp
*/
ipc.on('open-media-file-at-timestamp', (event, executablePath, fullFilePath: string, args: string) => {
fs.access(fullFilePath, fs.constants.F_OK, (err: any) => {
if (!err) {
const cmdline: string = `"${path.normalize(executablePath)}" "${path.normalize(fullFilePath)}" ${args}`;
console.log(cmdline);
exec(cmdline);
} else {
event.sender.send('file-not-found');
}
});
});
/**
* Handle dragging a file out of VHA into a video editor (e.g. Vegas or Premiere)
*/
ipc.on('drag-video-out-of-electron', (event, filePath): void => {
console.log(filePath);
event.sender.startDrag({
file: filePath,
icon: './src/assets/logo.png'
});
});
/**
* Select default video player
*/
ipc.on('select-default-video-player', (event) => {
console.log('asking for default video player');
dialog.showOpenDialog(win, {
title: systemMessages.selectDefaultPlayer, // TODO: check if errors out now that this is in `main-ipc.ts`
filters: [
{
name: 'Executable', // TODO: i18n fixme
extensions: ['exe', 'app']
}, {
name: 'All files', // TODO: i18n fixme
extensions: ['*']
}
],
properties: ['openFile']
}).then(result => {
const executablePath: string = result.filePaths[0];
if (executablePath) {
event.sender.send('preferred-video-player-returning', executablePath);
}
}).catch(err => {});
});
/**
* Create and play the playlist
* 1. filter out *FOLDER*
* 2. save .pls file
* 3. ask OS to open the .pls file
*/
ipc.on('please-create-playlist', (event, playlist: ImageElement[], sourceFolderMap: InputSources, execPath: string) => {
const cleanPlaylist: ImageElement[] = playlist.filter((element: ImageElement) => {
return element.cleanName !== '*FOLDER*';
});
const savePath: string = path.join(GLOBALS.settingsPath, 'temp.pls');
if (cleanPlaylist.length) {
createDotPlsFile(savePath, cleanPlaylist, sourceFolderMap, () => {
if (execPath) { // if `preferredVideoPlayer` is sent
const cmdline: string = `"${path.normalize(execPath)}" "${path.normalize(savePath)}"`;
console.log(cmdline);
exec(cmdline);
} else {
shell.openItem(savePath);
// shell.openPath(savePath); // Electron 9
}
});
}
});
/**
* Delete file from computer (send to recycling bin / trash) or dangerously delete (bypass trash)
*/
ipc.on('delete-video-file', (event, basePath: string, item: ImageElement, dangerousDelete: boolean): void => {
const fileToDelete = path.join(basePath, item.partialPath, item.fileName);
if (dangerousDelete) {
fs.unlink(fileToDelete, (err) => {
if (err) {
console.log('ERROR:', fileToDelete + ' was NOT deleted');
} else {
notifyFileDeleted(event, fileToDelete, item);
}
});
} else {
(async () => {
await trash(fileToDelete);
notifyFileDeleted(event, fileToDelete, item);
})();
}
});
/**
* Helper function for `delete-video-file`
* @param event
* @param fileToDelete
* @param item
*/
function notifyFileDeleted(event, fileToDelete, item) {
fs.access(fileToDelete, fs.constants.F_OK, (err: any) => {
if (err) {
console.log('FILE DELETED SUCCESS !!!')
event.sender.send('file-deleted', item);
}
});
}
/**
* Method to replace thumbnail of a particular item
*/
ipc.on('replace-thumbnail', (event, pathToIncomingJpg: string, item: ImageElement) => {
const fileToReplace: string = path.join(
GLOBALS.selectedOutputFolder,
'vha-' + GLOBALS.hubName,
'thumbnails',
item.hash + '.jpg'
);
const height: number = GLOBALS.screenshotSettings.height;
replaceThumbnailWithNewImage(fileToReplace, pathToIncomingJpg, height)
.then(success => {
if (success) {
event.sender.send('thumbnail-replaced');
}
})
.catch((err) => {});
});
/**
* Summon system modal to choose INPUT directory
* where all the videos are located
*/
ipc.on('choose-input', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('input-folder-chosen', inputDirPath);
}
}).catch(err => {});
});
/**
* Summon system modal to choose NEW input directory for a now-disconnected folder
* where all the videos are located
*/
ipc.on('reconnect-this-folder', (event, inputSource: number) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const inputDirPath: string = result.filePaths[0];
if (inputDirPath) {
event.sender.send('old-folder-reconnected', inputSource, inputDirPath);
}
}).catch(err => {});
});
/**
* Stop watching a particular folder
*/
ipc.on('stop-watching-folder', (event, watchedFolderIndex: number) => {
console.log('stop watching:', watchedFolderIndex);
closeWatcher(watchedFolderIndex);
});
/**
* Stop watching a particular folder
*/
ipc.on('start-watching-folder', (event, watchedFolderIndex: string, path: string, persistent: boolean) => {
// annoyingly it's not a number : ^^^^^^^^^^^^^^^^^^ -- because object keys are strings :(
console.log('start watching:', watchedFolderIndex, path, persistent);
startWatcher(parseInt(watchedFolderIndex, 10), path, persistent);
});
/**
* extract any missing thumbnails
*/
ipc.on('add-missing-thumbnails', (event, finalArray: ImageElement[], extractClips: boolean) => {
extractAnyMissingThumbs(finalArray);
});
/**
* Remove any thumbnails for files no longer present in the hub
*/
ipc.on('clean-old-thumbnails', (event, finalArray: ImageElement[]) => {
// !!! WARNING
const screenshotOutputFolder: string = path.join(GLOBALS.selectedOutputFolder, 'vha-' + GLOBALS.hubName);
// !! ^^^^^^^^^^^^^^^^^^^^^^ - make sure this points to the folder with screenshots only!
const allHashes: Map<string, 1> = new Map();
finalArray
.filter((element: ImageElement) => { return !element.deleted })
.forEach((element: ImageElement) => {
allHashes.set(element.hash, 1);
});
removeThumbnailsNotInHub(allHashes, screenshotOutputFolder); // WARNING !!! this function will delete stuff
});
/**
* Summon system modal to choose OUTPUT directory
* where the final .vha2 file, vha-folder, and all screenshots will be saved
*/
ipc.on('choose-output', (event) => {
dialog.showOpenDialog(win, {
properties: ['openDirectory']
}).then(result => {
const outputDirPath: string = result.filePaths[0];
if (outputDirPath) {
event.sender.send('output-folder-chosen', outputDirPath);
}
}).catch(err => {});
});
/**
* Try to rename the particular file
*/
ipc.on('try-to-rename-this-file', (event, sourceFolder: string, relPath: string, file: string, renameTo: string, index: number): void => {
console.log('renaming file:');
const original: string = path.join(sourceFolder, relPath, file);
const newName: string = path.join(sourceFolder, relPath, renameTo);
console.log(original);
console.log(newName);
let success = true;
let errMsg: string;
// check if already exists first
if (fs.existsSync(newName)) {
console.log('some file already EXISTS WITH THAT NAME !!!');
success = false;
errMsg = 'RIGHTCLICK.errorFileNameExists';
} else {
try {
fs.renameSync(original, newName);
} catch (err) {
success = false;
console.log(err);
if (err.code === 'ENOENT') {
// const pathObj = path.parse(err.path);
// console.log(pathObj);
errMsg = 'RIGHTCLICK.errorFileNotFound';
} else | {
errMsg = 'RIGHTCLICK.errorSomeError';
} | conditional_block | |
helpers.go | .ParseCIDR(network.PodCIDR())
if err != nil {
dPodcidr = nil
}
_, dServicecidr, err = net.ParseCIDR(network.ServiceCIDR())
if err != nil {
dServicecidr = nil
}
dhostPrefix, _ = network.GetHostPrefix()
return dMachinecidr, dPodcidr, dServicecidr, dhostPrefix, computeInstanceType
}
func (c *Client) LogEvent(key string, body map[string]string) {
event, err := cmv1.NewEvent().Key(key).Body(body).Build()
if err == nil {
_, _ = c.ocm.ClustersMgmt().V1().
Events().
Add().
Body(event).
Send()
}
}
func (c *Client) GetCurrentAccount() (*amsv1.Account, error) {
response, err := c.ocm.AccountsMgmt().V1().
CurrentAccount().
Get().
Send()
if err != nil {
if response.Status() == http.StatusNotFound {
return nil, nil
}
return nil, handleErr(response.Error(), err)
}
return response.Body(), nil
}
func (c *Client) GetCurrentOrganization() (id string, externalID string, err error) {
acctResponse, err := c.GetCurrentAccount()
if err != nil {
return
}
id = acctResponse.Organization().ID() | func (c *Client) IsCapabilityEnabled(capability string) (enabled bool, err error) {
organizationID, _, err := c.GetCurrentOrganization()
if err != nil {
return
}
isCapabilityEnable, err := c.isCapabilityEnabled(capability, organizationID)
if err != nil {
return
}
if !isCapabilityEnable {
return false, nil
}
return true, nil
}
func (c *Client) isCapabilityEnabled(capabilityName string, orgID string) (bool, error) {
capabilityResponse, err := c.ocm.AccountsMgmt().V1().Organizations().
Organization(orgID).Get().Parameter("fetchCapabilities", true).Send()
if err != nil {
return false, handleErr(capabilityResponse.Error(), err)
}
if len(capabilityResponse.Body().Capabilities()) > 0 {
for _, capability := range capabilityResponse.Body().Capabilities() {
if capability.Name() == capabilityName {
return capability.Value() == "true", nil
}
}
}
return false, nil
}
func (c *Client) UnlinkUserRoleFromAccount(accountID string, roleARN string) error {
linkedRoles, err := c.GetAccountLinkedUserRoles(accountID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(USERRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role ARN '%s' is not linked with the current account '%s'", roleARN, accountID)
}
func (c *Client) LinkAccountRole(accountID string, roleARN string) error {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Labels("sts_user_role").Get().Send()
if err != nil && resp.Status() != 404 {
if resp.Status() == 403 {
return errors.Forbidden.UserErrorf("%v", err)
}
return handleErr(resp.Error(), err)
}
existingARN := resp.Body().Value()
exists := false
if existingARN != "" {
existingARNArr := strings.Split(existingARN, ",")
if len(existingARNArr) > 0 {
for _, value := range existingARNArr {
if value == roleARN {
exists = true
break
}
}
}
}
if exists {
return nil
}
if existingARN != "" {
roleARN = existingARN + "," + roleARN
}
labelBuilder, err := amsv1.NewLabel().Key("sts_user_role").Value(roleARN).Build()
if err != nil {
return err
}
_, err = c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Add().Body(labelBuilder).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
return err
}
func (c *Client) UnlinkOCMRoleFromOrg(orgID string, roleARN string) error {
linkedRoles, err := c.GetOrganizationLinkedOCMRoles(orgID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role-arn '%s' is not linked with the organization account '%s'", roleARN, orgID)
}
func (c *Client) LinkOrgToRole(orgID string, roleARN string) (bool, error) {
parsedARN, err := arn.Parse(roleARN)
if err != nil {
return false, err
}
exists, existingARN, selectedARN, err := c.CheckIfAWSAccountExists(orgID, parsedARN.AccountID)
if err != nil {
return false, err
}
if exists {
if selectedARN != roleARN {
return false, errors.UserErrorf("User organization '%s' has role-arn '%s' associated. "+
"Only one role can be linked per AWS account per organization", orgID, selectedARN)
}
return false, nil
}
if existingARN != "" {
roleARN = existingARN + "," + roleARN
}
labelBuilder, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(roleARN).Build()
if err != nil {
return false, err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Add().Body(labelBuilder).Send()
if err != nil {
return false, handleErr(resp.Error(), err)
}
return true, nil
}
func (c *Client) GetAccountLinkedUserRoles(accountID string) ([]string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Labels(USERRoleLabel).Get().Send()
if err != nil && resp.Status() != http.StatusNotFound {
return nil, handleErr(resp.Error(), err)
}
return strings.Split(resp.Body().Value(), ","), nil
}
func (c *Client) GetOrganizationLinkedOCMRoles(orgID string) ([]string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Labels(OCMRoleLabel).Get().Send()
if err != nil && resp.Status() != http.StatusNotFound {
return nil, err
}
return strings.Split(resp.Body().Value(), ","), nil
}
func (c *Client) CheckIfAWSAccountExists(orgID string, awsAccountID string) (bool, string, string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Labels(OCMRoleLabel).Get().Send()
if err != nil && resp.Status() != 404 {
if resp.Status() == 403 {
return false, "", "", errors.Forbidden.UserErrorf("%v", err)
}
return false, "", "", handleErr(resp.Error(), err)
}
existingARN := resp.Body().Value()
exists := false | externalID = acctResponse.Organization().ExternalID()
return
}
| random_line_split |
helpers.go | segments := version.Segments()
return fmt.Sprintf("%d.%d", segments[0], segments[1])
}
func CheckSupportedVersion(clusterVersion string, operatorVersion string) (bool, error) {
v1, err := semver.NewVersion(clusterVersion)
if err != nil {
return false, err
}
v2, err := semver.NewVersion(operatorVersion)
if err != nil {
return false, err
}
//Cluster version is greater than or equal to operator version
return v1.GreaterThanOrEqual(v2), nil
}
func (c *Client) GetPolicies(policyType string) (map[string]*cmv1.AWSSTSPolicy, error) {
query := fmt.Sprintf("policy_type = '%s'", policyType)
m := make(map[string]*cmv1.AWSSTSPolicy)
stmt := c.ocm.ClustersMgmt().V1().AWSInquiries().STSPolicies().List()
if policyType != "" {
stmt = stmt.Search(query)
}
accountRolePoliciesResponse, err := stmt.Send()
if err != nil {
return m, handleErr(accountRolePoliciesResponse.Error(), err)
}
accountRolePoliciesResponse.Items().Each(func(awsPolicy *cmv1.AWSSTSPolicy) bool {
m[awsPolicy.ID()] = awsPolicy
return true
})
return m, nil
}
// The actual values might differ from classic to hcp
// prefer using GetCredRequests(isHypershift bool) when there is prior knowledge of the topology
func (c *Client) GetAllCredRequests() (map[string]*cmv1.STSOperator, error) {
result := make(map[string]*cmv1.STSOperator)
classic, err := c.GetCredRequests(false)
if err != nil {
return result, err
}
hcp, err := c.GetCredRequests(true)
if err != nil {
return result, err
}
for key, value := range classic {
result[key] = value
}
for key, value := range hcp {
result[key] = value
}
return result, nil
}
func (c *Client) GetCredRequests(isHypershift bool) (map[string]*cmv1.STSOperator, error) {
m := make(map[string]*cmv1.STSOperator)
stsCredentialResponse, err := c.ocm.ClustersMgmt().
V1().
AWSInquiries().
STSCredentialRequests().
List().
Parameter("is_hypershift", isHypershift).
Send()
if err != nil {
return m, handleErr(stsCredentialResponse.Error(), err)
}
stsCredentialResponse.Items().Each(func(stsCredentialRequest *cmv1.STSCredentialRequest) bool {
m[stsCredentialRequest.Name()] = stsCredentialRequest.Operator()
return true
})
return m, nil
}
func (c *Client) FindMissingOperatorRolesForUpgrade(cluster *cmv1.Cluster,
newMinorVersion string) (map[string]*cmv1.STSOperator, error) {
missingRoles := make(map[string]*cmv1.STSOperator)
credRequests, err := c.GetCredRequests(cluster.Hypershift().Enabled())
if err != nil {
return nil, errors.Errorf("Error getting operator credential request from OCM %s", err)
}
for credRequest, operator := range credRequests {
if operator.MinVersion() != "" {
clusterUpgradeVersion, err := semver.NewVersion(newMinorVersion)
if err != nil {
return nil, err
}
operatorMinVersion, err := semver.NewVersion(operator.MinVersion())
if err != nil {
return nil, err
}
if clusterUpgradeVersion.GreaterThanOrEqual(operatorMinVersion) {
if !isOperatorRoleAlreadyExist(cluster, operator) {
missingRoles[credRequest] = operator
}
}
}
}
return missingRoles, nil
}
func (c *Client) createCloudProviderDataBuilder(roleARN string, awsClient aws.Client,
externalID string) (*cmv1.CloudProviderDataBuilder, error) {
var awsBuilder *cmv1.AWSBuilder
if roleARN != "" {
stsBuilder := cmv1.NewSTS().RoleARN(roleARN)
if externalID != "" {
stsBuilder = stsBuilder.ExternalID(externalID)
}
awsBuilder = cmv1.NewAWS().STS(stsBuilder)
} else {
accessKeys, err := awsClient.GetAWSAccessKeys()
if err != nil {
return &cmv1.CloudProviderDataBuilder{}, err
}
awsBuilder = cmv1.NewAWS().AccessKeyID(accessKeys.AccessKeyID).SecretAccessKey(accessKeys.SecretAccessKey)
}
return cmv1.NewCloudProviderData().AWS(awsBuilder), nil
}
func isOperatorRoleAlreadyExist(cluster *cmv1.Cluster, operator *cmv1.STSOperator) bool {
for _, role := range cluster.AWS().STS().OperatorIAMRoles() {
//FIXME: Check it does not exist on AWS itself too
// the iam roles will only return up to the version of the cluster
if role.Namespace() == operator.Namespace() && role.Name() == operator.Name() {
return true
}
}
return false
}
const (
BYOVPCSingleAZSubnetsCount = 2
BYOVPCMultiAZSubnetsCount = 6
privateLinkSingleAZSubnetsCount = 1
privateLinkMultiAZSubnetsCount = 3
)
func ValidateSubnetsCount(multiAZ bool, privateLink bool, subnetsInputCount int) error {
if privateLink {
if multiAZ && subnetsInputCount != privateLinkMultiAZSubnetsCount {
return fmt.Errorf("The number of subnets for a multi-AZ private link cluster should be %d, "+
"instead received: %d", privateLinkMultiAZSubnetsCount, subnetsInputCount)
}
if !multiAZ && subnetsInputCount != privateLinkSingleAZSubnetsCount {
return fmt.Errorf("The number of subnets for a single AZ private link cluster should be %d, "+
"instead received: %d", privateLinkSingleAZSubnetsCount, subnetsInputCount)
}
} else {
if multiAZ && subnetsInputCount != BYOVPCMultiAZSubnetsCount {
return fmt.Errorf("The number of subnets for a multi-AZ cluster should be %d, "+
"instead received: %d", BYOVPCMultiAZSubnetsCount, subnetsInputCount)
}
if !multiAZ && subnetsInputCount != BYOVPCSingleAZSubnetsCount {
return fmt.Errorf("The number of subnets for a single AZ cluster should be %d, "+
"instead received: %d", BYOVPCSingleAZSubnetsCount, subnetsInputCount)
}
}
return nil
}
func ValidateHostedClusterSubnets(awsClient aws.Client, isPrivate bool, subnetIDs []string) (int, error) {
if isPrivate && len(subnetIDs) < 1 {
return 0, fmt.Errorf("The number of subnets for a private hosted cluster should be at least one")
}
if !isPrivate && len(subnetIDs) < 2 {
return 0, fmt.Errorf("The number of subnets for a public hosted cluster should be at least two")
}
vpcSubnets, vpcSubnetsErr := awsClient.GetVPCSubnets(subnetIDs[0])
if vpcSubnetsErr != nil {
return 0, vpcSubnetsErr
}
var subnets []*ec2.Subnet
for _, subnet := range vpcSubnets {
for _, subnetId := range subnetIDs {
if awssdk.StringValue(subnet.SubnetId) == subnetId {
subnets = append(subnets, subnet)
break
}
}
}
privateSubnets, privateSubnetsErr := awsClient.FilterVPCsPrivateSubnets(subnets)
if privateSubnetsErr != nil {
return 0, privateSubnetsErr
}
privateSubnetCount := len(privateSubnets)
publicSubnetsCount := len(subnets) - privateSubnetCount
if isPrivate {
if publicSubnetsCount > 0 {
return 0, fmt.Errorf("The number of public subnets for a private hosted cluster should be zero")
}
} else {
if publicSubnetsCount == 0 {
return 0, fmt.Errorf("The number of public subnets for a public hosted " +
"cluster should be at least one")
}
}
return privateSubnetCount, nil
}
const (
singleAZCount = 1
MultiAZCount = 3
)
func ValidateAvailabilityZonesCount(multiAZ bool, availabilityZonesCount int) error {
if multiAZ && availabilityZonesCount != MultiAZCount {
return fmt.Errorf("The number of availability zones for a multi AZ cluster should be %d, "+
"instead received: %d", MultiAZCount, availabilityZonesCount)
}
if !multiAZ && availabilityZonesCount != singleAZCount | {
return fmt.Errorf("The number of availability zones for a single AZ cluster should be %d, "+
"instead received: %d", singleAZCount, availabilityZonesCount)
} | conditional_block | |
helpers.go | .ParseCIDR(network.PodCIDR())
if err != nil {
dPodcidr = nil
}
_, dServicecidr, err = net.ParseCIDR(network.ServiceCIDR())
if err != nil {
dServicecidr = nil
}
dhostPrefix, _ = network.GetHostPrefix()
return dMachinecidr, dPodcidr, dServicecidr, dhostPrefix, computeInstanceType
}
func (c *Client) LogEvent(key string, body map[string]string) |
func (c *Client) GetCurrentAccount() (*amsv1.Account, error) {
response, err := c.ocm.AccountsMgmt().V1().
CurrentAccount().
Get().
Send()
if err != nil {
if response.Status() == http.StatusNotFound {
return nil, nil
}
return nil, handleErr(response.Error(), err)
}
return response.Body(), nil
}
func (c *Client) GetCurrentOrganization() (id string, externalID string, err error) {
acctResponse, err := c.GetCurrentAccount()
if err != nil {
return
}
id = acctResponse.Organization().ID()
externalID = acctResponse.Organization().ExternalID()
return
}
func (c *Client) IsCapabilityEnabled(capability string) (enabled bool, err error) {
organizationID, _, err := c.GetCurrentOrganization()
if err != nil {
return
}
isCapabilityEnable, err := c.isCapabilityEnabled(capability, organizationID)
if err != nil {
return
}
if !isCapabilityEnable {
return false, nil
}
return true, nil
}
func (c *Client) isCapabilityEnabled(capabilityName string, orgID string) (bool, error) {
capabilityResponse, err := c.ocm.AccountsMgmt().V1().Organizations().
Organization(orgID).Get().Parameter("fetchCapabilities", true).Send()
if err != nil {
return false, handleErr(capabilityResponse.Error(), err)
}
if len(capabilityResponse.Body().Capabilities()) > 0 {
for _, capability := range capabilityResponse.Body().Capabilities() {
if capability.Name() == capabilityName {
return capability.Value() == "true", nil
}
}
}
return false, nil
}
func (c *Client) UnlinkUserRoleFromAccount(accountID string, roleARN string) error {
linkedRoles, err := c.GetAccountLinkedUserRoles(accountID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(USERRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role ARN '%s' is not linked with the current account '%s'", roleARN, accountID)
}
func (c *Client) LinkAccountRole(accountID string, roleARN string) error {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Labels("sts_user_role").Get().Send()
if err != nil && resp.Status() != 404 {
if resp.Status() == 403 {
return errors.Forbidden.UserErrorf("%v", err)
}
return handleErr(resp.Error(), err)
}
existingARN := resp.Body().Value()
exists := false
if existingARN != "" {
existingARNArr := strings.Split(existingARN, ",")
if len(existingARNArr) > 0 {
for _, value := range existingARNArr {
if value == roleARN {
exists = true
break
}
}
}
}
if exists {
return nil
}
if existingARN != "" {
roleARN = existingARN + "," + roleARN
}
labelBuilder, err := amsv1.NewLabel().Key("sts_user_role").Value(roleARN).Build()
if err != nil {
return err
}
_, err = c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Add().Body(labelBuilder).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
return err
}
func (c *Client) UnlinkOCMRoleFromOrg(orgID string, roleARN string) error {
linkedRoles, err := c.GetOrganizationLinkedOCMRoles(orgID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role-arn '%s' is not linked with the organization account '%s'", roleARN, orgID)
}
func (c *Client) LinkOrgToRole(orgID string, roleARN string) (bool, error) {
parsedARN, err := arn.Parse(roleARN)
if err != nil {
return false, err
}
exists, existingARN, selectedARN, err := c.CheckIfAWSAccountExists(orgID, parsedARN.AccountID)
if err != nil {
return false, err
}
if exists {
if selectedARN != roleARN {
return false, errors.UserErrorf("User organization '%s' has role-arn '%s' associated. "+
"Only one role can be linked per AWS account per organization", orgID, selectedARN)
}
return false, nil
}
if existingARN != "" {
roleARN = existingARN + "," + roleARN
}
labelBuilder, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(roleARN).Build()
if err != nil {
return false, err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Add().Body(labelBuilder).Send()
if err != nil {
return false, handleErr(resp.Error(), err)
}
return true, nil
}
func (c *Client) GetAccountLinkedUserRoles(accountID string) ([]string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Labels(USERRoleLabel).Get().Send()
if err != nil && resp.Status() != http.StatusNotFound {
return nil, handleErr(resp.Error(), err)
}
return strings.Split(resp.Body().Value(), ","), nil
}
func (c *Client) GetOrganizationLinkedOCMRoles(orgID string) ([]string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Labels(OCMRoleLabel).Get().Send()
if err != nil && resp.Status() != http.StatusNotFound {
return nil, err
}
return strings.Split(resp.Body().Value(), ","), nil
}
func (c *Client) CheckIfAWSAccountExists(orgID string, awsAccountID string) (bool, string, string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Labels(OCMRoleLabel).Get().Send()
if err != nil && resp.Status() != 404 {
if resp.Status() == 403 {
return false, "", "", errors.Forbidden.UserErrorf("%v", err)
}
return false, "", "", handleErr(resp.Error(), err)
}
existingARN := resp.Body().Value()
exists | {
event, err := cmv1.NewEvent().Key(key).Body(body).Build()
if err == nil {
_, _ = c.ocm.ClustersMgmt().V1().
Events().
Add().
Body(event).
Send()
}
} | identifier_body |
helpers.go | gmt().V1().
CurrentAccount().
Get().
Send()
if err != nil {
if response.Status() == http.StatusNotFound {
return nil, nil
}
return nil, handleErr(response.Error(), err)
}
return response.Body(), nil
}
func (c *Client) GetCurrentOrganization() (id string, externalID string, err error) {
acctResponse, err := c.GetCurrentAccount()
if err != nil {
return
}
id = acctResponse.Organization().ID()
externalID = acctResponse.Organization().ExternalID()
return
}
func (c *Client) IsCapabilityEnabled(capability string) (enabled bool, err error) {
organizationID, _, err := c.GetCurrentOrganization()
if err != nil {
return
}
isCapabilityEnable, err := c.isCapabilityEnabled(capability, organizationID)
if err != nil {
return
}
if !isCapabilityEnable {
return false, nil
}
return true, nil
}
func (c *Client) isCapabilityEnabled(capabilityName string, orgID string) (bool, error) {
capabilityResponse, err := c.ocm.AccountsMgmt().V1().Organizations().
Organization(orgID).Get().Parameter("fetchCapabilities", true).Send()
if err != nil {
return false, handleErr(capabilityResponse.Error(), err)
}
if len(capabilityResponse.Body().Capabilities()) > 0 {
for _, capability := range capabilityResponse.Body().Capabilities() {
if capability.Name() == capabilityName {
return capability.Value() == "true", nil
}
}
}
return false, nil
}
func (c *Client) UnlinkUserRoleFromAccount(accountID string, roleARN string) error {
linkedRoles, err := c.GetAccountLinkedUserRoles(accountID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(USERRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).Labels().
Labels(USERRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role ARN '%s' is not linked with the current account '%s'", roleARN, accountID)
}
func (c *Client) LinkAccountRole(accountID string, roleARN string) error {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Labels("sts_user_role").Get().Send()
if err != nil && resp.Status() != 404 {
if resp.Status() == 403 {
return errors.Forbidden.UserErrorf("%v", err)
}
return handleErr(resp.Error(), err)
}
existingARN := resp.Body().Value()
exists := false
if existingARN != "" {
existingARNArr := strings.Split(existingARN, ",")
if len(existingARNArr) > 0 {
for _, value := range existingARNArr {
if value == roleARN {
exists = true
break
}
}
}
}
if exists {
return nil
}
if existingARN != "" {
roleARN = existingARN + "," + roleARN
}
labelBuilder, err := amsv1.NewLabel().Key("sts_user_role").Value(roleARN).Build()
if err != nil {
return err
}
_, err = c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Add().Body(labelBuilder).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
return err
}
func (c *Client) UnlinkOCMRoleFromOrg(orgID string, roleARN string) error {
linkedRoles, err := c.GetOrganizationLinkedOCMRoles(orgID)
if err != nil {
return err
}
if helper.Contains(linkedRoles, roleARN) {
linkedRoles = helper.RemoveStrFromSlice(linkedRoles, roleARN)
if len(linkedRoles) > 0 {
newRoleARN := strings.Join(linkedRoles, ",")
label, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(newRoleARN).Build()
if err != nil {
return err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Update().Body(label).Send()
if err != nil {
return handleErr(resp.Error(), err)
}
} else {
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).Labels().
Labels(OCMRoleLabel).Delete().Send()
if err != nil {
return handleErr(resp.Error(), err)
}
}
return nil
}
return errors.UserErrorf("Role-arn '%s' is not linked with the organization account '%s'", roleARN, orgID)
}
func (c *Client) LinkOrgToRole(orgID string, roleARN string) (bool, error) {
parsedARN, err := arn.Parse(roleARN)
if err != nil {
return false, err
}
exists, existingARN, selectedARN, err := c.CheckIfAWSAccountExists(orgID, parsedARN.AccountID)
if err != nil {
return false, err
}
if exists {
if selectedARN != roleARN {
return false, errors.UserErrorf("User organization '%s' has role-arn '%s' associated. "+
"Only one role can be linked per AWS account per organization", orgID, selectedARN)
}
return false, nil
}
if existingARN != "" {
roleARN = existingARN + "," + roleARN
}
labelBuilder, err := amsv1.NewLabel().Key(OCMRoleLabel).Value(roleARN).Build()
if err != nil {
return false, err
}
resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
Labels().Add().Body(labelBuilder).Send()
if err != nil {
return false, handleErr(resp.Error(), err)
}
return true, nil
}
func (c *Client) GetAccountLinkedUserRoles(accountID string) ([]string, error) {
resp, err := c.ocm.AccountsMgmt().V1().Accounts().Account(accountID).
Labels().Labels(USERRoleLabel).Get().Send()
if err != nil && resp.Status() != http.StatusNotFound {
return nil, handleErr(resp.Error(), err)
}
return strings.Split(resp.Body().Value(), ","), nil
}
// GetOrganizationLinkedOCMRoles returns the OCM role ARNs linked to the given
// organization via the OCM role label. A missing label (HTTP 404) is not an
// error; it simply yields no linked roles.
func (c *Client) GetOrganizationLinkedOCMRoles(orgID string) ([]string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Labels(OCMRoleLabel).Get().Send()
	if err != nil && resp.Status() != http.StatusNotFound {
		// Wrap the OCM response error like the sibling accessors do,
		// rather than returning the raw transport error.
		return nil, handleErr(resp.Error(), err)
	}
	value := resp.Body().Value()
	if value == "" {
		// strings.Split("", ",") would return [""]; report no roles instead.
		return nil, nil
	}
	return strings.Split(value, ","), nil
}
// CheckIfAWSAccountExists reports whether the organization already has an OCM
// role linked for the given AWS account. It returns:
//   - exists: true when a linked role ARN belongs to awsAccountID
//   - existingARN: the raw (comma-separated) label value of all linked roles
//   - selectedARN: the specific role ARN that matched awsAccountID, if any
func (c *Client) CheckIfAWSAccountExists(orgID string, awsAccountID string) (bool, string, string, error) {
	resp, err := c.ocm.AccountsMgmt().V1().Organizations().Organization(orgID).
		Labels().Labels(OCMRoleLabel).Get().Send()
	// A missing label (404) just means no roles are linked yet.
	if err != nil && resp.Status() != http.StatusNotFound {
		if resp.Status() == http.StatusForbidden {
			return false, "", "", errors.Forbidden.UserErrorf("%v", err)
		}
		return false, "", "", handleErr(resp.Error(), err)
	}
	existingARN := resp.Body().Value()
	exists := false
	selectedARN := ""
	if existingARN != "" {
		// The label value is a comma-separated list of role ARNs; find the
		// one (if any) belonging to the requested AWS account. Split never
		// returns an empty slice, so no extra length check is needed.
		for _, value := range strings.Split(existingARN, ",") {
			parsedARN, err := arn.Parse(value)
			if err != nil {
				return false, "", "", err
			}
			if parsedARN.AccountID == awsAccountID {
				exists = true
				selectedARN = value
				break
			}
		}
	}
	return exists, existingARN, selectedARN, nil
}
/*
We should allow only one role per aws account per organization
If the user request same ocm role we should let them proceed to ensure they can add admin role
if not exists or attach policies or link etc
if the user request diff ocm role name we error out
*/
func (c *Client) | CheckRoleExists | identifier_name | |
reconciler.go | concileFailed", "Failed to reconcile Data Plane resource(s): %s", err.Error())
return err
}
ps.Status.MarkDeployed()
return nil
}
// reconcileSubscription ensures the Pub/Sub subscription backing this
// PullSubscription exists and returns its ID. It resolves (and caches in the
// status) the project ID, verifies the topic exists, and creates the
// subscription with the spec's ack-deadline/retention settings when absent.
func (r *Base) reconcileSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) (string, error) {
	if ps.Status.ProjectID == "" {
		projectID, err := utils.ProjectID(ps.Spec.Project)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
			return "", err
		}
		// Set the projectID in the status.
		ps.Status.ProjectID = projectID
	}
	// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
	// pointing at a credential file.
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return "", err
	}
	defer client.Close()
	// Generate the subscription name
	subID := resources.GenerateSubscriptionName(ps)
	// Load the subscription.
	sub := client.Subscription(subID)
	subExists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return "", err
	}
	t := client.Topic(ps.Spec.Topic)
	topicExists, err := t.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
		return "", err
	}
	if !topicExists {
		return "", fmt.Errorf("Topic %q does not exist", ps.Spec.Topic)
	}
	// subConfig is the wanted config based on settings.
	subConfig := gpubsub.SubscriptionConfig{
		Topic:               t,
		RetainAckedMessages: ps.Spec.RetainAckedMessages,
	}
	if ps.Spec.AckDeadline != nil {
		ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
			return "", fmt.Errorf("invalid ackDeadline: %w", err)
		}
		subConfig.AckDeadline = ackDeadline
	}
	if ps.Spec.RetentionDuration != nil {
		retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
			return "", fmt.Errorf("invalid retentionDuration: %w", err)
		}
		subConfig.RetentionDuration = retentionDuration
	}
	// If the subscription doesn't exist, create it.
	if !subExists {
		// Create a new subscription to the previous topic with the given name.
		// Only the error is kept: the original code reassigned `sub` here, but
		// that handle was never read afterwards (ineffectual assignment).
		if _, err := client.CreateSubscription(ctx, subID, subConfig); err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
			return "", err
		}
	}
	// TODO update the subscription's config if needed.
	return subID, nil
}
// deleteSubscription removes the Pub/Sub subscription recorded in
// status.SubscriptionID, if any. A non-empty ID indicates we created a
// subscription earlier; Pub/Sub is queried first because the subscription may
// have been deleted out of band (e.g. through gcloud).
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	subID := ps.Status.SubscriptionID
	if subID == "" {
		// Nothing was ever created, so there is nothing to delete.
		return nil
	}
	// By this point the project ID has been populated in the status.
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return err
	}
	defer client.Close()
	subscription := client.Subscription(subID)
	exists, err := subscription.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return err
	}
	if !exists {
		// Already gone (deleted out of band); treat as success.
		return nil
	}
	if err := subscription.Delete(ctx); err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
		return err
	}
	return nil
}
// updateStatus writes desired.Status back to the API server, retrying on
// optimistic-concurrency conflicts. On the transition to Ready it also emits
// a Kubernetes event and records a readiness metric.
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
	existing := original.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			existing, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}
		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}
		// Capture the Ready transition before overwriting existing.Status.
		becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()
		existing.Status = desired.Status
		_, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).UpdateStatus(existing)
		if err == nil && becomesReady {
			// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
			duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
			logging.FromContext(ctx).Desugar().Info("PullSubscription became ready", zap.Any("after", duration))
			r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PullSubscription %q became ready", existing.Name))
			if metricErr := r.StatsReporter.ReportReady("PullSubscription", existing.Namespace, existing.Name, duration); metricErr != nil {
				// Metric failures are logged but do not fail the status update.
				logging.FromContext(ctx).Desugar().Error("Failed to record ready for PullSubscription", zap.Error(metricErr))
			}
		}
		return err
	})
}
// updateFinalizers is a generic method for future compatibility with a
// reconciler SDK. It syncs the presence of r.FinalizerName on the stored
// object with its presence on desired, using a JSON merge patch guarded by
// resourceVersion for optimistic concurrency. The bool result reports whether
// a patch was actually sent.
func (r *Base) updateFinalizers(ctx context.Context, desired *v1alpha1.PullSubscription) (*v1alpha1.PullSubscription, bool, error) {
	source, err := r.PullSubscriptionLister.PullSubscriptions(desired.Namespace).Get(desired.Name)
	if err != nil {
		return nil, false, err
	}
	// Don't modify the informers copy.
	existing := source.DeepCopy()
	var finalizers []string
	// If there's nothing to update, just return.
	existingFinalizers := sets.NewString(existing.Finalizers...)
	desiredFinalizers := sets.NewString(desired.Finalizers...)
	if desiredFinalizers.Has(r.FinalizerName) {
		if existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.FinalizerName)
	} else {
		if !existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.FinalizerName)
		finalizers = existingFinalizers.List()
	}
	// Including resourceVersion makes the merge patch fail with a conflict if
	// the object changed since it was read above.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers": finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}
	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return desired, false, err
	}
	update, err := r.RunClientSet.PubsubV1alpha1().PullSubscriptions(existing.Namespace).Patch(existing.Name, types.MergePatchType, patch)
	return update, true, err
}
// addFinalizer records r.FinalizerName on the PullSubscription, keeping the
// finalizer list deduplicated (set semantics) and sorted.
func (r *Base) addFinalizer(s *v1alpha1.PullSubscription) {
	names := sets.NewString(s.Finalizers...)
	names.Insert(r.FinalizerName)
	s.Finalizers = names.List()
}
// removeFinalizer drops r.FinalizerName from the PullSubscription, keeping
// the finalizer list deduplicated (set semantics) and sorted.
func (r *Base) removeFinalizer(s *v1alpha1.PullSubscription) {
	names := sets.NewString(s.Finalizers...)
	names.Delete(r.FinalizerName)
	s.Finalizers = names.List()
}
func (r *Base) reconcileDataPlaneResources(ctx context.Context, src *v1alpha1.PullSubscription, f ReconcileDataPlaneFunc) error | {
loggingConfig, err := logging.LoggingConfigToJson(r.LoggingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing existing logging config", zap.Error(err))
}
if r.MetricsConfig != nil {
component := sourceComponent
// Set the metric component based on the channel label.
if _, ok := src.Labels["events.cloud.google.com/channel"]; ok {
component = channelComponent
}
r.MetricsConfig.Component = component
}
metricsConfig, err := metrics.MetricsOptionsToJson(r.MetricsConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing metrics config", zap.Error(err))
}
| identifier_body | |
reconciler.go | ClientFn(ctx, ps.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return "", err
}
defer client.Close()
// Generate the subscription name
subID := resources.GenerateSubscriptionName(ps)
// Load the subscription.
sub := client.Subscription(subID)
subExists, err := sub.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
return "", err
}
t := client.Topic(ps.Spec.Topic)
topicExists, err := t.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
return "", err
}
if !topicExists {
return "", fmt.Errorf("Topic %q does not exist", ps.Spec.Topic)
}
// subConfig is the wanted config based on settings.
subConfig := gpubsub.SubscriptionConfig{
Topic: t,
RetainAckedMessages: ps.Spec.RetainAckedMessages,
}
if ps.Spec.AckDeadline != nil {
ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
return "", fmt.Errorf("invalid ackDeadline: %w", err)
}
subConfig.AckDeadline = ackDeadline
}
if ps.Spec.RetentionDuration != nil {
retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
return "", fmt.Errorf("invalid retentionDuration: %w", err)
}
subConfig.RetentionDuration = retentionDuration
}
// If the subscription doesn't exist, create it.
if !subExists {
// Create a new subscription to the previous topic with the given name.
sub, err = client.CreateSubscription(ctx, subID, subConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
return "", err
}
}
// TODO update the subscription's config if needed.
return subID, nil
}
// deleteSubscription looks at the status.SubscriptionID and if non-empty,
// hence indicating that we have created a subscription successfully
// in the PullSubscription, remove it.
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	if ps.Status.SubscriptionID == "" {
		// No subscription was ever recorded, so there is nothing to delete.
		return nil
	}
	// At this point the project ID should have been populated in the status.
	// Querying Pub/Sub as the subscription could have been deleted outside the cluster (e.g, through gcloud).
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return err
	}
	defer client.Close()
	// Load the subscription.
	sub := client.Subscription(ps.Status.SubscriptionID)
	exists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return err
	}
	if exists {
		if err := sub.Delete(ctx); err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
			return err
		}
	}
	// A missing subscription (deleted out of band) is treated as success.
	return nil
}
// updateStatus writes desired.Status back to the API server, retrying on
// optimistic-concurrency conflicts. On the transition to Ready it also emits
// a Kubernetes event and records a readiness metric.
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
	existing := original.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			existing, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}
		// If there's nothing to update, just return.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}
		// Capture the Ready transition before overwriting existing.Status.
		becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()
		existing.Status = desired.Status
		_, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).UpdateStatus(existing)
		if err == nil && becomesReady {
			// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
			duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
			logging.FromContext(ctx).Desugar().Info("PullSubscription became ready", zap.Any("after", duration))
			r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PullSubscription %q became ready", existing.Name))
			if metricErr := r.StatsReporter.ReportReady("PullSubscription", existing.Namespace, existing.Name, duration); metricErr != nil {
				// Metric failures are logged but do not fail the status update.
				logging.FromContext(ctx).Desugar().Error("Failed to record ready for PullSubscription", zap.Error(metricErr))
			}
		}
		return err
	})
}
// updateFinalizers is a generic method for future compatibility with a
// reconciler SDK. It syncs the presence of r.FinalizerName on the stored
// object with its presence on desired, using a JSON merge patch guarded by
// resourceVersion for optimistic concurrency. The bool result reports whether
// a patch was actually sent.
func (r *Base) updateFinalizers(ctx context.Context, desired *v1alpha1.PullSubscription) (*v1alpha1.PullSubscription, bool, error) {
	source, err := r.PullSubscriptionLister.PullSubscriptions(desired.Namespace).Get(desired.Name)
	if err != nil {
		return nil, false, err
	}
	// Don't modify the informers copy.
	existing := source.DeepCopy()
	var finalizers []string
	// If there's nothing to update, just return.
	existingFinalizers := sets.NewString(existing.Finalizers...)
	desiredFinalizers := sets.NewString(desired.Finalizers...)
	if desiredFinalizers.Has(r.FinalizerName) {
		if existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.FinalizerName)
	} else {
		if !existingFinalizers.Has(r.FinalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.FinalizerName)
		finalizers = existingFinalizers.List()
	}
	// Including resourceVersion makes the merge patch fail with a conflict if
	// the object changed since it was read above.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers": finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}
	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return desired, false, err
	}
	update, err := r.RunClientSet.PubsubV1alpha1().PullSubscriptions(existing.Namespace).Patch(existing.Name, types.MergePatchType, patch)
	return update, true, err
}
// addFinalizer records r.FinalizerName on the PullSubscription, keeping the
// finalizer list deduplicated (set semantics) and sorted.
func (r *Base) addFinalizer(s *v1alpha1.PullSubscription) {
	names := sets.NewString(s.Finalizers...)
	names.Insert(r.FinalizerName)
	s.Finalizers = names.List()
}
// removeFinalizer drops r.FinalizerName from the PullSubscription, keeping
// the finalizer list deduplicated (set semantics) and sorted.
func (r *Base) removeFinalizer(s *v1alpha1.PullSubscription) {
	names := sets.NewString(s.Finalizers...)
	names.Delete(r.FinalizerName)
	s.Finalizers = names.List()
}
// reconcileDataPlaneResources builds the desired receive adapter for src and
// hands it to f, which performs the actual create/update of the data plane
// resource(s). Serialization failures of the observability configs are logged
// but intentionally non-fatal: the adapter is still built with whatever JSON
// was produced.
func (r *Base) reconcileDataPlaneResources(ctx context.Context, src *v1alpha1.PullSubscription, f ReconcileDataPlaneFunc) error {
	loggingJSON, err := logging.LoggingConfigToJson(r.LoggingConfig)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Error serializing existing logging config", zap.Error(err))
	}
	if r.MetricsConfig != nil {
		// Set the metric component based on the channel label.
		if _, isChannel := src.Labels["events.cloud.google.com/channel"]; isChannel {
			r.MetricsConfig.Component = channelComponent
		} else {
			r.MetricsConfig.Component = sourceComponent
		}
	}
	metricsJSON, err := metrics.MetricsOptionsToJson(r.MetricsConfig)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Error serializing metrics config", zap.Error(err))
	}
	tracingJSON, err := tracing.ConfigToJSON(r.TracingConfig)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Error serializing tracing config", zap.Error(err))
	}
	desired := resources.MakeReceiveAdapter(ctx, &resources.ReceiveAdapterArgs{
		Image:          r.ReceiveAdapterImage,
		Source:         src,
		Labels:         resources.GetLabels(r.ControllerAgentName, src.Name),
		SubscriptionID: src.Status.SubscriptionID,
		SinkURI:        src.Status.SinkURI,
		TransformerURI: src.Status.TransformerURI,
		LoggingConfig:  loggingJSON,
		MetricsConfig:  metricsJSON,
		TracingConfig:  tracingJSON,
	})
	return f(ctx, desired, src)
}
func (r *Base) | GetOrCreateReceiveAdapter | identifier_name | |
reconciler.go | may no longer exist, in which case we stop processing.
logging.FromContext(ctx).Desugar().Error("PullSubscription in work queue no longer exists")
return nil
} else if err != nil {
return err
}
// Don't modify the informers copy
ps := original.DeepCopy()
// Reconcile this copy of the PullSubscription and then write back any status
// updates regardless of whether the reconciliation errored out.
var reconcileErr = r.reconcile(ctx, ps)
// If no error is returned, mark the observed generation.
// This has to be done before updateStatus is called.
if reconcileErr == nil {
ps.Status.ObservedGeneration = ps.Generation
}
if equality.Semantic.DeepEqual(original.Finalizers, ps.Finalizers) {
// If we didn't change finalizers then don't call updateFinalizers.
} else if _, updated, fErr := r.updateFinalizers(ctx, ps); fErr != nil {
logging.FromContext(ctx).Desugar().Warn("Failed to update PullSubscription finalizers", zap.Error(fErr))
r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update finalizers for PullSubscription %q: %v", ps.Name, fErr)
return fErr
} else if updated {
// There was a difference and updateFinalizers said it updated and did not return an error.
r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q finalizers", ps.Name)
}
if equality.Semantic.DeepEqual(original.Status, ps.Status) {
// If we didn't change anything then don't call updateStatus.
// This is important because the copy we loaded from the informer's
// cache may be stale and we don't want to overwrite a prior update
// to status with this stale state.
} else if uErr := r.updateStatus(ctx, original, ps); uErr != nil {
logging.FromContext(ctx).Desugar().Warn("Failed to update ps status", zap.Error(uErr))
r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update status for PullSubscription %q: %v", ps.Name, uErr)
return uErr
} else if reconcileErr == nil {
// There was a difference and updateStatus did not return an error.
r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q", ps.Name)
}
if reconcileErr != nil {
r.Recorder.Event(ps, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
}
return reconcileErr
}
func (r *Base) reconcile(ctx context.Context, ps *v1alpha1.PullSubscription) error {
ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("pullsubscription", ps)))
ps.Status.InitializeConditions()
if ps.DeletionTimestamp != nil {
logging.FromContext(ctx).Desugar().Debug("Deleting Pub/Sub subscription")
if err := r.deleteSubscription(ctx, ps); err != nil {
ps.Status.MarkNoSubscription("SubscriptionDeleteFailed", "Failed to delete Pub/Sub subscription: %s", err.Error())
return err
}
ps.Status.MarkNoSubscription("SubscriptionDeleted", "Successfully deleted Pub/Sub subscription %q", ps.Status.SubscriptionID)
ps.Status.SubscriptionID = ""
r.removeFinalizer(ps)
return nil
}
// Sink is required.
sinkURI, err := r.resolveDestination(ctx, ps.Spec.Sink, ps)
if err != nil {
ps.Status.MarkNoSink("InvalidSink", err.Error())
return err
} else {
ps.Status.MarkSink(sinkURI)
}
// Transformer is optional.
if ps.Spec.Transformer != nil | else {
// If the transformer is nil, mark is as nil and clean up the URI.
ps.Status.MarkNoTransformer("TransformerNil", "Transformer is nil")
ps.Status.TransformerURI = ""
}
r.addFinalizer(ps)
subscriptionID, err := r.reconcileSubscription(ctx, ps)
if err != nil {
ps.Status.MarkNoSubscription("SubscriptionReconcileFailed", "Failed to reconcile Pub/Sub subscription: %s", err.Error())
return err
}
ps.Status.MarkSubscribed(subscriptionID)
err = r.reconcileDataPlaneResources(ctx, ps, r.ReconcileDataPlaneFn)
if err != nil {
ps.Status.MarkNotDeployed("DataPlaneReconcileFailed", "Failed to reconcile Data Plane resource(s): %s", err.Error())
return err
}
ps.Status.MarkDeployed()
return nil
}
func (r *Base) reconcileSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) (string, error) {
if ps.Status.ProjectID == "" {
projectID, err := utils.ProjectID(ps.Spec.Project)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
return "", err
}
// Set the projectID in the status.
ps.Status.ProjectID = projectID
}
// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
// pointing at a credential file.
client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return "", err
}
defer client.Close()
// Generate the subscription name
subID := resources.GenerateSubscriptionName(ps)
// Load the subscription.
sub := client.Subscription(subID)
subExists, err := sub.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
return "", err
}
t := client.Topic(ps.Spec.Topic)
topicExists, err := t.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
return "", err
}
if !topicExists {
return "", fmt.Errorf("Topic %q does not exist", ps.Spec.Topic)
}
// subConfig is the wanted config based on settings.
subConfig := gpubsub.SubscriptionConfig{
Topic: t,
RetainAckedMessages: ps.Spec.RetainAckedMessages,
}
if ps.Spec.AckDeadline != nil {
ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
return "", fmt.Errorf("invalid ackDeadline: %w", err)
}
subConfig.AckDeadline = ackDeadline
}
if ps.Spec.RetentionDuration != nil {
retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
return "", fmt.Errorf("invalid retentionDuration: %w", err)
}
subConfig.RetentionDuration = retentionDuration
}
// If the subscription doesn't exist, create it.
if !subExists {
// Create a new subscription to the previous topic with the given name.
sub, err = client.CreateSubscription(ctx, subID, subConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
return "", err
}
}
// TODO update the subscription's config if needed.
return subID, nil
}
// deleteSubscription looks at the status.SubscriptionID and if non-empty,
// hence indicating that we have created a subscription successfully
// in the PullSubscription, remove it.
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
if ps.Status.SubscriptionID == "" {
return nil
}
// At this point the project ID should have been populated in the status.
// Querying Pub/Sub as the subscription could have been deleted outside the cluster (e.g, through gcloud).
client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return err
}
defer client.Close()
// Load the subscription.
sub := client.Subscription(ps.Status.SubscriptionID)
exists, err := sub.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
return err
}
if exists {
if err := sub.Delete(ctx); err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
return err
}
}
return nil
}
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
existing | {
transformerURI, err := r.resolveDestination(ctx, *ps.Spec.Transformer, ps)
if err != nil {
ps.Status.MarkNoTransformer("InvalidTransformer", err.Error())
} else {
ps.Status.MarkTransformer(transformerURI)
}
} | conditional_block |
reconciler.go | // If we didn't change anything then don't call updateStatus.
// This is important because the copy we loaded from the informer's
// cache may be stale and we don't want to overwrite a prior update
// to status with this stale state.
} else if uErr := r.updateStatus(ctx, original, ps); uErr != nil {
logging.FromContext(ctx).Desugar().Warn("Failed to update ps status", zap.Error(uErr))
r.Recorder.Eventf(ps, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update status for PullSubscription %q: %v", ps.Name, uErr)
return uErr
} else if reconcileErr == nil {
// There was a difference and updateStatus did not return an error.
r.Recorder.Eventf(ps, corev1.EventTypeNormal, "Updated", "Updated PullSubscription %q", ps.Name)
}
if reconcileErr != nil {
r.Recorder.Event(ps, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
}
return reconcileErr
}
// reconcile drives the PullSubscription towards its desired state: on
// deletion it tears down the Pub/Sub subscription; otherwise it resolves the
// sink (and optional transformer), ensures the subscription exists, and
// reconciles the data plane resources. Status conditions are updated at every
// step so failures remain observable.
func (r *Base) reconcile(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("pullsubscription", ps)))
	ps.Status.InitializeConditions()
	// Deletion path: remove the backing subscription and release the finalizer.
	if ps.DeletionTimestamp != nil {
		logging.FromContext(ctx).Desugar().Debug("Deleting Pub/Sub subscription")
		if err := r.deleteSubscription(ctx, ps); err != nil {
			ps.Status.MarkNoSubscription("SubscriptionDeleteFailed", "Failed to delete Pub/Sub subscription: %s", err.Error())
			return err
		}
		ps.Status.MarkNoSubscription("SubscriptionDeleted", "Successfully deleted Pub/Sub subscription %q", ps.Status.SubscriptionID)
		ps.Status.SubscriptionID = ""
		r.removeFinalizer(ps)
		return nil
	}
	// Sink is required.
	sinkURI, err := r.resolveDestination(ctx, ps.Spec.Sink, ps)
	if err != nil {
		ps.Status.MarkNoSink("InvalidSink", err.Error())
		return err
	}
	ps.Status.MarkSink(sinkURI)
	// Transformer is optional.
	if ps.Spec.Transformer == nil {
		// No transformer configured: mark it absent and clear any stale URI.
		ps.Status.MarkNoTransformer("TransformerNil", "Transformer is nil")
		ps.Status.TransformerURI = ""
	} else if transformerURI, terr := r.resolveDestination(ctx, *ps.Spec.Transformer, ps); terr != nil {
		ps.Status.MarkNoTransformer("InvalidTransformer", terr.Error())
	} else {
		ps.Status.MarkTransformer(transformerURI)
	}
	r.addFinalizer(ps)
	subscriptionID, err := r.reconcileSubscription(ctx, ps)
	if err != nil {
		ps.Status.MarkNoSubscription("SubscriptionReconcileFailed", "Failed to reconcile Pub/Sub subscription: %s", err.Error())
		return err
	}
	ps.Status.MarkSubscribed(subscriptionID)
	if err := r.reconcileDataPlaneResources(ctx, ps, r.ReconcileDataPlaneFn); err != nil {
		ps.Status.MarkNotDeployed("DataPlaneReconcileFailed", "Failed to reconcile Data Plane resource(s): %s", err.Error())
		return err
	}
	ps.Status.MarkDeployed()
	return nil
}
// reconcileSubscription ensures the Pub/Sub subscription backing this
// PullSubscription exists and returns its ID. It resolves (and caches in the
// status) the project ID, verifies the topic exists, and creates the
// subscription with the spec's ack-deadline/retention settings when absent.
func (r *Base) reconcileSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) (string, error) {
	if ps.Status.ProjectID == "" {
		projectID, err := utils.ProjectID(ps.Spec.Project)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
			return "", err
		}
		// Set the projectID in the status.
		ps.Status.ProjectID = projectID
	}
	// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
	// pointing at a credential file.
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return "", err
	}
	defer client.Close()
	// Generate the subscription name
	subID := resources.GenerateSubscriptionName(ps)
	// Load the subscription.
	sub := client.Subscription(subID)
	subExists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return "", err
	}
	t := client.Topic(ps.Spec.Topic)
	topicExists, err := t.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
		return "", err
	}
	if !topicExists {
		return "", fmt.Errorf("Topic %q does not exist", ps.Spec.Topic)
	}
	// subConfig is the wanted config based on settings.
	subConfig := gpubsub.SubscriptionConfig{
		Topic:               t,
		RetainAckedMessages: ps.Spec.RetainAckedMessages,
	}
	if ps.Spec.AckDeadline != nil {
		ackDeadline, err := time.ParseDuration(*ps.Spec.AckDeadline)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid ackDeadline", zap.String("ackDeadline", *ps.Spec.AckDeadline))
			return "", fmt.Errorf("invalid ackDeadline: %w", err)
		}
		subConfig.AckDeadline = ackDeadline
	}
	if ps.Spec.RetentionDuration != nil {
		retentionDuration, err := time.ParseDuration(*ps.Spec.RetentionDuration)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Invalid retentionDuration", zap.String("retentionDuration", *ps.Spec.RetentionDuration))
			return "", fmt.Errorf("invalid retentionDuration: %w", err)
		}
		subConfig.RetentionDuration = retentionDuration
	}
	// If the subscription doesn't exist, create it.
	if !subExists {
		// Create a new subscription to the previous topic with the given name.
		// NOTE(review): the reassigned sub handle is never read afterwards.
		sub, err = client.CreateSubscription(ctx, subID, subConfig)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to create subscription", zap.Error(err))
			return "", err
		}
	}
	// TODO update the subscription's config if needed.
	return subID, nil
}
// deleteSubscription looks at the status.SubscriptionID and if non-empty,
// hence indicating that we have created a subscription successfully
// in the PullSubscription, remove it.
func (r *Base) deleteSubscription(ctx context.Context, ps *v1alpha1.PullSubscription) error {
	if ps.Status.SubscriptionID == "" {
		// No subscription was ever recorded, so there is nothing to delete.
		return nil
	}
	// At this point the project ID should have been populated in the status.
	// Querying Pub/Sub as the subscription could have been deleted outside the cluster (e.g, through gcloud).
	client, err := r.CreateClientFn(ctx, ps.Status.ProjectID)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
		return err
	}
	defer client.Close()
	// Load the subscription.
	sub := client.Subscription(ps.Status.SubscriptionID)
	exists, err := sub.Exists(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub subscription exists", zap.Error(err))
		return err
	}
	if exists {
		if err := sub.Delete(ctx); err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub subscription", zap.Error(err))
			return err
		}
	}
	// A missing subscription (deleted out of band) is treated as success.
	return nil
}
func (r *Base) updateStatus(ctx context.Context, original *v1alpha1.PullSubscription, desired *v1alpha1.PullSubscription) error {
existing := original.DeepCopy()
return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
if attempts > 0 {
existing, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
if err != nil {
return err
}
}
// If there's nothing to update, just return.
if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
return nil
}
becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()
existing.Status = desired.Status
_, err = r.RunClientSet.PubsubV1alpha1().PullSubscriptions(desired.Namespace).UpdateStatus(existing)
if err == nil && becomesReady {
// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
logging.FromContext(ctx).Desugar().Info("PullSubscription became ready", zap.Any("after", duration))
r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("PullSubscription %q became ready", existing.Name))
if metricErr := r.StatsReporter.ReportReady("PullSubscription", existing.Namespace, existing.Name, duration); metricErr != nil {
logging.FromContext(ctx).Desugar().Error("Failed to record ready for PullSubscription", zap.Error(metricErr))
} | }
| random_line_split | |
utils.py | Options()
opts.store_states = True
result = sesolve(H_evol, final_state, tlist,
e_ops=[observable], options=opts)
full_signal = result.expect
signal = full_signal[0].real
signal_fft = np.fft.fft(signal)
freq = np.fft.fftfreq(signal.shape[-1])
freq_normalized = np.abs(freq * N_sample * 2) / (tf / np.pi)
return signal_fft, freq_normalized
@numba.njit
def entropy(p):
|
@numba.njit
def jensen_shannon(hist1, hist2):
'''
Returns the Jensen Shannon divergence between two probabilities
distribution represented as histograms.
Arguments:
---------
- hist1: tuple of numpy.ndarray (density, bins),
len(bins) = len(density) + 1.
The integral of the density wrt bins sums to 1.
- hist2: same format.
Returns:
--------
- float, value of the Jensen Shannon divergence.
'''
bins = np.sort(np.unique(np.array(list(hist1[1]) + list(hist2[1]))))
masses1 = []
masses2 = []
for i, b in enumerate(bins[1::]):
if b <= hist1[1][0]:
masses1.append(0.)
elif b > hist1[1][-1]:
masses1.append(0.)
else:
j = 0
while b > hist1[1][j]:
j += 1
masses1.append((b-bins[i]) * hist1[0][j-1])
if b <= hist2[1][0]:
masses2.append(0.)
elif b > hist2[1][-1]:
masses2.append(0.)
else:
j = 0
while b > hist2[1][j]:
j += 1
masses2.append((b-bins[i]) * hist2[0][j-1])
masses1 = np.array(masses1)
masses2 = np.array(masses2)
masses12 = (masses1+masses2)/2
return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
"""
Returns the fourier transform of evolution for a list of graphs for
the hamiltonian ising and xy.
Arguments:
---------
- graph_list: list or numpy.Ndarray of networkx.Graph objects
Returns:
--------
- fs_xy: numpy.Ndarray of shape (2, len(graph_list), 1000)
[0,i]: Fourier signal of graph i at 1000 points for
hamiltonian XY
[1,i]: frequencies associated to graph i at 1000 points
for hamiltonian XY
- fs_is: same for the Ising hamiltonian
"""
fs_xy = np.zeros((2, len(graph_list), 1000))
fs_is = np.zeros((2, len(graph_list), 1000))
for i, graph in enumerate(graph_list):
fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='xy')
fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='ising')
return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns:
--------
- all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns | """
Returns the entropy of a discrete distribution p
Arguments:
---------
- p: numpy.Ndarray dimension 1 non-negative floats summing to 1
Returns:
--------
- float, value of the entropy
"""
assert (p >= 0).all()
assert abs(np.sum(p)-1) < 1e-6
return -np.sum(p*np.log(p+1e-12)) | identifier_body |
utils.py | , evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns:
--------
- all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns of zeros for missing energies.
Arguments:
---------
- target_energies: numpy.ndarray of shape (N_dim, ) target energies
- energies: numpy.ndarray of shape (N_dim_init, ) energies of distributions
- masses: numpy.ndarray of shape (N, N_dim_init) discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N, N_dim)
all extended discrete probability distributions
"""
energies = list(energies)
N = masses.shape[0]
res = np.zeros((N, len(target_energies)))
for i, energy in enumerate(target_energies):
if energy not in energies:
res[:, i] = np.zeros((N, ))
else:
res[:, i] = masses[:, energies.index(energy)]
return res
def merge_energies(e1, m1, e2, m2):
"""
Merge the arrays of energy masses, filling with zeros the missing energies in each.
N_dim is the size of the union of the energies from the two distributions.
Arguments:
---------
- e1: numpy.ndarray of shape (N_dim1, ) energies of first distributions
- m1: numpy.ndarray of shape (N1, N_dim1) first discrete probability distributions
- e2: numpy.ndarray of shape (N_dim2, ) energies of first distributions
- m2: numpy.ndarray of shape (N2, N_dim2) first discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N1, N_dim)
all extended first discrete probability distributions
- numpy.ndarray of shape (N2, N_dim)
all extended second discrete probability distributions
"""
e = sorted(list(set(e1) | set(e2)))
return extend_energies(e, e1, m1), extend_energies(e, e2, m2)
def return_js_square_matrix(distributions, verbose=0):
"""
Returns the Jensen-Shannon distance matrix of discrete
distributions.
Arguments:
---------
- distributions: numpy.ndarray of shape (N_sample, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
js_matrix = np.zeros((len(distributions), len(distributions)))
for i in range(len(distributions)):
for j in range(i + 1):
masses1 = distributions[i]
masses2 = distributions[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js
js_matrix[j, i] = js
return js_matrix
def return_js_matrix(distributions1, distributions2, verbose=0):
"""
Returns the Jensen-Shannon distance matrix between discrete
distributions.
Arguments:
---------
- distributions1: numpy.ndarray of shape (N_samples_1, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
- distributions2: numpy.ndarray of shape (N_samples_2, N_dim)
matrix of probability distribution represented on
each row. Each row must sum to 1.
Returns:
--------
- js_matrix: numpy.ndarray Jensen-Shannon distance matrix
of shape (N_sample, N_sample)
"""
assert distributions1.shape[1] == distributions2.shape[1], \
"Distributions must have matching dimensions. Consider using merge_energies"
js_matrix = np.zeros((len(distributions1), len(distributions2)))
for i in trange(len(distributions1), desc='dist1 loop', disable=verbose<=0):
for j in trange(len(distributions2), desc='dist2 loop', disable=verbose<=1):
| masses1 = distributions1[i]
masses2 = distributions2[j]
js = entropy((masses1+masses2)/2) -\
entropy(masses1)/2 - entropy(masses2)/2
js_matrix[i, j] = js | conditional_block | |
utils.py | Options()
opts.store_states = True
result = sesolve(H_evol, final_state, tlist,
e_ops=[observable], options=opts)
full_signal = result.expect
signal = full_signal[0].real
signal_fft = np.fft.fft(signal)
freq = np.fft.fftfreq(signal.shape[-1])
freq_normalized = np.abs(freq * N_sample * 2) / (tf / np.pi)
return signal_fft, freq_normalized
@numba.njit
def entropy(p):
"""
Returns the entropy of a discrete distribution p
Arguments:
---------
- p: numpy.Ndarray dimension 1 non-negative floats summing to 1
Returns:
--------
- float, value of the entropy
"""
assert (p >= 0).all()
assert abs(np.sum(p)-1) < 1e-6
return -np.sum(p*np.log(p+1e-12))
@numba.njit
def jensen_shannon(hist1, hist2):
'''
Returns the Jensen Shannon divergence between two probabilities
distribution represented as histograms.
Arguments:
---------
- hist1: tuple of numpy.ndarray (density, bins),
len(bins) = len(density) + 1.
The integral of the density wrt bins sums to 1.
- hist2: same format.
Returns:
--------
- float, value of the Jensen Shannon divergence.
'''
bins = np.sort(np.unique(np.array(list(hist1[1]) + list(hist2[1]))))
masses1 = []
masses2 = []
for i, b in enumerate(bins[1::]):
if b <= hist1[1][0]:
masses1.append(0.)
elif b > hist1[1][-1]:
masses1.append(0.)
else:
j = 0
while b > hist1[1][j]:
j += 1
masses1.append((b-bins[i]) * hist1[0][j-1])
if b <= hist2[1][0]:
masses2.append(0.)
elif b > hist2[1][-1]:
masses2.append(0.)
else:
j = 0
while b > hist2[1][j]:
j += 1
masses2.append((b-bins[i]) * hist2[0][j-1])
masses1 = np.array(masses1)
masses2 = np.array(masses2)
masses12 = (masses1+masses2)/2
return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
"""
Returns the fourier transform of evolution for a list of graphs for
the hamiltonian ising and xy.
Arguments:
---------
- graph_list: list or numpy.Ndarray of networkx.Graph objects
Returns:
--------
- fs_xy: numpy.Ndarray of shape (2, len(graph_list), 1000)
[0,i]: Fourier signal of graph i at 1000 points for
hamiltonian XY
[1,i]: frequencies associated to graph i at 1000 points
for hamiltonian XY
- fs_is: same for the Ising hamiltonian
"""
fs_xy = np.zeros((2, len(graph_list), 1000))
fs_is = np.zeros((2, len(graph_list), 1000))
for i, graph in enumerate(graph_list):
fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='xy')
fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='ising')
return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns: | - all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns of | -------- | random_line_split |
utils.py | hist1[1][j]:
j += 1
masses1.append((b-bins[i]) * hist1[0][j-1])
if b <= hist2[1][0]:
masses2.append(0.)
elif b > hist2[1][-1]:
masses2.append(0.)
else:
j = 0
while b > hist2[1][j]:
j += 1
masses2.append((b-bins[i]) * hist2[0][j-1])
masses1 = np.array(masses1)
masses2 = np.array(masses2)
masses12 = (masses1+masses2)/2
return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
"""
Returns the fourier transform of evolution for a list of graphs for
the hamiltonian ising and xy.
Arguments:
---------
- graph_list: list or numpy.Ndarray of networkx.Graph objects
Returns:
--------
- fs_xy: numpy.Ndarray of shape (2, len(graph_list), 1000)
[0,i]: Fourier signal of graph i at 1000 points for
hamiltonian XY
[1,i]: frequencies associated to graph i at 1000 points
for hamiltonian XY
- fs_is: same for the Ising hamiltonian
"""
fs_xy = np.zeros((2, len(graph_list), 1000))
fs_is = np.zeros((2, len(graph_list), 1000))
for i, graph in enumerate(graph_list):
fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='xy')
fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
rot_init=rot_init,
N_sample=1000,
hamiltonian='ising')
return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
"""
Returns the final state after the following evolution:
- start with empty sate with as many qubits as vertices of G
- uniform superposition of all states
- alternating evolution of H_evol during times, and H_m during pulses
Arguments:
---------
- G: graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
Returns:
--------
- state: qutip.Qobj final state of evolution
"""
assert evol in ['xy', 'ising']
assert len(times) == len(pulses)
N_nodes = G.number_of_nodes()
H_evol = generate_Ham_from_graph(G, type_h=evol)
H_m = generate_mixing_Ham(N_nodes)
state = generate_empty_initial_state(N_nodes)
opts = Options()
opts.store_states = True
result = sesolve(H_m, state, [0, np.pi/4], options=opts)
state = result.states[-1]
for i, theta in enumerate(pulses):
if np.abs(times[i]) > 0:
if evol == 'xy':
result = sesolve(H_evol, state, [0, times[i]], options=opts)
state = result.states[-1]
else:
hexp = (- times[i] * 1j * H_evol).expm()
state = hexp * state
if np.abs(theta) > 0:
result = sesolve(H_m, state, [0, theta], options=opts)
state = result.states[-1]
return state
def return_list_of_states(graphs_list,
times, pulses, evol='xy', verbose=0):
"""
Returns the list of states after evolution for each graph following
return_evolution functions.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- times: list of times to evolve following H_evol, list or np.ndarray
- pulses: list of times to evolve following H_m, list or np.ndarray
same length as times
- evol: type of evolution for H_evol 'ising' or 'xy'
- verbose: int, display the progression every verbose steps
Returns:
--------
- all_states: list of qutip.Qobj final states of evolution,
same lenght as graphs_list
"""
all_states = []
for G in tqdm(graphs_list, disable=verbose==0):
all_states.append(return_evolution(G, times, pulses, evol))
return all_states
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
"""
Returns all the discrete probability distributions of a diagonal
observable on a list of states each one associated with a graph. The
observable can be different for each state. The distribution is taken of
all possible values of all observables.
Arguments:
---------
- graphs_list: iterator of graph networkx.Graph objects
- all_states: list of qutip.Qobj states associated with graphs_list
- observable_func: function(networkx.Graph):
return qtip.Qobj diagonal observable
- return_energies: boolean
Returns:
--------
- all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
all discrete probability distributions
- e_values_unique: numpy.ndarray of shape (N_dim, )
if return_energies, all energies
"""
all_e_distrib = []
all_e_values_unique = []
for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
if observable_func == None:
observable = generate_Ham_from_graph(
G, type_h='ising', type_ising='z'
)
else:
observable = observable_func(G)
e_values = observable.data.diagonal().real
e_values_unique = np.unique(e_values)
state = all_states[i]
e_distrib = np.zeros(len(e_values_unique))
for j, v in enumerate(e_values_unique):
e_distrib[j] = np.sum(
(np.abs(state.data.toarray()) ** 2)[e_values == v]
)
all_e_distrib.append(e_distrib)
all_e_values_unique.append(e_values_unique)
e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
all_e_masses = []
for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
masses = np.zeros_like(e_values_unique)
for d, e in zip(e_distrib, e_values):
masses[e_values_unique == e] = d
all_e_masses.append(masses)
all_e_masses = np.array(all_e_masses)
if return_energies:
return all_e_masses, e_values_unique
return all_e_masses
def extend_energies(target_energies, energies, masses):
"""
Extends masses array with columns of zeros for missing energies.
Arguments:
---------
- target_energies: numpy.ndarray of shape (N_dim, ) target energies
- energies: numpy.ndarray of shape (N_dim_init, ) energies of distributions
- masses: numpy.ndarray of shape (N, N_dim_init) discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N, N_dim)
all extended discrete probability distributions
"""
energies = list(energies)
N = masses.shape[0]
res = np.zeros((N, len(target_energies)))
for i, energy in enumerate(target_energies):
if energy not in energies:
res[:, i] = np.zeros((N, ))
else:
res[:, i] = masses[:, energies.index(energy)]
return res
def merge_energies(e1, m1, e2, m2):
"""
Merge the arrays of energy masses, filling with zeros the missing energies in each.
N_dim is the size of the union of the energies from the two distributions.
Arguments:
---------
- e1: numpy.ndarray of shape (N_dim1, ) energies of first distributions
- m1: numpy.ndarray of shape (N1, N_dim1) first discrete probability distributions
- e2: numpy.ndarray of shape (N_dim2, ) energies of first distributions
- m2: numpy.ndarray of shape (N2, N_dim2) first discrete probability distributions
Returns:
--------
- numpy.ndarray of shape (N1, N_dim)
all extended first discrete probability distributions
- numpy.ndarray of shape (N2, N_dim)
all extended second discrete probability distributions
"""
e = sorted(list(set(e1) | set(e2)))
return extend_energies(e, e1, m1), extend_energies(e, e2, m2)
def | return_js_square_matrix | identifier_name | |
instance.go | }
cmdInstance = &Command{
Name: "instance",
Usage: "[OPTION]...",
Summary: "Operations to view instances.",
Subcommands: []*Command{
cmdInstanceListUpdates,
cmdInstanceListAppVersions,
cmdInstanceDeis,
},
}
cmdInstanceListUpdates = &Command{
Name: "instance list-updates",
Usage: "[OPTION]...",
Description: "Generates a list of instance updates.",
Run: instanceListUpdates,
}
cmdInstanceListAppVersions = &Command{
Name: "instance list-app-versions",
Usage: "[OPTION]...",
Description: "Generates a list of apps/versions with instance count.",
Run: instanceListAppVersions,
}
cmdInstanceDeis = &Command{
Name: "instance deis",
Usage: "[OPTION]...",
Description: "Simulate single deis to update instances.",
Run: instanceDeis,
}
)
func init() {
cmdInstanceListUpdates.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListUpdates.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string
SessionId string
Version string
AppId string
Track string
config *serverConfig
errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) OmahaRequest(otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" |
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false | {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
} | conditional_block |
instance.go | 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string
SessionId string
Version string
AppId string
Track string
config *serverConfig
errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) OmahaRequest(otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
}
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false, false)
// installed
fmt.Println("updated done")
// simulate reboot lock for a while
for c.pingsRemaining > 0 {
c.MakeRequest("", "", false, true)
c.pingsRemaining--
time.Sleep(1 * time.Second)
}
c.Log("updated from %s to %s\n", c.Version, uc.Manifest.Version)
c.Version = uc.Manifest.Version
_, err := c.MakeRequest("3", "2", false, false) // Send complete with new version.
if err != nil {
log.Println(err)
}
c.SessionId = uuid.New()
}
// Sleep between n and m seconds
func (c *Client) Loop(n, m int) {
interval := initialInterval
for {
randSleep(n, m)
resp, err := c.MakeRequest("3", "2", true, false)
if err != nil {
log.Println(err)
continue
}
uc := resp.Apps[0].UpdateCheck
if uc.Status != "ok" {
c.Log("update check status: %s\n", uc.Status)
} else {
c.SetVersion(resp)
}
}
}
// Sleeps randomly between n and m seconds.
func randSleep(n, m int) | {
r := m
if m-n > 0 {
r = rand.Intn(m-n) + n
}
time.Sleep(time.Duration(r) * time.Second)
} | identifier_body | |
instance.go | }
cmdInstance = &Command{
Name: "instance",
Usage: "[OPTION]...",
Summary: "Operations to view instances.",
Subcommands: []*Command{
cmdInstanceListUpdates,
cmdInstanceListAppVersions,
cmdInstanceDeis,
},
}
cmdInstanceListUpdates = &Command{
Name: "instance list-updates",
Usage: "[OPTION]...",
Description: "Generates a list of instance updates.",
Run: instanceListUpdates,
}
cmdInstanceListAppVersions = &Command{
Name: "instance list-app-versions",
Usage: "[OPTION]...",
Description: "Generates a list of apps/versions with instance count.",
Run: instanceListAppVersions,
}
cmdInstanceDeis = &Command{
Name: "instance deis",
Usage: "[OPTION]...",
Description: "Simulate single deis to update instances.",
Run: instanceDeis,
}
)
func init() {
cmdInstanceListUpdates.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListUpdates.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string
SessionId string
Version string
AppId string
Track string
config *serverConfig
errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) | (otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
}
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false | OmahaRequest | identifier_name |
instance.go | }
cmdInstance = &Command{
Name: "instance",
Usage: "[OPTION]...",
Summary: "Operations to view instances.",
Subcommands: []*Command{
cmdInstanceListUpdates,
cmdInstanceListAppVersions,
cmdInstanceDeis,
},
}
cmdInstanceListUpdates = &Command{
Name: "instance list-updates",
Usage: "[OPTION]...",
Description: "Generates a list of instance updates.",
Run: instanceListUpdates,
}
cmdInstanceListAppVersions = &Command{
Name: "instance list-app-versions",
Usage: "[OPTION]...",
Description: "Generates a list of apps/versions with instance count.",
Run: instanceListAppVersions,
}
cmdInstanceDeis = &Command{
Name: "instance deis",
Usage: "[OPTION]...",
Description: "Simulate single deis to update instances.",
Run: instanceDeis,
}
)
func init() {
cmdInstanceListUpdates.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListUpdates.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListUpdates.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.groupId, "group-id", "Group id")
cmdInstanceListAppVersions.Flags.Var(&instanceFlags.appId, "app-id", "App id")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.start, "start", 0, "Start date filter")
cmdInstanceListAppVersions.Flags.Int64Var(&instanceFlags.end, "end", 0, "End date filter")
cmdInstanceDeis.Flags.BoolVar(&instanceFlags.verbose, "verbose", false, "Print out the request bodies")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.clientsPerApp, "clients-per-app", 1, "Number of fake fents per appid.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.minSleep, "min-sleep", 5, "Minimum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.maxSleep, "max-sleep", 10, "Maximum time between update checks.")
cmdInstanceDeis.Flags.IntVar(&instanceFlags.errorRate, "errorrate", 1, "Chance of error (0-100)%.")
cmdInstanceDeis.Flags.StringVar(&instanceFlags.OEM, "oem", "fakeclient", "oem to report")
// simulate reboot lock.
cmdInstanceDeis.Flags.IntVar(&instanceFlags.pingOnly, "ping-only", 0, "halt update and just send ping requests this many times.")
cmdInstanceDeis.Flags.Var(&instanceFlags.appId, os.Getenv("DEISCTL_APP_ID"), "Application ID to update.")
instanceFlags.appId.required = true
cmdInstanceDeis.Flags.Var(&instanceFlags.groupId, os.Getenv("DEISCTL_GROUP_ID"), "Group ID to update.")
instanceFlags.groupId.required = true
cmdInstanceDeis.Flags.StringVar(&instanceFlags.version, "version", os.Getenv("DEISCTL_APP_VERSION"), "Version to report.")
}
func instanceListUpdates(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Clientupdate.List()
call.DateStart(instanceFlags.start)
call.DateEnd(instanceFlags.end)
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.groupId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tClientID\tVersion\tLastSeen\tGroup\tStatus\tOEM")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", cl.AppId,
cl.ClientId, cl.Version, cl.LastSeen, cl.GroupId,
cl.Status, cl.Oem)
}
out.Flush()
return OK
}
func instanceListAppVersions(args []string, service *update.Service, out *tabwriter.Writer) int {
call := service.Appversion.List()
if instanceFlags.groupId.Get() != nil {
call.GroupId(instanceFlags.groupId.String())
}
if instanceFlags.appId.Get() != nil {
call.AppId(instanceFlags.appId.String())
}
if instanceFlags.start != 0 {
call.DateStart(instanceFlags.start)
}
if instanceFlags.end != 0 {
call.DateEnd(instanceFlags.end)
}
list, err := call.Do()
if err != nil {
log.Fatal(err)
}
fmt.Fprintln(out, "AppID\tGroupID\tVersion\tClients")
for _, cl := range list.Items {
fmt.Fprintf(out, "%s\t%s\t%s\t%d\n", cl.AppId, cl.GroupId, cl.Version, cl.Count)
}
out.Flush()
return OK
}
//+ downloadDir + "deis.tar.gz"
func expBackoff(interval time.Duration) time.Duration {
interval = interval * 2
if interval > maxInterval {
interval = maxInterval
}
return interval
}
type serverConfig struct {
server string
}
type Client struct {
Id string | errorRate int
pingsRemaining int
}
func (c *Client) Log(format string, v ...interface{}) {
format = c.Id + ": " + format
fmt.Printf(format, v...)
}
func (c *Client) getCodebaseUrl(uc *omaha.UpdateCheck) string {
return uc.Urls.Urls[0].CodeBase
}
func (c *Client) updateservice() {
fmt.Println("starting systemd units")
files, _ := utils.ListFiles(downloadDir + "*.service")
fmt.Println(files)
}
func (c *Client) downloadFromUrl(url, fileName string) (err error) {
url = url + "deis.tar.gz"
fmt.Printf("Downloading %s to %s", url, fileName)
// TODO: check file existence first with io.IsExist
output, err := os.Create(downloadDir + fileName)
if err != nil {
fmt.Println("Error while creating", fileName, "-", err)
return
}
defer output.Close()
response, err := http.Get(url)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
defer response.Body.Close()
n, err := io.Copy(output, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(n, "bytes downloaded.")
return
}
func (c *Client) OmahaRequest(otype, result string, updateCheck, isPing bool) *omaha.Request {
req := omaha.NewRequest("lsb", "CoreOS", "", "")
app := req.AddApp(c.AppId, c.Version)
app.MachineID = c.Id
app.BootId = c.SessionId
app.Track = c.Track
app.OEM = instanceFlags.OEM
if updateCheck {
app.AddUpdateCheck()
}
if isPing {
app.AddPing()
app.Ping.LastReportDays = "1"
app.Ping.Status = "1"
}
if otype != "" {
event := app.AddEvent()
event.Type = otype
event.Result = result
if result == "0" {
event.ErrorCode = "2000"
} else {
event.ErrorCode = ""
}
}
return req
}
func (c *Client) MakeRequest(otype, result string, updateCheck, isPing bool) (*omaha.Response, error) {
client := &http.Client{}
req := c.OmahaRequest(otype, result, updateCheck, isPing)
raw, err := xml.MarshalIndent(req, "", " ")
if err != nil {
return nil, err
}
resp, err := client.Post(c.config.server+"/v1/update/", "text/xml", bytes.NewReader(raw))
if err != nil {
return nil, err
}
defer resp.Body.Close()
oresp := new(omaha.Response)
err = xml.NewDecoder(resp.Body).Decode(oresp)
if err != nil {
return nil, err
}
if instanceFlags.verbose {
raw, _ := xml.MarshalIndent(req, "", " ")
c.Log("request: %s\n", string(raw))
raw, _ = xml.MarshalIndent(oresp, "", " ")
c.Log("response: %s\n", string(raw))
}
return oresp, nil
}
func (c *Client) SetVersion(resp *omaha.Response) {
// A field can potentially be nil.
defer func() {
if err := recover(); err != nil {
c.Log("%s: error setting version: %v", c.Id, err)
}
}()
uc := resp.Apps[0].UpdateCheck
url := c.getCodebaseUrl(uc)
c.MakeRequest("13", "1", false, false)
c.downloadFromUrl(url, "deis.tar.gz")
utils.Extract(downloadDir+"deis.tar.gz", downloadDir)
c.MakeRequest("14", "1", false, false)
c.updateservice()
fmt.Println("updated done")
c.MakeRequest("3", "1", false, false)
| SessionId string
Version string
AppId string
Track string
config *serverConfig | random_line_split |
peer.rs | && version.start_height >= self.min_start_height
&& version.services & (NODE_BITCOIN_CASH | NODE_NETWORK) != 0
}
}
/// Node on the network to send and receive messages
///
/// It will setup a connection, respond to pings, and store basic properties about the connection,
/// but any real logic to process messages will be handled outside. Network messages received will
/// be published to an observable on the peer's receiver thread. Messages may be sent via send()
/// from any thread. Once shutdown, the Peer may no longer be used.
pub struct Peer {
/// Unique id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn version(&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header),
None => Message::read(&mut tcp_reader, magic),
};
// Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> | {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
tcp_stream.set_nonblocking(false)?;
// Write our version
let our_version = Message::Version(version);
debug!("{:?} Write {:#?}", self, our_version);
let magic = self.network.magic();
our_version.write(&mut tcp_stream, magic)?;
// Read their version
let msg = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, msg);
let their_version = match msg {
Message::Version(version) => version,
_ => return Err(Error::BadData("Unexpected command".to_string())), | identifier_body | |
peer.rs | all peers except for Bitcoin SV full nodes
#[derive(Clone, Default, Debug)]
pub struct SVPeerFilter {
pub min_start_height: i32,
}
impl SVPeerFilter {
/// Creates a new SV filter that requires a minimum starting chain height
pub fn new(min_start_height: i32) -> Arc<SVPeerFilter> {
Arc::new(SVPeerFilter { min_start_height })
}
}
impl PeerFilter for SVPeerFilter {
fn connectable(&self, version: &Version) -> bool {
version.user_agent.contains("Bitcoin SV")
&& version.start_height >= self.min_start_height
&& version.services & (NODE_BITCOIN_CASH | NODE_NETWORK) != 0
}
}
/// Node on the network to send and receive messages
///
/// It will setup a connection, respond to pings, and store basic properties about the connection,
/// but any real logic to process messages will be handled outside. Network messages received will
/// be published to an observable on the peer's receiver thread. Messages may be sent via send()
/// from any thread. Once shutdown, the Peer may no longer be used.
pub struct Peer {
/// Unique id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn version(&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header), | // Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
tcp | None => Message::read(&mut tcp_reader, magic),
};
| random_line_split |
peer.rs | all peers except for Bitcoin SV full nodes
#[derive(Clone, Default, Debug)]
pub struct SVPeerFilter {
pub min_start_height: i32,
}
impl SVPeerFilter {
/// Creates a new SV filter that requires a minimum starting chain height
pub fn new(min_start_height: i32) -> Arc<SVPeerFilter> {
Arc::new(SVPeerFilter { min_start_height })
}
}
impl PeerFilter for SVPeerFilter {
fn connectable(&self, version: &Version) -> bool {
version.user_agent.contains("Bitcoin SV")
&& version.start_height >= self.min_start_height
&& version.services & (NODE_BITCOIN_CASH | NODE_NETWORK) != 0
}
}
/// Node on the network to send and receive messages
///
/// It will setup a connection, respond to pings, and store basic properties about the connection,
/// but any real logic to process messages will be handled outside. Network messages received will
/// be published to an observable on the peer's receiver thread. Messages may be sent via send()
/// from any thread. Once shutdown, the Peer may no longer be used.
pub struct Peer {
/// Unique id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn | (&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header),
None => Message::read(&mut tcp_reader, magic),
};
// Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
| version | identifier_name |
peer.rs | id for this connection
pub id: ProcessUniqueId,
/// IP address
pub ip: IpAddr,
/// Port
pub port: u16,
/// Network
pub network: Network,
pub(crate) connected_event: Single<PeerConnected>,
pub(crate) disconnected_event: Single<PeerDisconnected>,
pub(crate) messages: Subject<PeerMessage>,
tcp_writer: Mutex<Option<TcpStream>>,
connected: AtomicBool,
time_delta: Mutex<i64>,
minfee: Mutex<u64>,
sendheaders: AtomicBool,
sendcmpct: AtomicBool,
version: Mutex<Option<Version>>,
/// Weak reference to self so we can pass ourselves in emitted events. This is a
/// bit ugly, but we hopefully can able to remove it once arbitrary self types goes in.
weak_self: Mutex<Option<Weak<Peer>>>,
}
impl Peer {
/// Creates a new peer and begins connecting
pub fn connect(
ip: IpAddr,
port: u16,
network: Network,
version: Version,
filter: Arc<dyn PeerFilter>,
) -> Arc<Peer> {
let peer = Arc::new(Peer {
id: ProcessUniqueId::new(),
ip,
port,
network,
connected_event: Single::new(),
disconnected_event: Single::new(),
messages: Subject::new(),
tcp_writer: Mutex::new(None),
connected: AtomicBool::new(false),
time_delta: Mutex::new(0),
minfee: Mutex::new(0),
sendheaders: AtomicBool::new(false),
sendcmpct: AtomicBool::new(false),
version: Mutex::new(None),
weak_self: Mutex::new(None),
});
*peer.weak_self.lock().unwrap() = Some(Arc::downgrade(&peer));
Peer::connect_internal(&peer, version, filter);
peer
}
/// Sends a message to the peer
pub fn send(&self, message: &Message) -> Result<()> {
if !self.connected.load(Ordering::Relaxed) {
return Err(Error::IllegalState("Not connected".to_string()));
}
let mut io_error: Option<io::Error> = None;
{
let mut tcp_writer = self.tcp_writer.lock().unwrap();
let mut tcp_writer = match tcp_writer.as_mut() {
Some(tcp_writer) => tcp_writer,
None => return Err(Error::IllegalState("No tcp stream".to_string())),
};
debug!("{:?} Write {:#?}", self, message);
if let Err(e) = message.write(&mut tcp_writer, self.network.magic()) {
io_error = Some(e);
} else {
if let Err(e) = tcp_writer.flush() {
io_error = Some(e);
}
}
}
match io_error {
Some(e) => {
self.disconnect();
Err(Error::IOError(e))
}
None => Ok(()),
}
}
/// Disconects and disables the peer
pub fn disconnect(&self) {
self.connected.swap(false, Ordering::Relaxed);
info!("{:?} Disconnecting", self);
let mut tcp_stream = self.tcp_writer.lock().unwrap();
if let Some(tcp_stream) = tcp_stream.as_mut() {
if let Err(e) = tcp_stream.shutdown(Shutdown::Both) {
warn!("{:?} Problem shutting down tcp stream: {:?}", self, e);
}
}
if let Some(peer) = self.strong_self() {
self.disconnected_event.next(&PeerDisconnected { peer });
}
}
/// Returns a Single that emits a message when connected
pub fn connected_event(&self) -> &impl Observable<PeerConnected> {
&self.connected_event
}
/// Returns a Single that emits a message when connected
pub fn disconnected_event(&self) -> &impl Observable<PeerDisconnected> {
&self.disconnected_event
}
/// Returns an Observable that emits network messages
pub fn messages(&self) -> &impl Observable<PeerMessage> {
&self.messages
}
/// Returns whether the peer is connected
pub fn connected(&self) -> bool {
self.connected.load(Ordering::Relaxed)
}
/// Returns the time difference in seconds between our time and theirs, which is valid after connecting
pub fn time_delta(&self) -> i64 {
*self.time_delta.lock().unwrap()
}
/// Returns the minimum fee this peer accepts in sats/1000bytes
pub fn minfee(&self) -> u64 {
*self.minfee.lock().unwrap()
}
/// Returns whether this peer may announce new blocks with headers instead of inv
pub fn sendheaders(&self) -> bool {
self.sendheaders.load(Ordering::Relaxed)
}
/// Returns whether compact blocks are supported
pub fn sendcmpct(&self) -> bool {
self.sendcmpct.load(Ordering::Relaxed)
}
/// Gets the version message received during the handshake
pub fn version(&self) -> Result<Version> {
match &*self.version.lock().unwrap() {
Some(ref version) => Ok(version.clone()),
None => Err(Error::IllegalState("Not connected".to_string())),
}
}
fn connect_internal(peer: &Arc<Peer>, version: Version, filter: Arc<dyn PeerFilter>) {
info!("{:?} Connecting to {:?}:{}", peer, peer.ip, peer.port);
let tpeer = peer.clone();
thread::spawn(move || {
let mut tcp_reader = match tpeer.handshake(version, filter) {
Ok(tcp_stream) => tcp_stream,
Err(e) => {
error!("Failed to complete handshake: {:?}", e);
tpeer.disconnect();
return;
}
};
// The peer is considered connected and may be written to now
info!("{:?} Connected to {:?}:{}", tpeer, tpeer.ip, tpeer.port);
tpeer.connected.store(true, Ordering::Relaxed);
tpeer.connected_event.next(&PeerConnected {
peer: tpeer.clone(),
});
let mut partial: Option<MessageHeader> = None;
let magic = tpeer.network.magic();
// Message reads over TCP must be all-or-nothing.
let mut tcp_reader = AtomicReader::new(&mut tcp_reader);
loop {
let message = match &partial {
Some(header) => Message::read_partial(&mut tcp_reader, header),
None => Message::read(&mut tcp_reader, magic),
};
// Always check the connected flag right after the blocking read so we exit right away,
// and also so that we don't mistake errors with the stream shutting down
if !tpeer.connected.load(Ordering::Relaxed) {
return;
}
match message {
Ok(message) => {
if let Message::Partial(header) = message {
partial = Some(header);
} else {
debug!("{:?} Read {:#?}", tpeer, message);
partial = None;
if let Err(e) = tpeer.handle_message(&message) {
error!("{:?} Error handling message: {:?}", tpeer, e);
tpeer.disconnect();
return;
}
tpeer.messages.next(&PeerMessage {
peer: tpeer.clone(),
message,
});
}
}
Err(e) => {
// If timeout, try again later. Otherwise, shutdown
if let Error::IOError(ref e) = e {
// Depending on platform, either TimedOut or WouldBlock may be returned to indicate a non-error timeout
if e.kind() == io::ErrorKind::TimedOut
|| e.kind() == io::ErrorKind::WouldBlock
{
continue;
}
}
error!("{:?} Error reading message {:?}", tpeer, e);
tpeer.disconnect();
return;
}
}
}
});
}
fn handshake(self: &Peer, version: Version, filter: Arc<dyn PeerFilter>) -> Result<TcpStream> {
// Connect over TCP
let tcp_addr = SocketAddr::new(self.ip, self.port);
let mut tcp_stream = TcpStream::connect_timeout(&tcp_addr, CONNECT_TIMEOUT)?;
tcp_stream.set_nodelay(true)?; // Disable buffering
tcp_stream.set_read_timeout(Some(HANDSHAKE_READ_TIMEOUT))?;
tcp_stream.set_nonblocking(false)?;
// Write our version
let our_version = Message::Version(version);
debug!("{:?} Write {:#?}", self, our_version);
let magic = self.network.magic();
our_version.write(&mut tcp_stream, magic)?;
// Read their version
let msg = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, msg);
let their_version = match msg {
Message::Version(version) => version,
_ => return Err(Error::BadData("Unexpected command".to_string())),
};
if !filter.connectable(&their_version) {
return Err(Error::IllegalState("Peer filtered out".to_string()));
}
let now = secs_since(UNIX_EPOCH) as i64;
*self.time_delta.lock().unwrap() = now - their_version.timestamp;
*self.version.lock().unwrap() = Some(their_version);
// Read their verack
let their_verack = Message::read(&mut tcp_stream, magic)?;
debug!("{:?} Read {:#?}", self, their_verack);
match their_verack {
Message::Verack => | {} | conditional_block | |
mod.rs | extension must be set in
/// `enabled_extensions`.
///
/// # Panics
///
/// - Panics if the `message_severity` or `message_type` members of any element of
/// `debug_utils_messengers` are empty.
///
/// # Safety
///
/// - The `user_callback` of each element of `debug_utils_messengers` must not make any calls
/// to the Vulkan API.
pub unsafe fn with_debug_utils_messengers(
library: Arc<VulkanLibrary>,
create_info: InstanceCreateInfo,
debug_utils_messengers: impl IntoIterator<Item = DebugUtilsMessengerCreateInfo>,
) -> Result<Arc<Instance>, InstanceCreationError> {
let InstanceCreateInfo {
application_name,
application_version,
mut enabled_extensions,
enabled_layers,
engine_name,
engine_version,
max_api_version,
enumerate_portability,
enabled_validation_features,
disabled_validation_features,
_ne: _,
} = create_info;
let (api_version, max_api_version) = {
let api_version = library.api_version();
let max_api_version = if let Some(max_api_version) = max_api_version {
max_api_version
} else if api_version < Version::V1_1 {
api_version
} else {
Version::HEADER_VERSION
};
(std::cmp::min(max_api_version, api_version), max_api_version)
};
// VUID-VkApplicationInfo-apiVersion-04010
assert!(max_api_version >= Version::V1_0);
let supported_extensions =
library.supported_extensions_with_layers(enabled_layers.iter().map(String::as_str))?;
let mut flags = ash::vk::InstanceCreateFlags::empty();
if enumerate_portability && supported_extensions.khr_portability_enumeration {
enabled_extensions.khr_portability_enumeration = true;
flags |= ash::vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR;
}
// Check if the extensions are correct
enabled_extensions.check_requirements(&supported_extensions, api_version)?;
// FIXME: check whether each layer is supported
let enabled_layers_cstr: Vec<CString> = enabled_layers
.iter()
.map(|name| CString::new(name.clone()).unwrap())
.collect();
let enabled_layers_ptrs = enabled_layers_cstr
.iter()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let enabled_extensions_cstr: Vec<CString> = (&enabled_extensions).into();
let enabled_extensions_ptrs = enabled_extensions_cstr
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let application_name_cstr = application_name.map(|name| CString::new(name).unwrap());
let engine_name_cstr = engine_name.map(|name| CString::new(name).unwrap());
let application_info = ash::vk::ApplicationInfo {
p_application_name: application_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
application_version: application_version
.try_into()
.expect("Version out of range"),
p_engine_name: engine_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
engine_version: engine_version.try_into().expect("Version out of range"),
api_version: max_api_version.try_into().expect("Version out of range"),
..Default::default()
};
let enable_validation_features_vk: SmallVec<[_; 5]> = enabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let disable_validation_features_vk: SmallVec<[_; 8]> = disabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let mut create_info_vk = ash::vk::InstanceCreateInfo {
flags,
p_application_info: &application_info,
enabled_layer_count: enabled_layers_ptrs.len() as u32,
pp_enabled_layer_names: enabled_layers_ptrs.as_ptr(),
enabled_extension_count: enabled_extensions_ptrs.len() as u32,
pp_enabled_extension_names: enabled_extensions_ptrs.as_ptr(),
..Default::default()
};
let mut validation_features_vk = None;
if !enabled_validation_features.is_empty() || !disabled_validation_features.is_empty() {
if !enabled_extensions.ext_validation_features {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.enabled_validation_features` or \
`create_info.disabled_validation_features` are not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_validation_features"],
..Default::default()
},
});
}
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02967
assert!(
!enabled_validation_features
.contains(&ValidationFeatureEnable::GpuAssistedReserveBindingSlot)
|| enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted)
);
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02968
assert!(
!(enabled_validation_features.contains(&ValidationFeatureEnable::DebugPrintf)
&& enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted))
);
let next = validation_features_vk.insert(ash::vk::ValidationFeaturesEXT {
enabled_validation_feature_count: enable_validation_features_vk.len() as u32,
p_enabled_validation_features: enable_validation_features_vk.as_ptr(),
disabled_validation_feature_count: disable_validation_features_vk.len() as u32,
p_disabled_validation_features: disable_validation_features_vk.as_ptr(),
..Default::default()
});
next.p_next = create_info_vk.p_next;
create_info_vk.p_next = next as *const _ as *const _;
}
// Handle debug messengers
let debug_utils_messengers = debug_utils_messengers.into_iter();
let mut debug_utils_messenger_create_infos =
Vec::with_capacity(debug_utils_messengers.size_hint().0);
let mut user_callbacks = Vec::with_capacity(debug_utils_messengers.size_hint().0);
for create_info in debug_utils_messengers {
let DebugUtilsMessengerCreateInfo {
message_type,
message_severity,
user_callback,
_ne: _,
} = create_info;
// VUID-VkInstanceCreateInfo-pNext-04926
if !enabled_extensions.ext_debug_utils {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.debug_utils_messengers` is not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_debug_utils"],
..Default::default()
},
});
}
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-parameter
// TODO: message_severity.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-requiredbitmask
assert!(!message_severity.is_empty());
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-parameter
// TODO: message_type.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-requiredbitmask
assert!(!message_type.is_empty());
// VUID-PFN_vkDebugUtilsMessengerCallbackEXT-None-04769
// Can't be checked, creation is unsafe.
let user_callback = Box::new(user_callback);
let create_info = ash::vk::DebugUtilsMessengerCreateInfoEXT {
flags: ash::vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: message_severity.into(),
message_type: message_type.into(),
pfn_user_callback: Some(trampoline),
p_user_data: &*user_callback as &Arc<_> as *const Arc<_> as *const c_void as *mut _,
..Default::default()
};
debug_utils_messenger_create_infos.push(create_info);
user_callbacks.push(user_callback);
}
for i in 1..debug_utils_messenger_create_infos.len() {
debug_utils_messenger_create_infos[i - 1].p_next =
&debug_utils_messenger_create_infos[i] as *const _ as *const _;
}
if let Some(info) = debug_utils_messenger_create_infos.first() {
create_info_vk.p_next = info as *const _ as *const _;
}
// Creating the Vulkan instance.
let handle = {
let mut output = MaybeUninit::uninit();
let fns = library.fns();
(fns.v1_0.create_instance)(&create_info_vk, ptr::null(), output.as_mut_ptr())
.result()
.map_err(VulkanError::from)?;
output.assume_init()
};
// Loading the function pointers of the newly-created instance.
let fns = {
InstanceFunctions::load(|name| {
library
.get_instance_proc_addr(handle, name.as_ptr())
.map_or(ptr::null(), |func| func as _)
})
};
Ok(Arc::new(Instance {
handle,
fns,
id: Self::next_id(),
api_version,
enabled_extensions,
enabled_layers,
library,
max_api_version,
_user_callbacks: user_callbacks,
}))
} |
/// Returns the Vulkan library used to create this instance.
#[inline] | random_line_split | |
mod.rs | self.enabled_extensions
}
/// Returns the layers that have been enabled on the instance.
#[inline]
pub fn enabled_layers(&self) -> &[String] {
&self.enabled_layers
}
/// Returns an iterator that enumerates the physical devices available.
///
/// # Examples
///
/// ```no_run
/// # use vulkano::{
/// # instance::{Instance, InstanceExtensions},
/// # Version, VulkanLibrary,
/// # };
///
/// # let library = VulkanLibrary::new().unwrap();
/// # let instance = Instance::new(library, Default::default()).unwrap();
/// for physical_device in instance.enumerate_physical_devices().unwrap() {
/// println!("Available device: {}", physical_device.properties().device_name);
/// }
/// ```
pub fn enumerate_physical_devices(
self: &Arc<Self>,
) -> Result<impl ExactSizeIterator<Item = Arc<PhysicalDevice>>, VulkanError> {
let fns = self.fns();
unsafe {
let handles = loop {
let mut count = 0;
(fns.v1_0.enumerate_physical_devices)(self.handle, &mut count, ptr::null_mut())
.result()
.map_err(VulkanError::from)?;
let mut handles = Vec::with_capacity(count as usize);
let result = (fns.v1_0.enumerate_physical_devices)(
self.handle,
&mut count,
handles.as_mut_ptr(),
);
match result {
ash::vk::Result::SUCCESS => {
handles.set_len(count as usize);
break handles;
}
ash::vk::Result::INCOMPLETE => (),
err => return Err(VulkanError::from(err)),
}
};
let physical_devices: SmallVec<[_; 4]> = handles
.into_iter()
.map(|handle| PhysicalDevice::from_handle(self.clone(), handle))
.collect::<Result<_, _>>()?;
Ok(physical_devices.into_iter())
}
}
}
impl Drop for Instance {
#[inline]
fn drop(&mut self) {
let fns = self.fns();
unsafe {
(fns.v1_0.destroy_instance)(self.handle, ptr::null());
}
}
}
unsafe impl VulkanObject for Instance {
type Handle = ash::vk::Instance;
#[inline]
fn handle(&self) -> Self::Handle {
self.handle
}
}
crate::impl_id_counter!(Instance);
impl Debug for Instance {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
let Self {
handle,
fns,
id: _,
api_version,
enabled_extensions,
enabled_layers,
library: function_pointers,
max_api_version,
_user_callbacks: _,
} = self;
f.debug_struct("Instance")
.field("handle", handle)
.field("fns", fns)
.field("api_version", api_version)
.field("enabled_extensions", enabled_extensions)
.field("enabled_layers", enabled_layers)
.field("function_pointers", function_pointers)
.field("max_api_version", max_api_version)
.finish_non_exhaustive()
}
}
/// Parameters to create a new `Instance`.
#[derive(Debug)]
pub struct InstanceCreateInfo {
/// A string of your choice stating the name of your application.
///
/// The default value is `None`.
pub application_name: Option<String>,
/// A version number of your choice specifying the version of your application.
///
/// The default value is zero.
pub application_version: Version,
/// The extensions to enable on the instance.
///
/// The default value is [`InstanceExtensions::empty()`].
pub enabled_extensions: InstanceExtensions,
/// The layers to enable on the instance.
///
/// The default value is empty.
pub enabled_layers: Vec<String>,
/// A string of your choice stating the name of the engine used to power the application.
pub engine_name: Option<String>,
/// A version number of your choice specifying the version of the engine used to power the
/// application.
///
/// The default value is zero.
pub engine_version: Version,
/// The highest Vulkan API version that the application will use with the instance.
///
/// Usually, you will want to leave this at the default.
///
/// The default value is [`Version::HEADER_VERSION`], but if the
/// supported instance version is 1.0, then it will be 1.0.
pub max_api_version: Option<Version>,
/// Include [portability subset](crate::instance#portability-subset-devices-and-the-enumerate_portability-flag)
/// devices when enumerating physical devices.
///
/// If you enable this flag, you must ensure that your program is prepared to handle the
/// non-conformant aspects of these devices.
///
/// If this flag is not enabled, and there are no fully-conformant devices on the system, then
/// [`Instance::new`] will return an `IncompatibleDriver` error.
///
/// The default value is `false`.
///
/// # Notes
///
/// If this flag is enabled, and the
/// [`khr_portability_enumeration`](crate::instance::InstanceExtensions::khr_portability_enumeration)
/// extension is supported, it will be enabled automatically when creating the instance.
/// If the extension is not supported, this flag will be ignored.
pub enumerate_portability: bool,
/// Features of the validation layer to enable.
///
/// If not empty, the
/// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
/// extension must be enabled on the instance.
pub enabled_validation_features: Vec<ValidationFeatureEnable>,
/// Features of the validation layer to disable.
///
/// If not empty, the
/// [`ext_validation_features`](crate::instance::InstanceExtensions::ext_validation_features)
/// extension must be enabled on the instance.
pub disabled_validation_features: Vec<ValidationFeatureDisable>,
pub _ne: crate::NonExhaustive,
}
impl Default for InstanceCreateInfo {
#[inline]
fn default() -> Self {
Self {
application_name: None,
application_version: Version::major_minor(0, 0),
enabled_extensions: InstanceExtensions::empty(),
enabled_layers: Vec::new(),
engine_name: None,
engine_version: Version::major_minor(0, 0),
max_api_version: None,
enumerate_portability: false,
enabled_validation_features: Vec::new(),
disabled_validation_features: Vec::new(),
_ne: crate::NonExhaustive(()),
}
}
}
impl InstanceCreateInfo {
/// Returns an `InstanceCreateInfo` with the `application_name` and `application_version` set
/// from information in your crate's Cargo.toml file.
///
/// # Panics
///
/// - Panics if the required environment variables are missing, which happens if the project
/// wasn't built by Cargo.
#[inline]
pub fn application_from_cargo_toml() -> Self {
Self {
application_name: Some(env!("CARGO_PKG_NAME").to_owned()),
application_version: Version {
major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(),
},
..Default::default()
}
}
}
/// Error that can happen when creating an instance.
#[derive(Clone, Debug)]
pub enum InstanceCreationError {
/// Not enough memory.
OomError(OomError),
/// Failed to initialize for an implementation-specific reason.
InitializationFailed,
/// One of the requested layers is missing.
LayerNotPresent,
/// One of the requested extensions is not supported by the implementation.
ExtensionNotPresent,
/// The version requested is not supported by the implementation.
IncompatibleDriver,
/// A restriction for an extension was not met.
ExtensionRestrictionNotMet(ExtensionRestrictionError),
RequirementNotMet {
required_for: &'static str,
requires_one_of: RequiresOneOf,
},
}
impl Error for InstanceCreationError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::OomError(err) => Some(err),
_ => None,
}
}
}
impl Display for InstanceCreationError {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
match self {
Self::OomError(_) => write!(f, "not enough memory available"),
Self::InitializationFailed => write!(f, "initialization failed"),
Self::LayerNotPresent => write!(f, "layer not present"),
Self::ExtensionNotPresent => write!(f, "extension not present"),
Self::IncompatibleDriver => write!(f, "incompatible driver"),
Self::ExtensionRestrictionNotMet(err) => Display::fmt(err, f),
Self::RequirementNotMet {
required_for,
requires_one_of,
} => write!(
f,
"a requirement was not met for: {}; requires one of: {}",
required_for, requires_one_of,
),
}
}
}
impl From<OomError> for InstanceCreationError {
fn | from | identifier_name | |
mod.rs | _version) = max_api_version {
max_api_version
} else if api_version < Version::V1_1 {
api_version
} else {
Version::HEADER_VERSION
};
(std::cmp::min(max_api_version, api_version), max_api_version)
};
// VUID-VkApplicationInfo-apiVersion-04010
assert!(max_api_version >= Version::V1_0);
let supported_extensions =
library.supported_extensions_with_layers(enabled_layers.iter().map(String::as_str))?;
let mut flags = ash::vk::InstanceCreateFlags::empty();
if enumerate_portability && supported_extensions.khr_portability_enumeration {
enabled_extensions.khr_portability_enumeration = true;
flags |= ash::vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR;
}
// Check if the extensions are correct
enabled_extensions.check_requirements(&supported_extensions, api_version)?;
// FIXME: check whether each layer is supported
let enabled_layers_cstr: Vec<CString> = enabled_layers
.iter()
.map(|name| CString::new(name.clone()).unwrap())
.collect();
let enabled_layers_ptrs = enabled_layers_cstr
.iter()
.map(|layer| layer.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let enabled_extensions_cstr: Vec<CString> = (&enabled_extensions).into();
let enabled_extensions_ptrs = enabled_extensions_cstr
.iter()
.map(|extension| extension.as_ptr())
.collect::<SmallVec<[_; 2]>>();
let application_name_cstr = application_name.map(|name| CString::new(name).unwrap());
let engine_name_cstr = engine_name.map(|name| CString::new(name).unwrap());
let application_info = ash::vk::ApplicationInfo {
p_application_name: application_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
application_version: application_version
.try_into()
.expect("Version out of range"),
p_engine_name: engine_name_cstr
.as_ref()
.map(|s| s.as_ptr())
.unwrap_or(ptr::null()),
engine_version: engine_version.try_into().expect("Version out of range"),
api_version: max_api_version.try_into().expect("Version out of range"),
..Default::default()
};
let enable_validation_features_vk: SmallVec<[_; 5]> = enabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let disable_validation_features_vk: SmallVec<[_; 8]> = disabled_validation_features
.iter()
.copied()
.map(Into::into)
.collect();
let mut create_info_vk = ash::vk::InstanceCreateInfo {
flags,
p_application_info: &application_info,
enabled_layer_count: enabled_layers_ptrs.len() as u32,
pp_enabled_layer_names: enabled_layers_ptrs.as_ptr(),
enabled_extension_count: enabled_extensions_ptrs.len() as u32,
pp_enabled_extension_names: enabled_extensions_ptrs.as_ptr(),
..Default::default()
};
let mut validation_features_vk = None;
if !enabled_validation_features.is_empty() || !disabled_validation_features.is_empty() {
if !enabled_extensions.ext_validation_features {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.enabled_validation_features` or \
`create_info.disabled_validation_features` are not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_validation_features"],
..Default::default()
},
});
}
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02967
assert!(
!enabled_validation_features
.contains(&ValidationFeatureEnable::GpuAssistedReserveBindingSlot)
|| enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted)
);
// VUID-VkValidationFeaturesEXT-pEnabledValidationFeatures-02968
assert!(
!(enabled_validation_features.contains(&ValidationFeatureEnable::DebugPrintf)
&& enabled_validation_features.contains(&ValidationFeatureEnable::GpuAssisted))
);
let next = validation_features_vk.insert(ash::vk::ValidationFeaturesEXT {
enabled_validation_feature_count: enable_validation_features_vk.len() as u32,
p_enabled_validation_features: enable_validation_features_vk.as_ptr(),
disabled_validation_feature_count: disable_validation_features_vk.len() as u32,
p_disabled_validation_features: disable_validation_features_vk.as_ptr(),
..Default::default()
});
next.p_next = create_info_vk.p_next;
create_info_vk.p_next = next as *const _ as *const _;
}
// Handle debug messengers
let debug_utils_messengers = debug_utils_messengers.into_iter();
let mut debug_utils_messenger_create_infos =
Vec::with_capacity(debug_utils_messengers.size_hint().0);
let mut user_callbacks = Vec::with_capacity(debug_utils_messengers.size_hint().0);
for create_info in debug_utils_messengers {
let DebugUtilsMessengerCreateInfo {
message_type,
message_severity,
user_callback,
_ne: _,
} = create_info;
// VUID-VkInstanceCreateInfo-pNext-04926
if !enabled_extensions.ext_debug_utils {
return Err(InstanceCreationError::RequirementNotMet {
required_for: "`create_info.debug_utils_messengers` is not empty",
requires_one_of: RequiresOneOf {
instance_extensions: &["ext_debug_utils"],
..Default::default()
},
});
}
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-parameter
// TODO: message_severity.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageSeverity-requiredbitmask
assert!(!message_severity.is_empty());
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-parameter
// TODO: message_type.validate_instance()?;
// VUID-VkDebugUtilsMessengerCreateInfoEXT-messageType-requiredbitmask
assert!(!message_type.is_empty());
// VUID-PFN_vkDebugUtilsMessengerCallbackEXT-None-04769
// Can't be checked, creation is unsafe.
let user_callback = Box::new(user_callback);
let create_info = ash::vk::DebugUtilsMessengerCreateInfoEXT {
flags: ash::vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: message_severity.into(),
message_type: message_type.into(),
pfn_user_callback: Some(trampoline),
p_user_data: &*user_callback as &Arc<_> as *const Arc<_> as *const c_void as *mut _,
..Default::default()
};
debug_utils_messenger_create_infos.push(create_info);
user_callbacks.push(user_callback);
}
for i in 1..debug_utils_messenger_create_infos.len() {
debug_utils_messenger_create_infos[i - 1].p_next =
&debug_utils_messenger_create_infos[i] as *const _ as *const _;
}
if let Some(info) = debug_utils_messenger_create_infos.first() {
create_info_vk.p_next = info as *const _ as *const _;
}
// Creating the Vulkan instance.
let handle = {
let mut output = MaybeUninit::uninit();
let fns = library.fns();
(fns.v1_0.create_instance)(&create_info_vk, ptr::null(), output.as_mut_ptr())
.result()
.map_err(VulkanError::from)?;
output.assume_init()
};
// Loading the function pointers of the newly-created instance.
let fns = {
InstanceFunctions::load(|name| {
library
.get_instance_proc_addr(handle, name.as_ptr())
.map_or(ptr::null(), |func| func as _)
})
};
Ok(Arc::new(Instance {
handle,
fns,
id: Self::next_id(),
api_version,
enabled_extensions,
enabled_layers,
library,
max_api_version,
_user_callbacks: user_callbacks,
}))
}
/// Returns the Vulkan library used to create this instance.
#[inline]
pub fn library(&self) -> &Arc<VulkanLibrary> {
&self.library
}
/// Returns the Vulkan version supported by the instance.
///
/// This is the lower of the
/// [driver's supported version](crate::VulkanLibrary::api_version) and
/// [`max_api_version`](Instance::max_api_version).
#[inline]
pub fn api_version(&self) -> Version {
self.api_version
}
/// Returns the maximum Vulkan version that was specified when creating the instance.
#[inline]
pub fn max_api_version(&self) -> Version {
self.max_api_version
}
/// Returns pointers to the raw Vulkan functions of the instance.
#[inline]
pub fn fns(&self) -> &InstanceFunctions {
&self.fns
}
/// Returns the extensions that have been enabled on the instance.
#[inline]
pub fn enabled_extensions(&self) -> &InstanceExtensions {
&self.enabled_extensions
}
/// Returns the layers that have been enabled on the instance.
#[inline]
pub fn enabled_layers(&self) -> &[String] | {
&self.enabled_layers
} | identifier_body | |
copy_check.py | try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response):
alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp)
def | (subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0 | getName | identifier_name |
copy_check.py | try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response):
alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
|
def getName(subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0 | possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp) | identifier_body |
copy_check.py | :
try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response): |
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp)
def getName(subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0 | alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue | random_line_split |
copy_check.py | try:
dirList.remove(copied)
except:
continue
#removing folder names begining with '.' and '$'
withDot=[i for i in dirList if i.startswith('.')]
withDol=[i for i in dirList if i.startswith('$')]
dirList = [item for item in dirList if item not in withDot]
dirList = [item for item in dirList if item not in withDol]
for folderName in dirList:
subjFolder = os.path.join(backUpFrom,folderName)
stat = os.stat(subjFolder)
created = os.stat(subjFolder).st_mtime
asciiTime = time.asctime( time.gmtime( created ) )
print '''
------------------------------------
------{0}
created on ( {1} )
------------------------------------
'''.format(folderName,asciiTime)
response = raw_input('\nIs this the name of the subject you want to back up? [Yes/No/Quit/noCall] :')
if re.search('[yY]|[yY][Ee][Ss]',response):
backUpAppend(subjFolder)
elif re.search('[Dd][Oo][Nn][Ee]|stop|[Qq][Uu][Ii][Tt]|exit',response):
break
elif re.search('[Nn][Oo][Cc][Aa][Ll][Ll]',response):
alreadyCopied.append(folderName)
post_check(backUpFrom)
else:
continue
def backUpAppend(subjFolder):
print '\n'
#countFile contains the tuples of image name and count number
#countFile=[(image name,count number)]
groupName,countFile=countCheck(subjFolder)
#groupName=(group,baseline)
subjInitial,fullname,subjNum=getName(subjFolder)
#if it is a follow up study
if groupName[1]=='baseline':
targetDir=os.path.join(backUpTo,groupName[0])
#For BADUK
if groupName[0]=='BADUK':
cntpro=raw_input('\tCNT / PRO ? :')
targetDir=os.path.join(backUpTo,groupName[0]+'/'+cntpro.upper())
maxNum = maxGroupNum(targetDir)
else:
targetDir=os.path.join(backUpTo,groupName[0])
targetDir=os.path.join(targetDir,'Follow_up')
maxNum = maxGroupNum(targetDir)
if groupName[1]=='baseline':
targetName=groupName[0]+maxNum+'_'+subjInitial
if groupName[0]=='BADUK':
targetName='BADUK_'+cntpro.upper()+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
else:
targetName='fu_'+groupName[0]+maxNum+'_'+subjInitial
targetFolder=os.path.join(targetDir,targetName)
print '\t{0} will be saved as {1} in \n\t{2}'.format(os.path.basename(subjFolder),targetName,targetFolder)
os.system('touch .tmp{0}'.format(maxNum))
if re.search('[yY]|[yY][eE][sS]',raw_input('\tCheck? [Yes/No] :')):
birthday=raw_input('\tDate of birth? [yyyy-mm-dd] : ')
note=raw_input('\tAny note ? :')
toBackUp=(subjFolder,targetFolder,fullname,subjNum,groupName,note,targetDir,birthday)
backupList.append(toBackUp)
copiedAtThisRound.append(os.path.basename(subjFolder))
print '\t------\n\tQued to be copied!'
makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile)
def makeTable(fullname,subjInitial,subjNum,groupName,targetName,countFile):
print fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,countFile
print '{}\t{}\t{}\t{}\t{}\t'.format(fullname,subjInitial,subjNum,groupName[0],targetName),
#grep image numbers
t1 = re.compile(r'TFL\S*|\S*T1\S*|\S*t1\S*')
#dti = re.compile(r'\S*[Dd][Tt][Ii]\S*')
#dki = re.compile(r'\S*[Dd][Kk][Ii]\S*')
dti = re.compile(r'[Dd][Tt][Ii]\S*\(.\)_\d+\S*')
dki = re.compile(r'[Dd][Kk][Ii]\S*\(.\)_\d+\S*')
rest = re.compile(r'\S*[Rr][Ee][Ss][Tt]\S*')
t2flair = re.compile(r'\S*[Ff][Ll][Aa][Ii][Rr]\S*')
t2tse = re.compile(r'\S*[Tt][Ss][Ee]\S*')
#T1, DTI, DKI, REST, T2FLAIR, T2TSE
imageNums=[]
for imagePattern in t1,dti,dki,rest,t2flair,t2tse:
nameUsed = imagePattern.search(' '.join(countFile.viewkeys()))
if nameUsed:
imageNums.append(str(countFile.get(nameUsed.group(0))))
else:
imageNums.append(str(0))
print '{}\t{}\t{}\t{}\t{}\t{}'.format(imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5])
totalList=[fullname,subjInitial,subjNum,groupName[0],groupName[1],targetName,imageNums[0],imageNums[1],imageNums[2],imageNums[3],imageNums[4],imageNums[5],time.ctime(time.time()),getpass.getuser()]
print totalList
f = open(os.path.join(backUpFrom,'spread.txt'),'a')
f.write('\t'.join(totalList))
f.write('\n')
f.close()
#'DKI_30D_B-VALUE_NB_06_(3)_0010' 'DTI_64D_B1K(2)_FA_0008' 'DKI_30D_B-VALUE_NB_06_(3)_COLFA_0013' 'PHOENIXZIPREPORT_0099' 'DKI_30D_B-VALUE_NB_06_(3)_EXP_0011' 'REST_FMRI_PHASE_116_(1)_0005' 'DKI_30D_B-VALUE_NB_06_(3)_FA_0012'
def countCheck(subjFolder):
emptyList = {}
countResult=tree.tree(subjFolder,emptyList,'\t')
print '\n'
if re.search('[yY]|[yY][eE][sS]',raw_input('\tDo file numbers match? [Yes/No] :')):
initialList.append(countResult)
#groupName is a tuple
groupName = group()
return groupName,countResult
print '\t{0}\n\tadded to the back up list\n\n'.format(subjFolder)
else:
print '\tNumbers does not match, will return error.\n\tCheck the directory manually'
def group():
possibleGroups = str('BADUK,CHR,DNO,EMO,FEP,GHR,NOR,OCM,ONS,OXY,PAIN,SPR,UMO').split(',')
groupName=''
while groupName=='':
|
def getName(subjFolder):
'''
will try getting the name and subj number from the source folder first
if it fails,
will require user to type in the subjs' name
'''
if re.findall('\d{8}',os.path.basename(subjFolder)):
subjNum = re.search('(\d{8})',os.path.basename(subjFolder)).group(0)
subjName = re.findall('[^\W\d_]+',os.path.basename(subjFolder))
#Appending first letters
subjInitial=''
for i in subjName:
subjInitial = subjInitial + i[0]
fullname=''
for i in subjName:
fullname = fullname + i[0] + i[1:].lower()
return subjInitial, fullname, subjNum
#if the folder shows no pattern
else:
subjName = raw_input('\tEnter the name of the subject in English eg.Cho Kang Ik:')
subjNum = raw_input("\tEnter subject's 8digit number eg.45291835:")
subjwords=subjName.split(' ')
fullname=''
subjInitial=''
for i in subjwords:
fullname=fullname + i[0 | groupName=raw_input('\twhich group ? [BADUK/CHR/DNO/EMO/FEP/GHR/NOR/OCM/ONS/OXY/PAIN/SPR/UMO] :')
followUp=raw_input('\tfollow up (if follow up, type the period) ? [baseline/period] :')
groupName = groupName.upper()
if groupName not in possibleGroups:
print 'not in groups, let Kevin know.'
groupName=''
else:
return (groupName,followUp) | conditional_block |
client.js | ,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 ); | }
function checkLength( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
}
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
attribution: 'Map data © <a href="http://openstreetmap.org">OpenStreetMap</a> contributors, <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="http://mapbox.com">Mapbox</a>',
maxZoom: 18,
id: 'mapbox.streets',
accessToken: 'pk.eyJ1IjoiamtuYXJ | random_line_split | |
client.js | ,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 );
}
function | ( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
}
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
attribution: 'Map data © <a href="http://openstreetmap.org">OpenStreetMap</a> contributors, <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="http://mapbox.com">Mapbox</a>',
maxZoom: 18,
id: 'mapbox.streets',
accessToken: 'pk.eyJ1IjoiamtuYX | checkLength | identifier_name |
client.js |
jQuery(function(){
if(jQuery("#farmers").length > 0){
var dialog, form,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 );
}
function checkLength( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
}
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={ | {
var nameEQ = name + "=";
var ca = document.cookie.split(';');
for(var i=0;i < ca.length;i++) {
var c = ca[i];
while (c.charAt(0)==' ') c = c.substring(1,c.length);
if (c.indexOf(nameEQ) == 0) return c.substring(nameEQ.length,c.length);
}
return null;
} | identifier_body | |
client.js | ,
// From http://www.whatwg.org/specs/web-apps/current-work/multipage/states-of-the-type-attribute.html#e-mail-state-%28type=email%29
emailRegex = /^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/,
name = $( "#name" ),
email = $( "#email" ),
password = $( "#password" ),
allFields = $( [] ).add( name ).add( email ).add( password ),
tips = $( ".validateTips" );
function updateTips( t ) {
tips
.text( t )
.addClass( "ui-state-highlight" );
setTimeout(function() {
tips.removeClass( "ui-state-highlight", 1500 );
}, 500 );
}
function checkLength( o, n, min, max ) {
if ( o.val().length > max || o.val().length < min ) {
o.addClass( "ui-state-error" );
updateTips( "Length of " + n + " must be between " +
min + " and " + max + "." );
return false;
} else {
return true;
}
}
function checkRegexp( o, regexp, n ) {
if ( !( regexp.test( o.val() ) ) ) {
o.addClass( "ui-state-error" );
updateTips( n );
return false;
} else {
return true;
}
}
function addUser() {
// bypass all validations
//
var jFrm = jQuery("form.cls_frm_add_farmer");
data = jFrm.serializeObject();
console.log(jFrm.serializeObject());
var valid = true;
jQuery.ajax({
url: "/api/farmers/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
$( "#farmers tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.contact_no + "</td>" +
"<td>" + r.pin + "</td>" +
"<td>" + r.address + "</td>" +
"<td>remove</td>" +
"</tr>" );
dialog.dialog( "close" );
}
}
});
allFields.removeClass( "ui-state-error" );
return valid;
}
dialog = $( "#farmer-dialog-form" ).dialog({
autoOpen: false,
height: 400,
width: 350,
modal: true,
buttons: {
"Create farmer": addUser,
Cancel: function() {
dialog.dialog( "close" );
}
},
close: function() {
form[ 0 ].reset();
allFields.removeClass( "ui-state-error" );
}
});
jQuery("body").on("submit", "form.cls_frm_add_farmer", function( e ) {
e.preventDefault();
addUser();
});
jQuery('body').on("click", ".cls_add_farmer", function(){
dialog.dialog( "open" );
});
// cls_add_farmer, cls_remove_farmer, cls_farmer_edit
jQuery.get("/api/farmers/", function(r){
jQuery.each(r.results, function(i, obj){
obj.remove = "<a href='#' data-id='"+obj.id+"' class='cls_remove_farmer'>remove</a>";
obj.name1 = '<a href="#" class="cls_farmer_edit" data-id="'+obj.id+'">'+obj.name+'</a>';
});
jQuery("#farmers").DataTable( {
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "contact_no" },
{ "data": "pin" },
{ "data": "address" },
{ "data": "remove" },
]
} );
});
}
jQuery("body").on("click touch", "a.cls_add_farm", function(){
jQuery("div#id_frm_html").show('slow');
});
jQuery("body").on("submit", "form.cls_frm_save_farm", function(){
var jFrm = jQuery("form.cls_frm_save_farm");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) |
}
});
});
if(jQuery("table#farm").length > 0){
jQuery.get("/api/farm/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farmer_name = obj.farmer.name;
});
jQuery("table#farm").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "details" },
{ "data": "farmer_name" },
{ "data": "remove" },
]
});
});
}
if(jQuery("table#farm_field").length > 0){
jQuery.get("/api/farm-field/", function(r){
console.log(r);
jQuery.each(r.results, function(i, obj){
obj.name1 = '<a href="#" >'+obj.name+'</a>';
obj.remove = '<a>remove</a>';
obj.farm_name = obj.farm.name;
});
jQuery("table#farm_field").DataTable({
"data": r.results,
"columns": [
{ "data": "name1" },
{ "data": "farm_name" },
{ "data": "crop_type" },
{ "data": "season" },
{ "data": "field_from" },
{ "data": "field_to" },
{ "data": "remove" },
]
});
});
}
jQuery("body").on("submit", "form#id_frm_add_farm_field", function(){
var jFrm = jQuery("form#id_frm_add_farm_field");
data = jFrm.serializeObject();
var valid = true;
jQuery.ajax({
url: "/api/farm-field/",
type: "POST",
data: JSON.stringify(data),
headers: {
'X-CSRFToken': getCookie('csrftoken'),
"Content-Type": "application/json"
},
dataType: "json",
success: function(r){
console.log(r);
if ( r.id != void 0 ) {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.farm.name + "</td>" +
"<td>" + r.crop_type + "</td>" +
"<td>" + r.season + "</td>" +
"<td>" + r.field_from + "</td>" +
"<td>" + r.field_to + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_html_add_farm_field").hide('slow');
}
}
});
});
jQuery("body").on("click", ".cls_open_add_farm_field", function(){
jQuery("#id_html_add_farm_field").show('slow');
if(window.leaf_map){
var mymap = L.map('mapid', {drawControl: true}).setView([17.4062917, 78.4390537], 16);
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token={accessToken}', {
attribution: 'Map data © <a href="http://openstreetmap.org">OpenStreetMap</a> contributors, <a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, Imagery © <a href="http://mapbox.com">Mapbox</a>',
maxZoom: 18,
id: 'mapbox.streets',
accessToken: 'pk.eyJ1IjoiamtuY | {
jQuery( "table#farm tbody" ).append( "<tr>" +
"<td>" + r.name + "</td>" +
"<td>" + r.details + "</td>" +
"<td>" + r.farmer.name + "</td>" +
"<td>remove</td>" +
"</tr>" );
jFrm[0].reset();
jQuery("div#id_frm_html").hide();
} | conditional_block |
vcf.rs | x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap(); |
// Filter cutoffs
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0)
} else { 1.0 };
// Set allele freq. flags
let (lim, pos_adjust) = if desc[ar.ix].len() != desc[ref_ix].len() {
// This is an indel (size difference from reference)
let hp_size = (self.ref_seq[x].hpoly() & 0xf).max(self.ref_seq[x + 1].hpoly() & 0xf) + 1;
if hp_size >= self.homopolymer_limit {
flt |= FLT_HOMO_POLY
}
(indel_soft_lim, 0)
} else {
// In a complex variant, a SNV could start a few bases after the location of the variant
let x = if ar.ix == ref_ix { 0 } else {
// Find position of first base that differs between this allele and the reference
desc[ref_ix].iter().zip(desc[ar.ix].iter()).enumerate()
.find(|(_, (c1, c2))| *c1 != *c2).map(|(ix, _)| ix as u32).unwrap()
};
(snv_soft_lim, x)
};
if ar.res.freq < lim {
flt |= FLT_LOW_FREQ
}
// LR flags
if ar.res.lr_test < 30 {
flt |= FLT_Q30
}
// Blacklist
if self.cfg.blacklist(x + 1 + pos_adjust as usize) {
flt |= FLT_BLACKLIST
}
ExtraRes{ flt, avg_qual, fisher_strand, wilcox}
}).collect();
// Set wilcox allele flags
let mjr_qual = all_res[mj_idx].avg_qual;
for res in all_res.iter_mut() {
if res.wilcox <= thresh && mjr_qual - res.avg_qual > 2.0 {
res.flt |= FLT_QUAL_BIAS
}
}
// Genotype call
let f0 = vr.alleles[0].res.freq >= 1.0e-5;
let gt = match vr.alleles.len() |
// Reference allele
let ref_ix = vr.alleles[0].ix; | random_line_split |
vcf.rs | | x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> |
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0)
} else { 1.0 };
// Set allele freq. flags
let (lim, pos_adjust) = if desc[ar.ix].len() != desc[ref_ix].len() {
// This is an indel (size difference from reference)
let hp_size = (self.ref_seq[x].hpoly() & 0xf).max(self.ref_seq[x + 1].hpoly() & 0xf) + 1;
if hp_size >= self.homopolymer_limit {
flt |= FLT_HOMO_POLY
}
(indel_soft_lim, 0)
} else {
// In a complex variant, a SNV could start a few bases after the location of the variant
let x = if ar.ix == ref_ix { 0 } else {
// Find position of first base that differs between this allele and the reference
desc[ref_ix].iter().zip(desc[ar.ix].iter()).enumerate()
.find(|(_, (c1, c2))| *c1 != *c2).map(|(ix, _)| ix as u32).unwrap()
};
(snv_soft_lim, x)
};
if ar.res.freq < lim {
flt |= FLT_LOW_FREQ
}
// LR flags
if ar.res.lr_test < 30 {
flt |= FLT_Q30
}
// Blacklist
if self.cfg.blacklist(x + 1 + pos_adjust as usize) {
flt |= FLT_BLACKLIST
}
ExtraRes{ flt, avg_qual, fisher_strand, wilcox}
}).collect();
// Set wilcox allele flags
let mjr_qual = all_res[mj_idx].avg_qual;
for res in all_res.iter_mut() {
if res.wilcox <= thresh && mjr_qual - res.avg_qual > 2.0 {
res.flt |= FLT_QUAL_BIAS
}
}
// Genotype call
let f0 = vr.alleles[0].res.freq >= 1.0e-5;
let gt = match vr.alleles.len() | {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap();
// Reference allele
let ref_ix = vr.alleles[0].ix;
// Filter cutoffs | identifier_body |
vcf.rs | {
pub(crate) res: AlleleRes,
pub(crate) ix: usize,
}
pub(crate) struct VcfRes {
pub(crate) alleles: Vec<Allele>,
pub(crate) adesc: Option<Vec<AllDesc>>,
pub(crate) x: usize,
pub(crate) phred: u8,
}
// Additional allele specific results
#[derive(Default, Copy, Clone)]
struct ExtraRes {
flt: u32,
avg_qual: f64,
fisher_strand: f64,
wilcox: f64,
}
impl<'a, 'b, 'c> VcfCalc<'a, 'b, 'c> {
pub fn get_allele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> VcfRes {
// Should only be used for single base variants where we expect 5 'alleles'
// for A, C, G, T and Del
assert_eq!(cts.len(), 5);
let indel_flags = [false, false, false, false, true];
let ref_ix = (self.ref_seq[x].base()) as usize;
self.est_allele_freqs(x, ref_ix, cts, qcts, &indel_flags)
}
pub fn get_mallele_freqs(&self, x: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
self.est_allele_freqs(x,0, cts, qcts, indel_flags)
}
pub fn est_allele_freqs(&self, x: usize, ref_ix: usize, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]], indel_flags: &[bool]) -> VcfRes {
let n_alls = cts.len();
assert_eq!(n_alls, qcts.len());
let qual_model = self.cfg.qual_table();
// Fold across strands
let jcts: Vec<usize> = cts.iter().map(|x| x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap();
// Reference allele
let ref_ix = vr.alleles[0].ix;
// Filter cutoffs
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0 | Allele | identifier_name | |
vcf.rs | | x[0] + x[1]).collect();
// Sort possible alleles by reverse numeric order on counts except that the reference base is always first
let alleles: Vec<usize> = {
let mut ix: Vec<usize> = (0..n_alls).filter(|x| *x != ref_ix).collect();
ix.sort_unstable_by(|a, b| jcts[*b].cmp(&jcts[*a]));
// Remove alleles where alleles not seen on both strands (apart from reference)
let mut ix1 = Vec::with_capacity(n_alls);
ix1.push(ref_ix); // Reference allele;
for &k in ix.iter() {
if cts[k][0] > 0 && cts[k][1] > 0 && cts[k][0] + cts[k][1] > 2 {
ix1.push(k)
}
}
ix1
};
let mut mr = freq_mle(&alleles, qcts, qual_model);
if log_enabled!(Trace) {
trace!("mle freq. estimates");
for &k in alleles.iter() {
trace!("{}\t{}\t{}", k, mr.alleles[k].freq, indel_flags[k]);
}
}
// Remove non-reference alleles where the frequencies were estimated below the thresholds
let snv_lim = self.cfg.snv_threshold(ThresholdType::Hard);
let indel_lim = self.cfg.indel_threshold(ThresholdType::Hard);
for ar in mr.alleles.iter_mut() { ar.flag = false }
let alleles: Vec<_> = alleles.iter().enumerate()
.filter(|(i, &k)| *i == 0 || mr.alleles[k].freq >= match indel_flags[k] {
true => indel_lim,
false => snv_lim,
})
.map(|(_, &k)| k).collect();
for &i in alleles.iter() {
mr.alleles[i].flag =true;
}
// Adjust the frequencies to account for any alleles that have been filtered
let tot = alleles.iter().fold(0.0, |s, &x| s + mr.alleles[x].freq);
// Rescale if necessary
if tot < 1.0 {
assert!(tot > 0.0);
for ar in mr.alleles.iter_mut() {
if ar.flag {
ar.freq /= tot
} else {
ar.freq = 0.0;
}
}
}
let ModelRes{alleles: all, phred} = mr;
let (all, phred) = (all, phred);
let alleles: Vec<_> = alleles.iter().filter(|&&k| all[k].flag)
.map(|&k| Allele{ix: k, res: all[k]}).collect();
VcfRes{alleles, adesc: None, x, phred}
}
// Generate VCF output line for large deletions
pub fn del_output(&self, del: &LargeDeletion) -> String {
let mut f = String::new();
let cts = del.counts;
let fq = del.fq();
let sd = (fq * (1.0 - fq) / (del.n() as f64)).sqrt();
let z = fq / sd;
let phred = if z > 10.0 { MAX_PHRED }
else {
(pnormc(z).log10()*-10.0).round().min(MAX_PHRED as f64) as u8
};
let flt = if phred >= 30 { 0 } else { FLT_Q30 };
// ALT, QUAL, FILTER
let _ = write!(f, "<DEL>\t{}\t{}", phred, Filter(flt));
// INFO
let _ = write!(f, "\tSVTYPE=DEL;END={};SVLEN={};CIPOS={},{};CILEN={},{}", del.end() + 1,
del.length, del.pos_ci(0), del.pos_ci(1), del.len_ci(0), del.len_ci(1));
// FORMAT
let _ = write!(f, "\tGT:ADF:ADR:HPL\t0/1:{},{}:{},{}:{:.5}",
cts[0][0], cts[1][0], cts[0][1], cts[1][1], fq);
f
}
// Generate Optional String with VCF output line
pub fn output(&self, vr: &mut VcfRes, cts: &[[usize; 2]], qcts: &[[usize; N_QUAL]]) -> Option<String> {
let x = vr.x;
let raw_depth = cts.iter().fold(0, |t, x| t + x[0] + x[1]);
let thresh = 0.05 / (self.seq_len as f64);
// Sort alleles by frequency (keeping reference alleles at position 0)
vr.alleles[1..].sort_unstable_by(|a1, a2| a2.res.freq.partial_cmp(&a1.res.freq).unwrap());
// Find index of major allele
let (major_idx, mj_idx) = vr.alleles.iter().enumerate().max_by(|(_, ar1), (_, ar2)| ar1.res.freq.partial_cmp(&ar2.res.freq).unwrap())
.map(|(i, ar)| (ar.ix, i)).unwrap();
// Reference allele
let ref_ix = vr.alleles[0].ix;
// Filter cutoffs
let snv_soft_lim = self.cfg.snv_threshold(ThresholdType::Soft);
let indel_soft_lim = self.cfg.indel_threshold(ThresholdType::Soft);
let desc = vr.adesc.as_ref().unwrap_or(&self.all_desc);
// Extra per allele results
let mut all_res: Vec<_> = vr.alleles.iter().map(|ar| {
// Average quality
let (n, s) = qcts[ar.ix].iter().enumerate().fold((0, 0), |(n, s), (q, &ct)| {
(n + ct, s + ct * q)
});
let avg_qual = if n > 0 { s as f64 / n as f64 } else { 0.0 };
let mut flt = 0;
// Fisher strand test
let fisher_strand = if ar.ix != major_idx | else { 1.0 };
// Wilcoxon-Mann-Whitney test for quality bias between the major (most frequent)
// allele and all minor alleles
let wilcox = if ar.ix != major_idx {
mann_whitney(qcts, major_idx, ar.ix).unwrap_or(1.0)
} else { 1.0 };
// Set allele freq. flags
let (lim, pos_adjust) = if desc[ar.ix].len() != desc[ref_ix].len() {
// This is an indel (size difference from reference)
let hp_size = (self.ref_seq[x].hpoly() & 0xf).max(self.ref_seq[x + 1].hpoly() & 0xf) + 1;
if hp_size >= self.homopolymer_limit {
flt |= FLT_HOMO_POLY
}
(indel_soft_lim, 0)
} else {
// In a complex variant, a SNV could start a few bases after the location of the variant
let x = if ar.ix == ref_ix { 0 } else {
// Find position of first base that differs between this allele and the reference
desc[ref_ix].iter().zip(desc[ar.ix].iter()).enumerate()
.find(|(_, (c1, c2))| *c1 != *c2).map(|(ix, _)| ix as u32).unwrap()
};
(snv_soft_lim, x)
};
if ar.res.freq < lim {
flt |= FLT_LOW_FREQ
}
// LR flags
if ar.res.lr_test < 30 {
flt |= FLT_Q30
}
// Blacklist
if self.cfg.blacklist(x + 1 + pos_adjust as usize) {
flt |= FLT_BLACKLIST
}
ExtraRes{ flt, avg_qual, fisher_strand, wilcox}
}).collect();
// Set wilcox allele flags
let mjr_qual = all_res[mj_idx].avg_qual;
for res in all_res.iter_mut() {
if res.wilcox <= thresh && mjr_qual - res.avg_qual > 2.0 {
res.flt |= FLT_QUAL_BIAS
}
}
// Genotype call
let f0 = vr.alleles[0].res.freq >= 1.0e-5;
let gt = match vr.alleles.len() | {
let k = ar.ix;
let k1 = major_idx;
let all_cts = [cts[k][0], cts[k1][0], cts[k][1], cts[k1][1]];
let fs = self.ftest.fisher(&all_cts);
if ar.res.freq < 0.75 && fs <= thresh {
flt |= FLT_FS
}
fs
} | conditional_block |
Pak.py | cheon/issues/48
if head in unit:
continue
unit_list.append(unit)
produced_unit_list = unit_list
if self.stage_name == "build" and not self.is_nested:
if self.pak_format == "dpk":
is_deleted = False
if self.since_reference:
Ui.laconic("looking for deleted files")
# Unvanquished game did not support DELETED file until after 0.52.1.
workaround_no_delete = self.source_tree.game_name == "unvanquished" and self.since_reference in ["unvanquished/0.52.1", "v0.52.1"]
git_repo = Repository.Git(self.source_dir, "dpk", workaround_no_delete=workaround_no_delete)
previous_version = git_repo.computeVersion(self.since_reference, named_reference=True)
self.deps.set(self.pak_name, previous_version)
for deleted_file in git_repo.getDeletedFileList(self.since_reference):
if deleted_file not in deleted_file_list:
is_deleted = True
deleted_file_list.append(deleted_file)
if deleted_file_list:
is_deleted = True
for deleted_file in deleted_file_list:
self.deleted.set(self.pak_name, deleted_file)
if self.deleted.read():
is_deleted = True
if is_deleted:
deleted_part_list = self.deleted.translate()
# TODO: No need to mark as DELETED a file from the same
# package if it does not depend on itself.
# TODO: A way to not translate DELETED files may be needed
# in some cases.
# If flamer.jpg producing flamer.crn was replaced
# by flamer.png also producing flamer.crn, the
# flamer.crn file will be listed as deleted
# while it will be shipped, but built from another
# source file, so we must check deleted files
# aren't built in other way to avoid listing
# as deleted a file that is actually shipped.
for deleted_part_dict in deleted_part_list:
is_built = False
if deleted_part_dict["pak_name"] == self.pak_name:
deleted_part = deleted_part_dict["file_path"]
if deleted_part.startswith(Default.repository_config_dir + os.path.sep):
continue
if deleted_part.startswith(Default.legacy_pakinfo_dir + os.path.sep):
continue
if deleted_part in produced_file_list:
is_built = True
Ui.laconic(deleted_part + ": do nothing because it is produced by another source file.")
self.deleted.removePart(self.pak_name, deleted_part)
if not is_built:
Ui.laconic(deleted_part + ": will mark as deleted.")
# Writing DELETED file.
for deleted_part in deleted_part_list:
self.deleted.set(self.source_tree.pak_name, deleted_part)
is_deleted = self.deleted.write()
if is_deleted:
unit = {
"head": "DELETED",
"body": [ "DELETED" ],
}
produced_unit_list.append(unit)
else:
# Remove DELETED leftover from partial build.
self.deps.remove(self.test_dir)
is_deps = False
# add itself to DEPS if partial build,
# also look for deleted files
if self.since_reference:
is_deps = True
if self.deps.read():
is_deps = True
if is_deps:
# translating DEPS file
self.deps.translateTest()
self.deps.write()
unit = {
"head": "DEPS",
"body": [ "DEPS" ],
}
produced_unit_list.append(unit)
else:
# Remove DEPS leftover from partial build.
self.deps.remove(self.test_dir)
logging.debug("produced unit list:" + str(produced_unit_list))
# do not clean-up if building from temporary directories
# or if user asked to not clean-up
if clean_dust:
cleaner.cleanDust(self.test_dir, produced_unit_list, previous_file_list)
return produced_unit_list
def threadExtendRes(self, func, args, res):
# magic: only works if res is a mutable object (like a list)
res.extend(func(*args))
class Packager():
# TODO: reuse paktraces, do not walk for file,s
def __init__(self, source_tree, args):
self.source_dir = source_tree.dir
self.pak_vfs = source_tree.pak_vfs
self.pak_config = source_tree.pak_config
self.pak_format = source_tree.pak_format
self.allow_dirty = args.allow_dirty
self.no_compress = args.no_compress
self.test_dir = self.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
self.pak_file = self.pak_config.getPakFile(build_prefix=args.build_prefix, pak_prefix=args.pak_prefix, pak_file=args.pak_file, version_suffix=args.version_suffix)
self.game_profile = Game.Game(source_tree)
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(source_tree, self.test_dir, None)
self.deps = Repository.Deps(source_tree, self.test_dir)
def createSubdirs(self, pak_file):
pak_subdir = os.path.dirname(pak_file)
if pak_subdir == "":
pak_subdir = "."
if os.path.isdir(pak_subdir):
logging.debug("found pak subdir: " + pak_subdir)
else:
logging.debug("create pak subdir: " + pak_subdir)
os.makedirs(pak_subdir, exist_ok=True)
def run(self):
if not os.path.isdir(self.test_dir):
Ui.error("test pakdir not built: " + self.test_dir)
source_repository = Repository.Git(self.source_dir, self.pak_format)
if source_repository.isGit() and source_repository.isDirty():
if self.allow_dirty:
Ui.warning("Dirty repository: " + self.source_dir)
else:
Ui.error("Dirty repository isn't allowed to be packaged (use --allow-dirty to override): " + self.source_dir)
Ui.print("Packaging “" + self.test_dir + "” as: " + self.pak_file)
self.createSubdirs(self.pak_file)
logging.debug("opening: " + self.pak_file)
# remove existing file (do not write in place) to force the game engine to reread the file
if os.path.isfile(self.pak_file):
logging.debug("remove existing package: " + self.pak_file)
os.remove(self.pak_file)
if self.no_compress:
# why zlib.Z_NO_COMPRESSION not defined?
zipfile.zlib.Z_DEFAULT_COMPRESSION = 0
else:
# maximum compression
zipfile.zlib.Z_DEFAULT_COMPRESSION = zipfile.zlib.Z_BEST_COMPRESSION
found_file = False
paktrace_dir = Default.getPakTraceDir(self.test_dir)
relative_paktrace_dir = os.path.relpath(paktrace_dir, self.test_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
for file_name in file_name_list:
found_file = True
break
if found_file:
break
# FIXME: if only the DEPS file is modified, the package will
# not be created (it should be).
if not found_file:
Ui.print("Not writing empty package: " + self.pak_file)
return
pak = zipfile.ZipFile(self.pak_file, "w", zipfile.ZIP_DEFLATED)
for dir_name, subdir_name_list, file_name_list in os.walk(self.test_dir):
for file_name in file_name_list:
rel_dir_name = os.path.relpath(dir_name, self.test_dir)
full_path = os.path.join(dir_name, file_name)
file_path = os.path.relpath(full_path, self.test_dir)
# ignore paktrace files
if file_path.startswith(relative_paktrace_dir + os.path.sep):
continue
# ignore DELETED and DEPS file, will add it later
if self.pak_format == "dpk" and file_path in Repository.dpk_special_files:
continue
found_file = True
# TODO: add a mechanism to know if VFS supports
# symbolic links in packages or not.
# Dæmon's DPK VFS is supporting symbolic links.
# DarkPlaces' PK3 VFS is supporting symbolic links.
# Others may not.
is_symlink_supported = True
if is_symlink_supported and os.path.islink(full_path):
Ui.print("add symlink to package " + os.path.basename(self.pak_file) + ": " + file_path)
# TODO: Remove this test when Urcheon deletes extra
# files in build directory. Currently a deleted but not | # committed file is kept.
if os.path.exists(full_path):
# FIXME: getmtime reads realpath datetime, not symbolic link datetime.
file_date_time = (datetime.fromtimestamp(os.path.getmtime(full_path))) | random_line_split | |
Pak.py | , input_file_path)):
logging.debug("missing prepared files for “" + file_path + "”: " + input_file_path)
else:
logging.debug("found prepared files for “" + file_path + "”: " + input_file_path)
file_list.append(input_file_path)
else:
file_list = source_tree. | actions:
action_list.computeActions(file_list)
self.action_list = action_list
self.game_profile = Game.Game(source_tree)
if not self.map_profile:
map_config = MapCompiler.Config(source_tree)
self.map_profile = map_config.requireDefaultProfile()
def run(self):
if self.source_dir == self.test_dir:
Ui.print("Preparing: " + self.source_dir)
else:
Ui.print("Building “" + self.source_dir + "” as: " + self.test_dir)
# TODO: check if not a directory
if os.path.isdir(self.test_dir):
logging.debug("found build dir: " + self.test_dir)
else:
logging.debug("create build dir: " + self.test_dir)
os.makedirs(self.test_dir, exist_ok=True)
if not self.is_nested and not self.keep_dust:
clean_dust = True
else:
clean_dust = False
if clean_dust:
# do not read paktrace from temporary directories
# do not read paktrace if dust will be kept
paktrace = Repository.Paktrace(self.source_tree, self.test_dir)
previous_file_list = paktrace.listAll()
if self.clean_map or clean_dust:
cleaner = Cleaner(self.source_tree)
if self.clean_map:
cleaner.cleanMap(self.test_dir)
cpu_count = Parallelism.countCPU()
action_thread_list = []
produced_unit_list = []
main_process = Parallelism.getProcess()
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
# no need to use multiprocessing module to manage task contention, since each task will call its own process
# using threads on one core is faster, and it does not prevent tasks to be able to use other cores
# the is_nested argument is there to tell action to not do specific stuff because of recursion
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, map_profile=self.map_profile, is_nested=self.is_nested)
# check if task is already done (usually comparing timestamps the make way)
if action.isDone():
produced_unit_list.extend(action.getOldProducedUnitList())
continue
if not self.is_parallel or not action_type.is_parallel:
# tasks are run sequentially but they can
# use multiple threads themselves
thread_count = cpu_count
else:
# this compute is super slow because of process.children()
child_thread_count = Parallelism.countChildThread(main_process)
thread_count = max(1, cpu_count - child_thread_count)
action.thread_count = thread_count
if not self.is_parallel or not action_type.is_parallel:
# sequential build explicitely requested (like in recursion)
# or action that can't be run concurrently to others (like MergeBsp)
produced_unit_list.extend(action.run())
else:
# do not use >= in case of there is some extra thread we don't think about
# it's better to spawn an extra one than looping forever
while child_thread_count > cpu_count:
# no need to loop at full cpu speed
time.sleep(.05)
child_thread_count = Parallelism.countChildThread(main_process)
pass
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
action.thread_count = max(2, cpu_count - child_thread_count)
# wrapper does: produced_unit_list.extend(action.run())
action_thread = Parallelism.Thread(target=self.threadExtendRes, args=(action.run, (), produced_unit_list))
action_thread_list.append(action_thread)
action_thread.start()
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
# wait for all threads to end, otherwise it will start packaging next
# package while the building task for the current one is not ended
# and well, we now have to read that list to purge old files, so we
# must wait
Parallelism.joinThreads(action_thread_list)
# Handle symbolic links.
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, action_list=self.action_list, map_profile=self.map_profile, is_nested=self.is_nested)
# TODO: check for symbolic link to missing or deleted files.
produced_unit_list.extend(action.symlink())
# deduplication
unit_list = []
deleted_file_list = []
produced_file_list = []
for unit in produced_unit_list:
if unit == []:
continue
logging.debug("unit: " + str(unit))
head = unit["head"]
body = unit["body"]
action = unit["action"]
if action == "ignore":
continue
if action == "delete":
deleted_file_list.append( head )
if head not in produced_file_list:
produced_file_list.append(head)
for part in body:
if part not in produced_file_list:
# FIXME: only if action was not “ignore”
produced_file_list.append(part)
# if multiple calls produce the same files (like merge_bsp)
# FIXME: that can't work, this is probably a leftover
# or we may have to do “if head in body” instead.
# See https://github.com/DaemonEngine/Urcheon/issues/48
if head in unit:
continue
unit_list.append(unit)
produced_unit_list = unit_list
if self.stage_name == "build" and not self.is_nested:
if self.pak_format == "dpk":
is_deleted = False
if self.since_reference:
Ui.laconic("looking for deleted files")
# Unvanquished game did not support DELETED file until after 0.52.1.
workaround_no_delete = self.source_tree.game_name == "unvanquished" and self.since_reference in ["unvanquished/0.52.1", "v0.52.1"]
git_repo = Repository.Git(self.source_dir, "dpk", workaround_no_delete=workaround_no_delete)
previous_version = git_repo.computeVersion(self.since_reference, named_reference=True)
self.deps.set(self.pak_name, previous_version)
for deleted_file in git_repo.getDeletedFileList(self.since_reference):
if deleted_file not in deleted_file_list:
is_deleted = True
deleted_file_list.append(deleted_file)
if deleted_file_list:
is_deleted = True
for deleted_file in deleted_file_list:
self.deleted.set(self.pak_name, deleted_file)
if self.deleted.read():
is_deleted = True
if is_deleted:
deleted_part_list = self.deleted.translate()
# TODO: No need to mark as DELETED a file from the same
# package if it does not depend on itself.
# TODO: A way to not translate DELETED files may be needed
# in some cases.
# If flamer.jpg producing flamer.crn was replaced
# by flamer.png also producing flamer.crn, the
# flamer.crn file will be listed as deleted
# while it will be shipped, but built from another
# source file, so we must check deleted files
# aren't built in other way to avoid listing
# as deleted a file that is actually shipped.
for deleted_part_dict in deleted_part_list:
is_built = False
if deleted_part_dict["pak_name"] == self.pak_name:
deleted_part = deleted_part_dict["file_path"]
if deleted_part.startswith(Default.repository_config_dir + os.path.sep):
continue
if deleted_part.startswith(Default.legacy_pakinfo_dir + os.path.sep):
continue
if deleted_part in produced_file_list:
is_built = True
Ui.laconic(deleted_part + ": do nothing because it is produced by another source file.")
self.deleted.removePart(self.pak_name, deleted_part)
if not is_built:
Ui.laconic(deleted_part + ": will mark as deleted.")
# Writing DELETED file.
for deleted_part in deleted_part_list:
self.deleted.set(self.source_tree.pak_name, deleted_part)
is_deleted = self.deleted.write()
if is_deleted:
unit = {
"head": "DELETED",
"body": [ "DELETED" ],
}
produced_unit_list.append(unit)
else:
# Remove DELETED leftover from partial build.
self.deps.remove(self.test_dir)
is_deps = False
# | listFiles()
if not self.no_auto_ | conditional_block |
Pak.py | # also look for deleted files
if self.since_reference:
is_deps = True
if self.deps.read():
is_deps = True
if is_deps:
# translating DEPS file
self.deps.translateTest()
self.deps.write()
unit = {
"head": "DEPS",
"body": [ "DEPS" ],
}
produced_unit_list.append(unit)
else:
# Remove DEPS leftover from partial build.
self.deps.remove(self.test_dir)
logging.debug("produced unit list:" + str(produced_unit_list))
# do not clean-up if building from temporary directories
# or if user asked to not clean-up
if clean_dust:
cleaner.cleanDust(self.test_dir, produced_unit_list, previous_file_list)
return produced_unit_list
def threadExtendRes(self, func, args, res):
# magic: only works if res is a mutable object (like a list)
res.extend(func(*args))
class Packager():
# TODO: reuse paktraces, do not walk for file,s
def __init__(self, source_tree, args):
self.source_dir = source_tree.dir
self.pak_vfs = source_tree.pak_vfs
self.pak_config = source_tree.pak_config
self.pak_format = source_tree.pak_format
self.allow_dirty = args.allow_dirty
self.no_compress = args.no_compress
self.test_dir = self.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
self.pak_file = self.pak_config.getPakFile(build_prefix=args.build_prefix, pak_prefix=args.pak_prefix, pak_file=args.pak_file, version_suffix=args.version_suffix)
self.game_profile = Game.Game(source_tree)
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(source_tree, self.test_dir, None)
self.deps = Repository.Deps(source_tree, self.test_dir)
def createSubdirs(self, pak_file):
pak_subdir = os.path.dirname(pak_file)
if pak_subdir == "":
pak_subdir = "."
if os.path.isdir(pak_subdir):
logging.debug("found pak subdir: " + pak_subdir)
else:
logging.debug("create pak subdir: " + pak_subdir)
os.makedirs(pak_subdir, exist_ok=True)
def run(self):
if not os.path.isdir(self.test_dir):
Ui.error("test pakdir not built: " + self.test_dir)
source_repository = Repository.Git(self.source_dir, self.pak_format)
if source_repository.isGit() and source_repository.isDirty():
if self.allow_dirty:
Ui.warning("Dirty repository: " + self.source_dir)
else:
Ui.error("Dirty repository isn't allowed to be packaged (use --allow-dirty to override): " + self.source_dir)
Ui.print("Packaging “" + self.test_dir + "” as: " + self.pak_file)
self.createSubdirs(self.pak_file)
logging.debug("opening: " + self.pak_file)
# remove existing file (do not write in place) to force the game engine to reread the file
if os.path.isfile(self.pak_file):
logging.debug("remove existing package: " + self.pak_file)
os.remove(self.pak_file)
if self.no_compress:
# why zlib.Z_NO_COMPRESSION not defined?
zipfile.zlib.Z_DEFAULT_COMPRESSION = 0
else:
# maximum compression
zipfile.zlib.Z_DEFAULT_COMPRESSION = zipfile.zlib.Z_BEST_COMPRESSION
found_file = False
paktrace_dir = Default.getPakTraceDir(self.test_dir)
relative_paktrace_dir = os.path.relpath(paktrace_dir, self.test_dir)
for dir_name, subdir_name_list, file_name_list in os.walk(paktrace_dir):
for file_name in file_name_list:
found_file = True
break
if found_file:
break
# FIXME: if only the DEPS file is modified, the package will
# not be created (it should be).
if not found_file:
Ui.print("Not writing empty package: " + self.pak_file)
return
pak = zipfile.ZipFile(self.pak_file, "w", zipfile.ZIP_DEFLATED)
for dir_name, subdir_name_list, file_name_list in os.walk(self.test_dir):
for file_name in file_name_list:
rel_dir_name = os.path.relpath(dir_name, self.test_dir)
full_path = os.path.join(dir_name, file_name)
file_path = os.path.relpath(full_path, self.test_dir)
# ignore paktrace files
if file_path.startswith(relative_paktrace_dir + os.path.sep):
continue
# ignore DELETED and DEPS file, will add it later
if self.pak_format == "dpk" and file_path in Repository.dpk_special_files:
continue
found_file = True
# TODO: add a mechanism to know if VFS supports
# symbolic links in packages or not.
# Dæmon's DPK VFS is supporting symbolic links.
# DarkPlaces' PK3 VFS is supporting symbolic links.
# Others may not.
is_symlink_supported = True
if is_symlink_supported and os.path.islink(full_path):
Ui.print("add symlink to package " + os.path.basename(self.pak_file) + ": " + file_path)
# TODO: Remove this test when Urcheon deletes extra
# files in build directory. Currently a deleted but not
# committed file is kept.
if os.path.exists(full_path):
# FIXME: getmtime reads realpath datetime, not symbolic link datetime.
file_date_time = (datetime.fromtimestamp(os.path.getmtime(full_path)))
# See https://stackoverflow.com/a/61795576/9131399
attrs = ('year', 'month', 'day', 'hour', 'minute', 'second')
file_date_time_tuple = attrgetter(*attrs)(file_date_time)
# See https://stackoverflow.com/a/60691331/9131399
zip_info = zipfile.ZipInfo(file_path, date_time=file_date_time_tuple)
zip_info.create_system = 3
file_permissions = 0o777
file_permissions |= 0xA000
zip_info.external_attr = file_permissions << 16
target_path = os.readlink(full_path)
pak.writestr(zip_info, target_path)
else:
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": " + file_path)
pak.write(full_path, arcname=file_path)
if self.pak_format == "dpk":
# Writing DELETED file.
deleted_file_path = self.deleted.get_test_path()
if os.path.isfile(deleted_file_path):
pak.write(deleted_file_path, arcname="DELETED")
# Translating DEPS file.
if self.deps.read(deps_dir=self.test_dir):
self.deps.translateRelease(self.pak_vfs)
deps_temp_dir = tempfile.mkdtemp()
deps_temp_file = self.deps.write(deps_dir=deps_temp_dir)
Ui.print("add file to package " + os.path.basename(self.pak_file) + ": DEPS")
pak.write(deps_temp_file, arcname="DEPS")
logging.debug("close: " + self.pak_file)
pak.close()
if source_repository.isGit():
repo_date = int(source_repository.getDate("HEAD"))
os.utime(self.pak_file, (repo_date, repo_date))
Ui.laconic("Package written: " + self.pak_file)
class Cleaner():
def __init__(self, source_tree):
self.pak_name = source_tree.pak_name
self.game_profile = Game.Game(source_tree)
def cleanTest(self, test_dir):
for dir_name, subdir_name_list, file_name_list in os.walk(test_dir):
for file_name in file_name_list:
that_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + that_file)
os.remove(that_file)
FileSystem.removeEmptyDir(dir_name)
for dir_name in subdir_name_list:
that_dir = dir_name + os.path.sep + dir_name
FileSystem.removeEmptyDir(that_dir)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(test_dir)
def cleanPak(self, pak_prefix):
for dir_name, subdir_name_list, file_name_list in os.walk(pak_prefix):
for file_name in file_name_list:
if file_name.startswith(self.pak_name) and file_name.endswith(self.game_profile.pak_ext):
pak_file = os.path.join(dir_name, file_name)
Ui.laconic("clean: " + pak_file)
os.remove(pak_file)
FileSystem.removeEmptyDir(dir_name)
FileSystem.removeEmptyDir(pak_prefix)
def cleanMap(self, test_dir):
# TODO: use p | aktrace | identifier_name | |
Pak.py | produced_unit_list.extend(action.getOldProducedUnitList())
continue
if not self.is_parallel or not action_type.is_parallel:
# tasks are run sequentially but they can
# use multiple threads themselves
thread_count = cpu_count
else:
# this compute is super slow because of process.children()
child_thread_count = Parallelism.countChildThread(main_process)
thread_count = max(1, cpu_count - child_thread_count)
action.thread_count = thread_count
if not self.is_parallel or not action_type.is_parallel:
# sequential build explicitely requested (like in recursion)
# or action that can't be run concurrently to others (like MergeBsp)
produced_unit_list.extend(action.run())
else:
# do not use >= in case of there is some extra thread we don't think about
# it's better to spawn an extra one than looping forever
while child_thread_count > cpu_count:
# no need to loop at full cpu speed
time.sleep(.05)
child_thread_count = Parallelism.countChildThread(main_process)
pass
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
action.thread_count = max(2, cpu_count - child_thread_count)
# wrapper does: produced_unit_list.extend(action.run())
action_thread = Parallelism.Thread(target=self.threadExtendRes, args=(action.run, (), produced_unit_list))
action_thread_list.append(action_thread)
action_thread.start()
# join dead thread early to raise thread exceptions early
# forget ended threads
action_thread_list = Parallelism.joinDeadThreads(action_thread_list)
# wait for all threads to end, otherwise it will start packaging next
# package while the building task for the current one is not ended
# and well, we now have to read that list to purge old files, so we
# must wait
Parallelism.joinThreads(action_thread_list)
# Handle symbolic links.
for action_type in Action.list():
for file_path in self.action_list.active_action_dict[action_type.keyword]:
action = action_type(self.source_tree, self.test_dir, file_path, self.stage_name, action_list=self.action_list, map_profile=self.map_profile, is_nested=self.is_nested)
# TODO: check for symbolic link to missing or deleted files.
produced_unit_list.extend(action.symlink())
# deduplication
unit_list = []
deleted_file_list = []
produced_file_list = []
for unit in produced_unit_list:
if unit == []:
continue
logging.debug("unit: " + str(unit))
head = unit["head"]
body = unit["body"]
action = unit["action"]
if action == "ignore":
continue
if action == "delete":
deleted_file_list.append( head )
if head not in produced_file_list:
produced_file_list.append(head)
for part in body:
if part not in produced_file_list:
# FIXME: only if action was not “ignore”
produced_file_list.append(part)
# if multiple calls produce the same files (like merge_bsp)
# FIXME: that can't work, this is probably a leftover
# or we may have to do “if head in body” instead.
# See https://github.com/DaemonEngine/Urcheon/issues/48
if head in unit:
continue
unit_list.append(unit)
produced_unit_list = unit_list
if self.stage_name == "build" and not self.is_nested:
if self.pak_format == "dpk":
is_deleted = False
if self.since_reference:
Ui.laconic("looking for deleted files")
# Unvanquished game did not support DELETED file until after 0.52.1.
workaround_no_delete = self.source_tree.game_name == "unvanquished" and self.since_reference in ["unvanquished/0.52.1", "v0.52.1"]
git_repo = Repository.Git(self.source_dir, "dpk", workaround_no_delete=workaround_no_delete)
previous_version = git_repo.computeVersion(self.since_reference, named_reference=True)
self.deps.set(self.pak_name, previous_version)
for deleted_file in git_repo.getDeletedFileList(self.since_reference):
if deleted_file not in deleted_file_list:
is_deleted = True
deleted_file_list.append(deleted_file)
if deleted_file_list:
is_deleted = True
for deleted_file in deleted_file_list:
self.deleted.set(self.pak_name, deleted_file)
if self.deleted.read():
is_deleted = True
if is_deleted:
deleted_part_list = self.deleted.translate()
# TODO: No need to mark as DELETED a file from the same
# package if it does not depend on itself.
# TODO: A way to not translate DELETED files may be needed
# in some cases.
# If flamer.jpg producing flamer.crn was replaced
# by flamer.png also producing flamer.crn, the
# flamer.crn file will be listed as deleted
# while it will be shipped, but built from another
# source file, so we must check deleted files
# aren't built in other way to avoid listing
# as deleted a file that is actually shipped.
for deleted_part_dict in deleted_part_list:
is_built = False
if deleted_part_dict["pak_name"] == self.pak_name:
deleted_part = deleted_part_dict["file_path"]
if deleted_part.startswith(Default.repository_config_dir + os.path.sep):
continue
if deleted_part.startswith(Default.legacy_pakinfo_dir + os.path.sep):
continue
if deleted_part in produced_file_list:
is_built = True
Ui.laconic(deleted_part + ": do nothing because it is produced by another source file.")
self.deleted.removePart(self.pak_name, deleted_part)
if not is_built:
Ui.laconic(deleted_part + ": will mark as deleted.")
# Writing DELETED file.
for deleted_part in deleted_part_list:
self.deleted.set(self.source_tree.pak_name, deleted_part)
is_deleted = self.deleted.write()
if is_deleted:
unit = {
"head": "DELETED",
"body": [ "DELETED" ],
}
produced_unit_list.append(unit)
else:
# Remove DELETED leftover from partial build.
self.deps.remove(self.test_dir)
is_deps = False
# add itself to DEPS if partial build,
# also look for deleted files
if self.since_reference:
is_deps = True
if self.deps.read():
is_deps = True
if is_deps:
# translating DEPS file
self.deps.translateTest()
self.deps.write()
unit = {
"head": "DEPS",
"body": [ "DEPS" ],
}
produced_unit_list.append(unit)
else:
# Remove DEPS leftover from partial build.
self.deps.remove(self.test_dir)
logging.debug("produced unit list:" + str(produced_unit_list))
# do not clean-up if building from temporary directories
# or if user asked to not clean-up
if clean_dust:
cleaner.cleanDust(self.test_dir, produced_unit_list, previous_file_list)
return produced_unit_list
def threadExtendRes(self, func, args, res):
# magic: only works if res is a mutable object (like a list)
res.extend(func(*args))
class Packager():
# TODO: reuse paktraces, do not walk for file,s
def __init__(self, source_tree, args):
self.source_dir = source_tree.dir
self.pak_vfs = source_tree.pak_vfs
self.pak_config = source_tree.pak_config
self.pak_format = source_tree.pak_format
self.allow_dirty = args.allow_dirty
self.no_compress = args.no_compress
self.test_dir = self.pak_config.getTestDir(build_prefix=args.build_prefix, test_prefix=args.test_prefix, test_dir=args.test_dir)
self.pak_file = self.pak_config.getPakFile(build_prefix=args.build_prefix, pak_prefix=args.pak_prefix, pak_file=args.pak_file, version_suffix=args.version_suffix)
self.game_profile = Game.Game(source_tree)
if self.pak_format == "dpk":
self.deleted = Repository.Deleted(source_tree, self.test_dir, None)
self.deps = Repository.Deps(source_tree, self.test_dir)
def createSubdirs(self, pak_file):
pak_subdir = os.path.dirname(pak_fil | e)
if pak_subdir == "":
pak_subdir = "."
if os.path.isdir(pak_subdir):
logging.debug("found pak subdir: " + pak_subdir)
else:
logging.debug("create pak subdir: " + pak_subdir)
os.makedirs(pak_subdir, exist_ok=True)
def run(self):
if not os.path. | identifier_body | |
patchsBuild.py | utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, shell=True,cwd=r'C:\projs')
(out, err) = proc.communicate()
out=out.replace("\n","").split("\r")[1:-3]
fileName=javaFile.split("\\")[-1]
fileName=fileName.replace("_","\\")
for o in out:
if o=="":
continue
if not "@" in o:
continue
file,data=o.split(" ")
name,begin,end=data.split("@")
methodDir=fileName+"$"+name
if not methodDir in methods:
methods[methodDir]={}
if not "methodName" in methods[methodDir]:
methods[methodDir]["methodName"]=name
if not "fileName" in methods[methodDir]:
methods[methodDir]["fileName"]=fileName
rng=range(int(begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def OneClass(diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
|
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = ""
inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == | afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1 | conditional_block |
patchsBuild.py | utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, shell=True,cwd=r'C:\projs')
(out, err) = proc.communicate()
out=out.replace("\n","").split("\r")[1:-3]
fileName=javaFile.split("\\")[-1]
fileName=fileName.replace("_","\\")
for o in out:
if o=="":
continue
if not "@" in o:
continue
file,data=o.split(" ")
name,begin,end=data.split("@")
methodDir=fileName+"$"+name
if not methodDir in methods:
methods[methodDir]={}
if not "methodName" in methods[methodDir]:
methods[methodDir]["methodName"]=name
if not "fileName" in methods[methodDir]:
methods[methodDir]["fileName"]=fileName
rng=range(int(begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def | (diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = ""
inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == | OneClass | identifier_name |
patchsBuild.py | (begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def OneClass(diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = ""
inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == 0:
continue
methodDir = fileName + "$" + name
tup = (methodDir, commitID)
if not tup in methods:
methods[tup] = {}
methods[tup][key] = keyChange
if not "methodName" in methods[tup]:
methods[tup]["methodName"] = name
if not "fileName" in methods[tup]:
methods[tup]["fileName"] = fileName
if not "commitID" in methods[tup]:
methods[tup]["commitID"] = commitID
return methods
def readChangesFile(change):
dict = {}
rows = []
with open(change, "r") as f:
for line in f:
fileName, commitSha, dels, Ins = line.strip().split("@") | fileName = fileName.replace("_", os.path.sep) | random_line_split | |
patchsBuild.py | utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, shell=True,cwd=r'C:\projs')
(out, err) = proc.communicate()
out=out.replace("\n","").split("\r")[1:-3]
fileName=javaFile.split("\\")[-1]
fileName=fileName.replace("_","\\")
for o in out:
if o=="":
continue
if not "@" in o:
continue
file,data=o.split(" ")
name,begin,end=data.split("@")
methodDir=fileName+"$"+name
if not methodDir in methods:
methods[methodDir]={}
if not "methodName" in methods[methodDir]:
methods[methodDir]["methodName"]=name
if not "fileName" in methods[methodDir]:
methods[methodDir]["fileName"]=fileName
rng=range(int(begin),int(end)+1)
if methodDir not in methods:
methods[methodDir]={}
methods[methodDir][key]=len(list(set(rng) & set(inds)))
def FileToMethods(beforeFile,AfterFile,deletedInds,addedInds, outPath,commitID):
methods={}
oneFileParser(methods,beforeFile,deletedInds,"deleted")
oneFileParser(methods,AfterFile,addedInds,"inserted")
f=open(outPath,"w")
for methodDir in methods:
dels=0
ins=0
fileName=""
methodName=""
if "deleted" in methods[methodDir]:
dels=methods[methodDir]["deleted"]
if "inserted" in methods[methodDir]:
ins=methods[methodDir]["inserted"]
if "fileName" in methods[methodDir]:
fileName=methods[methodDir]["fileName"]
if "methodName" in methods[methodDir]:
methodName=methods[methodDir]["methodName"]
row=[commitID,methodDir,fileName,methodName,str(dels),str(ins),str(dels+ins)]
f.write(",".join(row))
f.close()
def fixEnum(l):
if "enum =" in l:
l=l.replace("enum =","enumAmir =")
if "enum=" in l:
l=l.replace("enum=","enumAmir=")
if "enum," in l:
l=l.replace("enum,","enumAmir,")
if "enum." in l:
l=l.replace("enum.","enumAmir.")
if "enum;" in l:
l=l.replace("enum;","enumAmir;")
if "enum)" in l:
l=l.replace("enum)","enumAmir)")
return l
def fixAssert(l):
if "assert " in l:
l=l.replace("assert ","assertAmir ")
if ":" in l:
l=l.replace(":",";//")
if "assert(" in l:
l=l.replace("assert(","assertAmir(")
if ":" in l:
l=l.replace(":",";//")
return l
def OneClass(diff_lines, outPath, commitID, change):
fileName = diff_lines[0].split()
if len(fileName)<3:
return []
fileName = diff_lines[0].split()[2]
fileName = fileName[2:]
fileName = os.path.normpath(fileName).replace(os.path.sep,"_")
if not ".java" in fileName:
return []
fileName = fileName.split('.java')[0] + '.java'
if len(diff_lines) > 3:
diff_lines = diff_lines[5:]
befLines=[]
afterLines=[]
deletedInds=[]
addedInds=[]
delind=0
addind=0
for l in diff_lines:
if "\ No newline at end of file" in l:
continue
if "1.9.4.msysgit.2" in l:
continue
if "- \n"== l:
continue
if "-- \n"== l:
continue
l=fixEnum(l)
l=fixAssert(l)
replaced=re.sub('@@(-|\+|,| |[0-9])*@@','',l)
if replaced.startswith("*"):
replaced="\\"+replaced
if replaced.startswith("+"):
afterLines.append(replaced[1:])
addedInds.append(addind)
addind=addind+1
elif replaced.startswith("-"):
befLines.append(replaced[1:])
deletedInds.append(delind)
delind=delind+1
else:
afterLines.append(replaced)
befLines.append(replaced)
delind=delind+1
addind=addind+1
with open(os.path.join(outPath, "before", fileName), "wb") as bef:
bef.writelines(befLines)
with open(os.path.join(outPath, "after", fileName), "wb") as after:
after.writelines(afterLines)
with open(os.path.join(outPath, fileName + "_deletsIns.txt"), "wb") as f:
f.writelines(["deleted\n", str(deletedInds)+"\n","added\n", str(addedInds)])
change.write(fileName+"@"+str(commitID)+"@"+str(deletedInds)+"@"+str(addedInds)+"\n")
def oneFile(PatchFile, outDir,change):
with open(PatchFile,'r') as f:
lines=f.readlines()
if len(lines)==0:
return []
commitSha = lines[0].split()[1] # line 0 word 1
commitID = str(commitSha)
mkDirs(outDir, commitID)
inds=[lines.index(l) for l in lines if "diff --git" in l]+[len(lines)] #lines that start with diff --git
shutil.copyfile(PatchFile, os.path.join(outDir, commitID, os.path.basename(PatchFile)))
for i in range(len(inds)-1):
diff_lines = lines[inds[i]:inds[i+1]]
if len(diff_lines) == 0:
continue
OneClass(diff_lines, os.path.join(outDir, commitID),commitID,change)
def debugPatchs(Path,outFile):
lst= glob.glob(Path+"/*.patch")
i=0
allComms=[]
ou=open(outFile,"wt")
for doc in lst:
i=i+1
f=open(doc,'r')
lines=f.readlines()[:9]
ou.writelines(lines)
ou.close()
def buildPatchs(Path,outDir,changedFile):
mkdir(outDir)
with open(changedFile,"wb") as change:
for doc in glob.glob(os.path.join(Path,"/*.patch")):
oneFile(doc, outDir, change)
def mkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def DbAdd(dbPath,allComms):
conn = sqlite3.connect(dbPath)
conn.text_factory = str
c = conn.cursor()
c.execute('''CREATE TABLE commitedMethods (commitID INT, methodDir text, fileName text, methodName text, deletions INT , insertions INT , lines INT )''')
for com in allComms:
c.execute("INSERT INTO commitedMethods VALUES (?,?,?,?,?,?,?)",com)
conn.commit()
conn.close()
def RunCheckStyle(workingDir, outPath, checkStyle68, methodNameLines):
run_commands = ["java" ,"-jar" ,checkStyle68 ,"-c" ,methodNameLines ,"javaFile" ,"-o",outPath,workingDir]
proc = utilsConf.open_subprocess(run_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
def detectFromConf(lines,lineInd):
deleted = (lines[lineInd])
deleted = deleted.replace("[","").replace("]","").replace("\n","")
deleted = deleted.split(",")
return [x.lstrip() for x in deleted]
def readDataFile(Dfile):
f=open(Dfile,"r")
lines=f.readlines()
f.close()
deleted=detectFromConf(lines,1)
insertions=detectFromConf(lines,3)
return deleted,insertions
def checkStyleCreateDict(checkOut, changesDict):
| inds = []
deleted, insertions = changesDict[(fileName, commitID)]
if "before" in file:
key = "deletions"
inds = deleted
if "after" in file:
key = "insertions"
inds = insertions
name, begin, end = data.split("@")
rng = map(str, range(int(begin)-1, int(end)))
both = filter(lambda x: x in rng, map(str, inds))
keyChange = len(both)
if keyChange == | methods = {}
lines = []
with open(checkOut, "r") as f:
lines = f.readlines()[1:-3]
for line in lines:
if line == "":
continue
if not "@" in line:
continue
if not len(line.split(" ")) == 2:
# case of error
continue
file, data = line.split(" ")
file = file.split(".java")[0]+".java"
fileNameSplited = file.split(os.path.sep)
fileName = fileNameSplited[-1].replace("_", os.path.sep)
commitID = fileNameSplited[fileNameSplited.index("commitsFiles") + 1]
if not (fileName, commitID) in changesDict.keys():
continue
key = "" | identifier_body |
sg_start.go | run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func constructStartCmdLongHelp() string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names)
fmt.Fprint(&out, strings.Join(names, "\n"))
}
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " L_ ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / _)! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " / /__L "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____/ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _____ (____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\_(____) "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " \\__/ "))
return startExec(ctx, args)
}
func pathExists(path string) (bool, error) {
_, err | := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
| identifier_body | |
sg_start.go | .ExitOnError)
debugStartServices = startFlagSet.String("debug", "", "Comma separated list of services to set at debug log level.")
infoStartServices = startFlagSet.String("info", "", "Comma separated list of services to set at info log level.")
warnStartServices = startFlagSet.String("warn", "", "Comma separated list of services to set at warn log level.")
errorStartServices = startFlagSet.String("error", "", "Comma separated list of services to set at error log level.")
critStartServices = startFlagSet.String("crit", "", "Comma separated list of services to set at crit log level.")
startCommand = &ffcli.Command{
Name: "start",
ShortUsage: "sg start [commandset]",
ShortHelp: "🌟Starts the given commandset. Without a commandset it starts the default Sourcegraph dev environment.",
LongHelp: constructStartCmdLongHelp(),
FlagSet: startFlagSet,
Exec: startExec,
}
// run-set is the deprecated older version of `start`
runSetFlagSet = flag.NewFlagSet("sg run-set", flag.ExitOnError)
runSetCommand = &ffcli.Command{
Name: "run-set",
ShortUsage: "sg run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func con | string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names)
fmt.Fprint(&out, strings.Join(names, "\n"))
}
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! | structStartCmdLongHelp() | identifier_name |
sg_start.go | .ExitOnError)
debugStartServices = startFlagSet.String("debug", "", "Comma separated list of services to set at debug log level.")
infoStartServices = startFlagSet.String("info", "", "Comma separated list of services to set at info log level.")
warnStartServices = startFlagSet.String("warn", "", "Comma separated list of services to set at warn log level.")
errorStartServices = startFlagSet.String("error", "", "Comma separated list of services to set at error log level.")
critStartServices = startFlagSet.String("crit", "", "Comma separated list of services to set at crit log level.")
startCommand = &ffcli.Command{
Name: "start",
ShortUsage: "sg start [commandset]",
ShortHelp: "🌟Starts the given commandset. Without a commandset it starts the default Sourcegraph dev environment.",
LongHelp: constructStartCmdLongHelp(),
FlagSet: startFlagSet,
Exec: startExec,
}
// run-set is the deprecated older version of `start`
runSetFlagSet = flag.NewFlagSet("sg run-set", flag.ExitOnError)
runSetCommand = &ffcli.Command{
Name: "run-set",
ShortUsage: "sg run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func constructStartCmdLongHelp() string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names)
fmt.Fprint(&out, strings.Join(names, "\n"))
}
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out. | checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! | WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var | conditional_block |
sg_start.go | .ExitOnError)
debugStartServices = startFlagSet.String("debug", "", "Comma separated list of services to set at debug log level.")
infoStartServices = startFlagSet.String("info", "", "Comma separated list of services to set at info log level.")
warnStartServices = startFlagSet.String("warn", "", "Comma separated list of services to set at warn log level.")
errorStartServices = startFlagSet.String("error", "", "Comma separated list of services to set at error log level.")
critStartServices = startFlagSet.String("crit", "", "Comma separated list of services to set at crit log level.")
startCommand = &ffcli.Command{
Name: "start",
ShortUsage: "sg start [commandset]",
ShortHelp: "🌟Starts the given commandset. Without a commandset it starts the default Sourcegraph dev environment.",
LongHelp: constructStartCmdLongHelp(),
FlagSet: startFlagSet,
Exec: startExec,
}
// run-set is the deprecated older version of `start`
runSetFlagSet = flag.NewFlagSet("sg run-set", flag.ExitOnError)
runSetCommand = &ffcli.Command{
Name: "run-set",
ShortUsage: "sg run-set <commandset>",
ShortHelp: "DEPRECATED. Use 'sg start' instead. Run the given commandset.",
FlagSet: runSetFlagSet,
Exec: runSetExec,
}
)
func constructStartCmdLongHelp() string {
var out strings.Builder
fmt.Fprintf(&out, `Runs the given commandset.
If no commandset is specified, it starts the commandset with the name 'default'.
Use this to start your Sourcegraph environment!
`)
// Attempt to parse config to list available commands, but don't fail on
// error, because we should never error when the user wants --help output.
_, _ = parseConf(*configFlag, *overwriteConfigFlag)
if globalConf != nil {
fmt.Fprintf(&out, "\n")
fmt.Fprintf(&out, "AVAILABLE COMMANDSETS IN %s%s%s\n", output.StyleBold, *configFlag, output.StyleReset)
var names []string
for name := range globalConf.Commandsets {
switch name {
case "enterprise-codeintel":
names = append(names, fmt.Sprintf(" %s 🧠", name))
case "batches":
names = append(names, fmt.Sprintf(" %s 🦡", name))
default:
names = append(names, fmt.Sprintf(" %s", name))
}
}
sort.Strings(names) | }
return out.String()
}
func startExec(ctx context.Context, args []string) error {
ok, errLine := parseConf(*configFlag, *overwriteConfigFlag)
if !ok {
out.WriteLine(errLine)
os.Exit(1)
}
if len(args) > 2 {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: too many arguments"))
return flag.ErrHelp
}
if len(args) != 1 {
if globalConf.DefaultCommandset != "" {
args = append(args, globalConf.DefaultCommandset)
} else {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: No commandset specified and no 'defaultCommandset' specified in sg.config.yaml\n"))
return flag.ErrHelp
}
}
set, ok := globalConf.Commandsets[args[0]]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: commandset %q not found :(", args[0]))
return flag.ErrHelp
}
// If the commandset requires the dev-private repository to be cloned, we
// check that it's at the right location here.
if set.RequiresDevPrivate {
repoRoot, err := root.RepositoryRoot()
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to determine repository root location: %s", err))
os.Exit(1)
}
devPrivatePath := filepath.Join(repoRoot, "..", "dev-private")
exists, err := pathExists(devPrivatePath)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "Failed to check whether dev-private repository exists: %s", err))
os.Exit(1)
}
if !exists {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: dev-private repository not found!"))
out.WriteLine(output.Linef("", output.StyleWarning, "It's expected to exist at: %s", devPrivatePath))
out.WriteLine(output.Line("", output.StyleWarning, "If you're not a Sourcegraph employee you probably want to run: sg start oss"))
out.WriteLine(output.Line("", output.StyleWarning, "If you're a Sourcegraph employee, see the documentation for how to clone it: https://docs.sourcegraph.com/dev/getting-started/quickstart_2_clone_repository"))
out.Write("")
overwritePath := filepath.Join(repoRoot, "sg.config.overwrite.yaml")
out.WriteLine(output.Linef("", output.StylePending, "If you know what you're doing and want disable the check, add the following to %s:", overwritePath))
out.Write("")
out.Write(fmt.Sprintf(` commandsets:
%s:
requiresDevPrivate: false
`, set.Name))
out.Write("")
os.Exit(1)
}
}
var checks []run.Check
for _, name := range set.Checks {
check, ok := globalConf.Checks[name]
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: check %s not found in config", name))
continue
}
checks = append(checks, check)
}
ok, err := run.Checks(ctx, globalConf.Env, checks...)
if err != nil {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks could not be run: %s", err))
}
if !ok {
out.WriteLine(output.Linef("", output.StyleWarning, "ERROR: checks did not pass, aborting start of commandset %s", set.Name))
return nil
}
cmds := make([]run.Command, 0, len(set.Commands))
for _, name := range set.Commands {
cmd, ok := globalConf.Commands[name]
if !ok {
return errors.Errorf("command %q not found in commandset %q", name, args[0])
}
cmds = append(cmds, cmd)
}
if len(cmds) == 0 {
out.WriteLine(output.Linef("", output.StyleWarning, "WARNING: no commands to run"))
}
levelOverrides := logLevelOverrides()
for _, cmd := range cmds {
enrichWithLogLevels(&cmd, levelOverrides)
}
env := globalConf.Env
for k, v := range set.Env {
env[k] = v
}
return run.Commands(ctx, env, *verboseFlag, cmds...)
}
// logLevelOverrides builds a map of commands -> log level that should be overridden in the environment.
func logLevelOverrides() map[string]string {
levelServices := make(map[string][]string)
levelServices["debug"] = parseCsv(*debugStartServices)
levelServices["info"] = parseCsv(*infoStartServices)
levelServices["warn"] = parseCsv(*warnStartServices)
levelServices["error"] = parseCsv(*errorStartServices)
levelServices["crit"] = parseCsv(*critStartServices)
overrides := make(map[string]string)
for level, services := range levelServices {
for _, service := range services {
overrides[service] = level
}
}
return overrides
}
// enrichWithLogLevels will add any logger level overrides to a given command if they have been specified.
func enrichWithLogLevels(cmd *run.Command, overrides map[string]string) {
logLevelVariable := "SRC_LOG_LEVEL"
if level, ok := overrides[cmd.Name]; ok {
out.WriteLine(output.Linef("", output.StylePending, "Setting log level: %s for command %s.", level, cmd.Name))
if cmd.Env == nil {
cmd.Env = make(map[string]string, 1)
cmd.Env[logLevelVariable] = level
}
cmd.Env[logLevelVariable] = level
}
}
// parseCsv takes an input comma seperated string and returns a list of tokens each trimmed for whitespace
func parseCsv(input string) []string {
tokens := strings.Split(input, ",")
results := make([]string, 0, len(tokens))
for _, token := range tokens {
results = append(results, strings.TrimSpace(token))
}
return results
}
var deprecationStyle = output.CombineStyles(output.Fg256Color(255), output.Bg256Color(124))
func runSetExec(ctx context.Context, args []string) error {
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " _______________________________________________________________________ "))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "/ `sg run-set` is deprecated - use `sg start` instead! \\"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "! Run `sg start -help` for usage information. !"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, "\\_______________________________________________________________________/"))
stdout.Out.WriteLine(output.Linef("", deprecationStyle, " ! ! | fmt.Fprint(&out, strings.Join(names, "\n")) | random_line_split |
hunk.rs | self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
}
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
}
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line(),
)),
(Some('+'), Some(prefix), Some(in_merge_conflict)) => Some(HunkPlus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_plus_raw_line(),
)),
_ => None,
}
}
#[cfg(test)]
mod tests {
use crate::tests::integration_test_utils::DeltaTest;
mod word_diff {
use super::*;
#[test]
fn test_word_diff() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --word-diff")
.explain_ansi()
.with_input(GIT_DIFF_WORD_DIFF)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue) | {
use DiffType::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size
|| self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
}
if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
} | identifier_body | |
hunk.rs | (&self) -> bool {
matches!(
self.state,
State::HunkHeader(_, _, _, _)
| State::HunkZero(_, _)
| State::HunkMinus(_, _)
| State::HunkPlus(_, _)
)
}
/// Handle a hunk line, i.e. a minus line, a plus line, or an unchanged line.
// In the case of a minus or plus line, we store the line in a
// buffer. When we exit the changed region we process the collected
// minus and plus lines jointly, in order to paint detailed
// highlighting according to inferred edit operations. In the case of
// an unchanged line, we paint it immediately.
pub fn handle_hunk_line(&mut self) -> std::io::Result<bool> {
use DiffType::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size
|| self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
}
if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
}
self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
}
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
}
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line | test_hunk_line | identifier_name | |
hunk.rs | Type::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size | if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
}
self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
}
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
}
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line(),
)),
(Some('+'), Some(prefix), Some(in_merge_conflict)) => Some(HunkPlus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_plus_raw_line(),
)),
_ => None,
}
}
#[cfg(test)]
mod tests {
use crate::tests::integration_test_utils::DeltaTest;
mod word_diff {
use super::*;
#[test]
fn test_word_diff() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --word-diff")
.explain_ansi()
.with_input(GIT_DIFF_WORD_DIFF)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal)
| || self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
} | random_line_split |
hunk.rs | ::*;
use State::*;
// A true hunk line should start with one of: '+', '-', ' '. However, handle_hunk_line
// handles all lines until the state transitions away from the hunk states.
if !self.test_hunk_line() {
return Ok(false);
}
// Don't let the line buffers become arbitrarily large -- if we
// were to allow that, then for a large deleted/added file we
// would process the entire file before painting anything.
if self.painter.minus_lines.len() > self.config.line_buffer_size
|| self.painter.plus_lines.len() > self.config.line_buffer_size
{
self.painter.paint_buffered_minus_and_plus_lines();
}
if let State::HunkHeader(_, parsed_hunk_header, line, raw_line) = &self.state.clone() {
self.emit_hunk_header_line(parsed_hunk_header, line, raw_line)?;
}
self.state = match new_line_state(&self.line, &self.raw_line, &self.state, self.config) {
Some(HunkMinus(diff_type, raw_line)) => {
if let HunkPlus(_, _) = self.state {
// We have just entered a new subhunk; process the previous one
// and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
}
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkMinus(diff_type, raw_line);
self.painter.minus_lines.push((line, state.clone()));
state
}
Some(HunkPlus(diff_type, raw_line)) => {
let n_parents = diff_type.n_parents();
let line = prepare(&self.line, n_parents, self.config);
let state = HunkPlus(diff_type, raw_line);
self.painter.plus_lines.push((line, state.clone()));
state
}
Some(HunkZero(diff_type, raw_line)) => {
// We are in a zero (unchanged) line, therefore we have just exited a subhunk (a
// sequence of consecutive minus (removed) and/or plus (added) lines). Process that
// subhunk and flush the line buffers.
self.painter.paint_buffered_minus_and_plus_lines();
let n_parents = if is_word_diff() {
0
} else {
diff_type.n_parents()
};
let line = prepare(&self.line, n_parents, self.config);
let state = State::HunkZero(diff_type, raw_line);
self.painter.paint_zero_line(&line, state.clone());
state
}
_ => {
// The first character here could be e.g. '\' from '\ No newline at end of file'. This
// is not a hunk line, but the parser does not have a more accurate state corresponding
// to this.
self.painter.paint_buffered_minus_and_plus_lines();
self.painter
.output_buffer
.push_str(&tabs::expand(&self.raw_line, &self.config.tab_cfg));
self.painter.output_buffer.push('\n');
State::HunkZero(Unified, None)
}
};
self.painter.emit()?;
Ok(true)
}
}
// Return Some(prepared_raw_line) if delta should emit this line raw.
fn maybe_raw_line(
raw_line: &str,
state_style_is_raw: bool,
n_parents: usize,
non_raw_styles: &[style::Style],
config: &Config,
) -> Option<String> {
let emit_raw_line = is_word_diff()
|| config.inspect_raw_lines == cli::InspectRawLines::True
&& style::line_has_style_other_than(raw_line, non_raw_styles)
|| state_style_is_raw;
if emit_raw_line {
Some(prepare_raw_line(raw_line, n_parents, config))
} else {
None
}
}
// Return the new state corresponding to `new_line`, given the previous state. A return value of
// None means that `new_line` is not recognized as a hunk line.
fn new_line_state(
new_line: &str,
new_raw_line: &str,
prev_state: &State,
config: &Config,
) -> Option<State> {
use DiffType::*;
use MergeParents::*;
use State::*;
if is_word_diff() |
// 1. Given the previous line state, compute the new line diff type. These are basically the
// same, except that a string prefix is converted into an integer number of parents (prefix
// length).
let diff_type = match prev_state {
HunkMinus(Unified, _)
| HunkZero(Unified, _)
| HunkPlus(Unified, _)
| HunkHeader(Unified, _, _, _) => Unified,
HunkHeader(Combined(Number(n), InMergeConflict::No), _, _, _) => {
Combined(Number(*n), InMergeConflict::No)
}
// The prefixes are specific to the previous line, but the number of merge parents remains
// equal to the prefix length.
HunkHeader(Combined(Prefix(prefix), InMergeConflict::No), _, _, _) => {
Combined(Number(prefix.len()), InMergeConflict::No)
}
HunkMinus(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkZero(Combined(Prefix(prefix), in_merge_conflict), _)
| HunkPlus(Combined(Prefix(prefix), in_merge_conflict), _) => {
Combined(Number(prefix.len()), in_merge_conflict.clone())
}
HunkMinus(Combined(Number(n), in_merge_conflict), _)
| HunkZero(Combined(Number(n), in_merge_conflict), _)
| HunkPlus(Combined(Number(n), in_merge_conflict), _) => {
Combined(Number(*n), in_merge_conflict.clone())
}
_ => delta_unreachable(&format!(
"Unexpected state in new_line_state: {prev_state:?}",
)),
};
// 2. Given the new diff state, and the new line, compute the new prefix.
let (prefix_char, prefix, in_merge_conflict) = match diff_type.clone() {
Unified => (new_line.chars().next(), None, None),
Combined(Number(n_parents), in_merge_conflict) => {
let prefix = &new_line[..min(n_parents, new_line.len())];
let prefix_char = match prefix.chars().find(|c| c == &'-' || c == &'+') {
Some(c) => Some(c),
None => match prefix.chars().find(|c| c != &' ') {
None => Some(' '),
Some(_) => None,
},
};
(
prefix_char,
Some(prefix.to_string()),
Some(in_merge_conflict),
)
}
_ => delta_unreachable(""),
};
let maybe_minus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.minus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_MINUS_STYLE, config.git_minus_style],
config,
)
};
let maybe_zero_raw_line = || {
maybe_raw_line(
new_raw_line,
config.zero_style.is_raw,
diff_type.n_parents(),
&[],
config,
)
};
let maybe_plus_raw_line = || {
maybe_raw_line(
new_raw_line,
config.plus_style.is_raw,
diff_type.n_parents(),
&[*style::GIT_DEFAULT_PLUS_STYLE, config.git_plus_style],
config,
)
};
// 3. Given the new prefix, compute the full new line state...except without its raw_line, which
// is added later. TODO: that is not a sensible design.
match (prefix_char, prefix, in_merge_conflict) {
(Some('-'), None, None) => Some(HunkMinus(Unified, maybe_minus_raw_line())),
(Some(' '), None, None) => Some(HunkZero(Unified, maybe_zero_raw_line())),
(Some('+'), None, None) => Some(HunkPlus(Unified, maybe_plus_raw_line())),
(Some('-'), Some(prefix), Some(in_merge_conflict)) => Some(HunkMinus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_minus_raw_line(),
)),
(Some(' '), Some(prefix), Some(in_merge_conflict)) => Some(HunkZero(
Combined(Prefix(prefix), in_merge_conflict),
maybe_zero_raw_line(),
)),
(Some('+'), Some(prefix), Some(in_merge_conflict)) => Some(HunkPlus(
Combined(Prefix(prefix), in_merge_conflict),
maybe_plus_raw_line(),
)),
_ => None,
}
}
#[cfg(test)]
mod tests {
use crate::tests::integration_test_utils::DeltaTest;
mod word_diff {
use super::*;
#[test]
fn test_word_diff() {
DeltaTest::with_args(&[])
.with_calling_process("git diff --word-diff")
.explain_ansi()
.with_input(GIT_DIFF_WORD_DIFF)
.expect_after_skip(
11,
"
#indent_mark
(blue)───(blue)┐(normal)
(blue)1(normal): (blue)│(normal)
(blue)───(blue)┘(normal | {
return Some(HunkZero(
Unified,
maybe_raw_line(new_raw_line, config.zero_style.is_raw, 0, &[], config),
));
} | conditional_block |
ps_couture.py | [i+1][0][0],p[i+1][0][1]=b2[2]
p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])
else:
d=(box+chord)/2
lengths.append(d)
i+=1
new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]
new.append(p[-1][1])
lengths=[l for l in lengths if l>zero]
return(new,lengths)
def addDot(self,idPoint,labelPoint,diametre,typepoint, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M dia,0 A dia,dia 0 0 1 0,dia dia,dia 0 0 1 -dia,0 dia,dia 0 0 1 0,-dia dia,dia 0 0 1 dia,0 Z'
ligneH='M 0,0 H dia'
ligneV='M 0,0 V dia'
rayon=ligneH.replace('dia',str(self.unittouu(diametre))) #valeur par defaut.
if typepoint=="LigneV":
rayon=ligneV.replace('dia',str(self.unittouu(diametre)))
if typepoint=="Cercle":
rayon=cercle.replace('dia',str(self.unittouu(diametre)/2))
dot.set('d',rayon)
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
def addMark(self,x,y,idPoint,labelPoint,diametre, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M 0,0 V dia'
rayon=cercle.replace('dia',str(self.unittouu(diametre)))
dot.set('d',rayon)
dot.set('x', str(x))
dot.set('y', str(y))
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
Style['stroke']= Couleur
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
return dot
def addText(self,x,y,text):
new = inkex.etree.Element(inkex.addNS('text','svg'))
new.set('style', "font-style:normal;font-weight:normal;font-size:10px;line-height:100%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")#simplestyle.formatStyle(s))
new.set('x', str(x))
new.set('y', str(y))
new.text = str(text)
self.current_layer.append(new)
return new
class Pointsellier(pathmodifier.Diffeo):
def __init__(self):
pathmodifier.Diffeo.__init__(self)
self.OptionParser.add_option("--title")
self.OptionParser.add_option("--diamlong",
action="store", type="string",
dest="diamlong", default="1.0mm")
self.OptionParser.add_option("--typePoint",
action="store", type="string",
dest="typePoint", default="LigneH")
self.OptionParser.add_option("--textInfos",
action="store", type="inkbool",
dest="textInfos", default=False)
self.OptionParser.add_option("-t", "--toffset",
action="store", type="string",
dest="toffset", default="0.1mm")
self.OptionParser.add_option("-p", "--space",
action="store", type="string",
dest="space", default="3.0mm")
self.OptionParser.add_option("--autoOffset",
action="store", type="inkbool",
dest="autoOffset", default=False)
self.OptionParser.add_option("-r","--nrepeat",
action="store", type="int",
dest="nrepeat", default=1,help="nombre d'objets")
self.OptionParser.add_option("--autoRepeat",
action="store", type="inkbool",
dest="autoRepeat", default=False)
self.OptionParser.add_option("--autoMask",
action="store", type="inkbool",
dest="autoMask", default=False)
self.OptionParser.add_option("--autoMark",
action="store", type="inkbool",
dest="autoMark", default=False)
self.OptionParser.add_option("--typeMark",
action="store", type="string",
dest="typeMark", default="markX")
self.OptionParser.add_option( "--nrepeat2",
action="store", type="int",
dest="nrepeat2", default=1,help="nombre d'objets")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
def lengthtotime(self,l):
'''
Recieves an arc length l, and returns the index of the segment in self.skelcomp
containing the coresponding point, to gether with the position of the point on this segment.
If the deformer is closed, do computations modulo the toal length.
'''
if self.skelcompIsClosed:
l=l % sum(self.lengths)
if l<=0:
return 0,l/self.lengths[0]
i=0
while (i<len(self.lengths)) and (self.lengths[i]<=l):
l-=self.lengths[i]
i+=1
t=l/self.lengths[min(i,len(self.lengths)-1)]
return i, t
def applyDiffeo(self,bpt,vects=()):
'''
The kernel of this stuff:
bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.
'''
s=bpt[0]-self.skelcomp[0][0]
i,t=self.lengthtotime(s)
if i==len(self.skelcomp)-1:#je regarde si je suis au debut du skelete car sinon j'ai pas de vecteur
x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)
dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]
dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]
else:
x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)
dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]
dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
vx=0
vy=bpt[1]-self.skelcomp[0][1]
bpt[0]=x+vx*dx-vy*dy
bpt[1]=y+vx*dy+vy*dx
for v in vects:
vx=v[0]-self.skelcomp[0][0]-s
vy=v[1]-self.skelcomp[0][1]
v[0]=x+vx*dx-vy*dy
v[1]=y+vx*dy+vy*dx
def effect(self): | if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le | random_line_split | |
ps_couture.py | +1][0][0],p[i+1][0][1]=b2[2]
p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])
else:
d=(box+chord)/2
lengths.append(d)
i+=1
new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]
new.append(p[-1][1])
lengths=[l for l in lengths if l>zero]
return(new,lengths)
def addDot(self,idPoint,labelPoint,diametre,typepoint, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M dia,0 A dia,dia 0 0 1 0,dia dia,dia 0 0 1 -dia,0 dia,dia 0 0 1 0,-dia dia,dia 0 0 1 dia,0 Z'
ligneH='M 0,0 H dia'
ligneV='M 0,0 V dia'
rayon=ligneH.replace('dia',str(self.unittouu(diametre))) #valeur par defaut.
if typepoint=="LigneV":
rayon=ligneV.replace('dia',str(self.unittouu(diametre)))
if typepoint=="Cercle":
rayon=cercle.replace('dia',str(self.unittouu(diametre)/2))
dot.set('d',rayon)
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
def addMark(self,x,y,idPoint,labelPoint,diametre, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M 0,0 V dia'
rayon=cercle.replace('dia',str(self.unittouu(diametre)))
dot.set('d',rayon)
dot.set('x', str(x))
dot.set('y', str(y))
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
Style['stroke']= Couleur
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
return dot
def addText(self,x,y,text):
new = inkex.etree.Element(inkex.addNS('text','svg'))
new.set('style', "font-style:normal;font-weight:normal;font-size:10px;line-height:100%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")#simplestyle.formatStyle(s))
new.set('x', str(x))
new.set('y', str(y))
new.text = str(text)
self.current_layer.append(new)
return new
class Pointsellier(pathmodifier.Diffeo):
def __init__(self):
pathmodifier.Diffeo.__init__(self)
self.OptionParser.add_option("--title")
self.OptionParser.add_option("--diamlong",
action="store", type="string",
dest="diamlong", default="1.0mm")
self.OptionParser.add_option("--typePoint",
action="store", type="string",
dest="typePoint", default="LigneH")
self.OptionParser.add_option("--textInfos",
action="store", type="inkbool",
dest="textInfos", default=False)
self.OptionParser.add_option("-t", "--toffset",
action="store", type="string",
dest="toffset", default="0.1mm")
self.OptionParser.add_option("-p", "--space",
action="store", type="string",
dest="space", default="3.0mm")
self.OptionParser.add_option("--autoOffset",
action="store", type="inkbool",
dest="autoOffset", default=False)
self.OptionParser.add_option("-r","--nrepeat",
action="store", type="int",
dest="nrepeat", default=1,help="nombre d'objets")
self.OptionParser.add_option("--autoRepeat",
action="store", type="inkbool",
dest="autoRepeat", default=False)
self.OptionParser.add_option("--autoMask",
action="store", type="inkbool",
dest="autoMask", default=False)
self.OptionParser.add_option("--autoMark",
action="store", type="inkbool",
dest="autoMark", default=False)
self.OptionParser.add_option("--typeMark",
action="store", type="string",
dest="typeMark", default="markX")
self.OptionParser.add_option( "--nrepeat2",
action="store", type="int",
dest="nrepeat2", default=1,help="nombre d'objets")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
def | (self,l):
'''
Recieves an arc length l, and returns the index of the segment in self.skelcomp
containing the coresponding point, to gether with the position of the point on this segment.
If the deformer is closed, do computations modulo the toal length.
'''
if self.skelcompIsClosed:
l=l % sum(self.lengths)
if l<=0:
return 0,l/self.lengths[0]
i=0
while (i<len(self.lengths)) and (self.lengths[i]<=l):
l-=self.lengths[i]
i+=1
t=l/self.lengths[min(i,len(self.lengths)-1)]
return i, t
def applyDiffeo(self,bpt,vects=()):
'''
The kernel of this stuff:
bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.
'''
s=bpt[0]-self.skelcomp[0][0]
i,t=self.lengthtotime(s)
if i==len(self.skelcomp)-1:#je regarde si je suis au debut du skelete car sinon j'ai pas de vecteur
x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)
dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]
dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]
else:
x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)
dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]
dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
vx=0
vy=bpt[1]-self.skelcomp[0][1]
bpt[0]=x+vx*dx-vy*dy
bpt[1]=y+vx*dy+vy*dx
for v in vects:
vx=v[0]-self.skelcomp[0][0]-s
vy=v[1]-self.skelcomp[0][1]
v[0]=x+vx*dx-vy*dy
v[1]=y+vx*dy+vy*dx
def effect(self):
if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le | lengthtotime | identifier_name |
ps_couture.py | self.lengths[i]
dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
vx=0
vy=bpt[1]-self.skelcomp[0][1]
bpt[0]=x+vx*dx-vy*dy
bpt[1]=y+vx*dy+vy*dx
for v in vects:
vx=v[0]-self.skelcomp[0][0]-s
vy=v[1]-self.skelcomp[0][1]
v[0]=x+vx*dx-vy*dy
v[1]=y+vx*dy+vy*dx
def effect(self):
if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le chemin selectionner)
self.skeletons=self.selected
self.expandGroupsUnlinkClones(self.skeletons, True, False)
self.objectsToPaths(self.skeletons)
for skelnode in self.skeletons.itervalues(): #calcul de la longeur du chemin
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
self.skelcomp,self.lengths=linearize(comp)
longeur=sum(self.lengths)
distance=self.unittouu(self.options.space)
taille= self.unittouu(self.options.diamlong)
MaxCopies=max(1,int(round((longeur+distance)/distance)))
NbCopies= self.options.nrepeat #nombre de copie desirer a integrer dans les choix a modifier pour ne pas depasser les valeurs maxi
if NbCopies > MaxCopies:
NbCopies=MaxCopies #on limitte le nombre de copie au maxi possible sur le chemin
if self.options.autoRepeat: #gestion du calcul auto
NbCopies=MaxCopies
if self.options.autoOffset: #gestion du decallage automatique
tOffset=((longeur-(NbCopies-1)*distance)/2)-taille/2
else:
tOffset=self.unittouu(self.options.toffset)
#gestion du paterns
labelpoint='Point: '+ idpoint+ ' Nbr:' + str(NbCopies)+' longueur:'+str(round(self.uutounit(longeur,'mm'),2))+'mm'
addDot(self,idpoint,labelpoint,self.options.diamlong,self.options.typePoint,0)#creation du cercle de base
self.patterns={idpoint:self.getElementById(idpoint)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patterns.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patterns.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
xoffset=self.skelcomp[0][0]-bbox[0]+tOffset
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
if self.options.textInfos:
addText(self,xoffset,yoffset,labelpoint)
width=distance*NbCopies
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbCopies,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
inkex.errormsg("This extension need a path, not groups.")
if self.options.autoMark:
if self.options.typeMark=="markFraction":
Fraction= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
distance=(width)/Fraction #distance inter point
NbrMark=max(1,int(round((width+distance)/distance)))
infos= " Marquage 1/"+ str(Fraction)
couleur= '#ff0000'
else:
Repeat= self.options.nrepeat2 #en mode fraction 1= au debut et a la fin, 2= un demi, 3= 1/3 etc
NbrMark=max(1,int(round((NbCopies/Repeat))))
distance=distance*Repeat #distance inter point
infos=" Marquage tous les " + str(Repeat) + " points"
couleur= '#ffaa00'
labelMark="Mark: "+idpoint + infos
addMark(self,0,0,idpointMark,labelMark,self.options.diamlong,couleur)
self.patternsMark={idpointMark:self.getElementById(idpointMark)} #ajout du point dans le paterns de base
bbox=simpletransform.computeBBox(self.patternsMark.values())
#liste des chemins, fin de preparation
if distance < 0.01:
exit(_("The total length of the pattern is too small :\nPlease choose a larger object or set 'Space between copies' > 0"))
for id, node in self.patternsMark.iteritems():
if node.tag == inkex.addNS('path','svg') or node.tag=='path':
d = node.get('d')
p0 = cubicsuperpath.parsePath(d)
newp=[]
for skelnode in self.skeletons.itervalues():
self.curSekeleton=cubicsuperpath.parsePath(skelnode.get('d'))
for comp in self.curSekeleton:
p=copy.deepcopy(p0)
self.skelcomp,self.lengths=linearize(comp)
#!!!!>----> TODO: really test if path is closed! end point==start point is not enough!
self.skelcompIsClosed = (self.skelcomp[0]==self.skelcomp[-1])
# a tester si les point au dessus sont utilisable pour positionner les autres a upoi ressemble skelcomp ??
xoffset=self.skelcomp[0][0]-bbox[0] +tOffset+taille/2
yoffset=self.skelcomp[0][1]-(bbox[2]+bbox[3])/2
width=distance*NbrMark
if not self.skelcompIsClosed:
width-=distance
new=[]
for sub in p: #creation du nombre de patern
for i in range(0,NbrMark,1):
new.append(copy.deepcopy(sub)) #realise une copie de sub pour chaque nouveau element du patern
offset(sub,distance,0)
p=new
for sub in p:
offset(sub,xoffset,yoffset)
for sub in p: #une fois tous creer, on les mets en place
for ctlpt in sub:#pose le patern sur le chemin
self.applyDiffeo(ctlpt[1],(ctlpt[0],ctlpt[2]))
newp+=p
node.set('d', cubicsuperpath.formatPath(newp))
else:
| inkex.errormsg("This extension need a path, not groups.") | conditional_block | |
ps_couture.py | +1][0][0],p[i+1][0][1]=b2[2]
p.insert(i+1,[[b1[2][0],b1[2][1]],[b1[3][0],b1[3][1]],[b2[1][0],b2[1][1]]])
else:
d=(box+chord)/2
lengths.append(d)
i+=1
new=[p[i][1] for i in range(0,len(p)-1) if lengths[i]>zero]
new.append(p[-1][1])
lengths=[l for l in lengths if l>zero]
return(new,lengths)
def addDot(self,idPoint,labelPoint,diametre,typepoint, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M dia,0 A dia,dia 0 0 1 0,dia dia,dia 0 0 1 -dia,0 dia,dia 0 0 1 0,-dia dia,dia 0 0 1 dia,0 Z'
ligneH='M 0,0 H dia'
ligneV='M 0,0 V dia'
rayon=ligneH.replace('dia',str(self.unittouu(diametre))) #valeur par defaut.
if typepoint=="LigneV":
rayon=ligneV.replace('dia',str(self.unittouu(diametre)))
if typepoint=="Cercle":
rayon=cercle.replace('dia',str(self.unittouu(diametre)/2))
dot.set('d',rayon)
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
def addMark(self,x,y,idPoint,labelPoint,diametre, Couleur):
dot = inkex.etree.Element(inkex.addNS('path','svg'))
dot.set('id',idPoint)
cercle='M 0,0 V dia'
rayon=cercle.replace('dia',str(self.unittouu(diametre)))
dot.set('d',rayon)
dot.set('x', str(x))
dot.set('y', str(y))
Style= { 'stroke': '#000000', 'fill': 'none','stroke-opacity':'1', 'stroke-width': str(self.unittouu('1px')) }
Style['stroke']= Couleur
dot.set('style', simplestyle.formatStyle(Style))
dot.set(inkex.addNS('label','inkscape'), labelPoint)
self.current_layer.append(dot)
return dot
def addText(self,x,y,text):
new = inkex.etree.Element(inkex.addNS('text','svg'))
new.set('style', "font-style:normal;font-weight:normal;font-size:10px;line-height:100%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")#simplestyle.formatStyle(s))
new.set('x', str(x))
new.set('y', str(y))
new.text = str(text)
self.current_layer.append(new)
return new
class Pointsellier(pathmodifier.Diffeo):
| self.OptionParser.add_option("-p", "--space",
action="store", type="string",
dest="space", default="3.0mm")
self.OptionParser.add_option("--autoOffset",
action="store", type="inkbool",
dest="autoOffset", default=False)
self.OptionParser.add_option("-r","--nrepeat",
action="store", type="int",
dest="nrepeat", default=1,help="nombre d'objets")
self.OptionParser.add_option("--autoRepeat",
action="store", type="inkbool",
dest="autoRepeat", default=False)
self.OptionParser.add_option("--autoMask",
action="store", type="inkbool",
dest="autoMask", default=False)
self.OptionParser.add_option("--autoMark",
action="store", type="inkbool",
dest="autoMark", default=False)
self.OptionParser.add_option("--typeMark",
action="store", type="string",
dest="typeMark", default="markX")
self.OptionParser.add_option( "--nrepeat2",
action="store", type="int",
dest="nrepeat2", default=1,help="nombre d'objets")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
def lengthtotime(self,l):
'''
Recieves an arc length l, and returns the index of the segment in self.skelcomp
containing the coresponding point, to gether with the position of the point on this segment.
If the deformer is closed, do computations modulo the toal length.
'''
if self.skelcompIsClosed:
l=l % sum(self.lengths)
if l<=0:
return 0,l/self.lengths[0]
i=0
while (i<len(self.lengths)) and (self.lengths[i]<=l):
l-=self.lengths[i]
i+=1
t=l/self.lengths[min(i,len(self.lengths)-1)]
return i, t
def applyDiffeo(self,bpt,vects=()):
'''
The kernel of this stuff:
bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.
'''
s=bpt[0]-self.skelcomp[0][0]
i,t=self.lengthtotime(s)
if i==len(self.skelcomp)-1:#je regarde si je suis au debut du skelete car sinon j'ai pas de vecteur
x,y=bezmisc.tpoint(self.skelcomp[i-1],self.skelcomp[i],1+t)
dx=(self.skelcomp[i][0]-self.skelcomp[i-1][0])/self.lengths[-1]
dy=(self.skelcomp[i][1]-self.skelcomp[i-1][1])/self.lengths[-1]
else:
x,y=bezmisc.tpoint(self.skelcomp[i],self.skelcomp[i+1],t)
dx=(self.skelcomp[i+1][0]-self.skelcomp[i][0])/self.lengths[i]
dy=(self.skelcomp[i+1][1]-self.skelcomp[i][1])/self.lengths[i]
vx=0
vy=bpt[1]-self.skelcomp[0][1]
bpt[0]=x+vx*dx-vy*dy
bpt[1]=y+vx*dy+vy*dx
for v in vects:
vx=v[0]-self.skelcomp[0][0]-s
vy=v[1]-self.skelcomp[0][1]
v[0]=x+vx*dx-vy*dy
v[1]=y+vx*dy+vy*dx
def effect(self):
if len(self.options.ids)<1 and len(self.options.ids)>1:
inkex.errormsg("This extension requires only one selected paths.")
return
#liste des chemins, preparation
idList=self.options.ids
idList=pathmodifier.zSort(self.document.getroot(),idList)
id = idList[-1]
idpoint=id+'-'+ str(random.randint(1, 99)) #id du paterns creer a partir du chemin selectionner
idpointMark=id+'-'+ str(random.randint(1, 99))
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
style = simplestyle.parseStyle(node.get('style')) #je recupere l'ancien style
style['stroke']='#00ff00' #je modifie la valeur
if self.options.autoMask==True:
style['display']='none'
node.set('style', simplestyle.formatStyle(style) ) #j'applique la modifi
#gestion du skelete (le chemin | def __init__(self):
pathmodifier.Diffeo.__init__(self)
self.OptionParser.add_option("--title")
self.OptionParser.add_option("--diamlong",
action="store", type="string",
dest="diamlong", default="1.0mm")
self.OptionParser.add_option("--typePoint",
action="store", type="string",
dest="typePoint", default="LigneH")
self.OptionParser.add_option("--textInfos",
action="store", type="inkbool",
dest="textInfos", default=False)
self.OptionParser.add_option("-t", "--toffset",
action="store", type="string",
dest="toffset", default="0.1mm")
| identifier_body |
utils.py | (text, wordtoix, opt, is_cnn = True):
sent = [wordtoix[x] for x in text.split()]
return prepare_data_for_cnn([sent for i in range(opt.batch_size)], opt)
def prepare_data_for_cnn(seqs_x, opt):
maxlen=opt.maxlen
filter_h=opt.filter_shape
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
new_lengths_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
if l_x < maxlen:
new_seqs_x.append(s_x)
new_lengths_x.append(l_x)
else:
new_seqs_x.append(s_x[l_x-maxlen+1:])
new_lengths_x.append(maxlen-1)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
if len(lengths_x) < 1 :
return None, None
pad = filter_h -1
x = []
for rev in seqs_x:
xx = []
for i in xrange(pad):
xx.append(0)
for idx in rev:
xx.append(idx)
while len(xx) < maxlen + 2*pad:
xx.append(0)
x.append(xx)
x = np.array(x,dtype='int32')
return x
def prepare_data_for_rnn(seqs_x, opt, is_add_GO = True):
maxlen=opt.sent_len -2 #+ opt.filter_shape - 1 # 49
lengths_x = [len(s) for s in seqs_x]
# print lengths_x
if maxlen != None:
new_seqs_x = []
for l_x, s_x in zip(lengths_x, seqs_x):
if l_x < maxlen-2:
new_seqs_x.append(s_x)
else:
#new_seqs_x.append(s_x[l_x-maxlen+1:])
new_seqs_x.append(s_x[:maxlen-2]+[2])
seqs_x = new_seqs_x
lengths_x = [len(s) for s in seqs_x]
if len(lengths_x) < 1 :
return None, None
n_samples = len(seqs_x)
maxlen_x = np.max(lengths_x)
x = np.zeros(( n_samples, opt.sent_len)).astype('int32')
for idx, s_x in enumerate(seqs_x):
if is_add_GO:
x[idx, 0] = 1 # GO symbol
x[idx, 1:lengths_x[idx]+1] = s_x
else:
x[idx, :lengths_x[idx]] = s_x
return x
def restore_from_save(t_vars, sess, opt, prefix = 'd_', load_path = None):
if not load_path:
load_path = opt.load_path
if opt.load_from_pretrain:
save_keys = tensors_key_in_file(load_path)
#print(save_keys.keys())
ss = set([var.name[2:][:-2] for var in t_vars])&set([s[2:] for s in save_keys.keys()])
cc = {var.name[2:][:-2]:var for var in t_vars}
ss_right_shape = set([s for s in ss if cc[s].get_shape() == save_keys[prefix+s]]) # only restore variables with correct shape
ss_wrong_shape = ss - ss_right_shape
cc2 = {prefix+ var.name[2:][:-2]:var for var in t_vars if var.name[2:][:-2] in ss_right_shape} # name in file -> var
loader = tf.train.Saver(var_list=cc2)
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
else:
save_keys = tensors_key_in_file(load_path)
ss = [var for var in t_vars if var.name[:-2] in save_keys.keys()]
ss_right_shape = [var.name for var in ss if var.get_shape() == save_keys[var.name[:-2]]]
ss_wrong_shape = set([v.name for v in ss]) - set(ss_right_shape)
#ss = [var for var in ss if 'OptimizeLoss' not in var]
loader = tf.train.Saver(var_list= [var for var in t_vars if var.name in ss_right_shape])
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
_buckets = [(60,60)]
def read_data(source_path, target_path, opt):
"""
From tensorflow tutorial translate.py
Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not opt.max_train_data_size or counter < opt.max_train_data_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if opt.minlen <len(source_ids) < min(source_size, opt.maxlen) and opt.minlen <len(target_ids) < min(target_size, opt.maxlen):
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def read_pair_data_full(src_f, tgt_f, dic_f, train_prop = 0.9, max_num=None, rev_src=False, rev_tgt = False, is_text_src = False, is_text_tgt = False, p_f = '../data/', from_p = True):
#train, val = [], []
if from_p:
p_f = src_f[:-3] + str(max_num) + '.p'
if os.path.exists(p_f):
with open(p_f, 'rb') as pfile:
train, val, test, wordtoix, ixtoword = cPickle.load(pfile)
return train, val, test, wordtoix, ixtoword
wordtoix, ixtoword = {}, {}
print "Start reading dic file . . ."
if os.path.exists(dic_f):
print("loading Dictionary")
counter=0
with codecs.open(dic_f,"r",'utf-8') as f:
s=f.readline()
while s:
s=s.rstrip('\n').rstrip("\r")
#print("s==",s)
wordtoix[s]=counter
ixtoword[counter]=s
counter+=1
s=f.readline()
def shift_id(x):
return x
src, tgt = [], []
print "Start reading src file . . ."
with codecs.open(src_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count, max_l = 0, 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_src:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
max_l = max(max_l, len(tokens))
if not rev_src: # reverse source
src.append(tokens)
else :
src.append(tokens[::-1])
#pdb.set_trace()
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Source cnt: " + str(count) + " maxLen: " + str(max_l)
print "Start reading tgt file . . . | sent2idx | identifier_name | |
utils.py |
seqs_x = new_seqs_x
lengths_x = [len(s) for s in seqs_x]
if len(lengths_x) < 1 :
return None, None
n_samples = len(seqs_x)
maxlen_x = np.max(lengths_x)
x = np.zeros(( n_samples, opt.sent_len)).astype('int32')
for idx, s_x in enumerate(seqs_x):
if is_add_GO:
x[idx, 0] = 1 # GO symbol
x[idx, 1:lengths_x[idx]+1] = s_x
else:
x[idx, :lengths_x[idx]] = s_x
return x
def restore_from_save(t_vars, sess, opt, prefix = 'd_', load_path = None):
if not load_path:
load_path = opt.load_path
if opt.load_from_pretrain:
save_keys = tensors_key_in_file(load_path)
#print(save_keys.keys())
ss = set([var.name[2:][:-2] for var in t_vars])&set([s[2:] for s in save_keys.keys()])
cc = {var.name[2:][:-2]:var for var in t_vars}
ss_right_shape = set([s for s in ss if cc[s].get_shape() == save_keys[prefix+s]]) # only restore variables with correct shape
ss_wrong_shape = ss - ss_right_shape
cc2 = {prefix+ var.name[2:][:-2]:var for var in t_vars if var.name[2:][:-2] in ss_right_shape} # name in file -> var
loader = tf.train.Saver(var_list=cc2)
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
else:
save_keys = tensors_key_in_file(load_path)
ss = [var for var in t_vars if var.name[:-2] in save_keys.keys()]
ss_right_shape = [var.name for var in ss if var.get_shape() == save_keys[var.name[:-2]]]
ss_wrong_shape = set([v.name for v in ss]) - set(ss_right_shape)
#ss = [var for var in ss if 'OptimizeLoss' not in var]
loader = tf.train.Saver(var_list= [var for var in t_vars if var.name in ss_right_shape])
loader.restore(sess, load_path)
print("Loading variables from '%s'." % load_path)
print("Loaded variables:"+str(ss_right_shape))
print("Mis-shaped variables:"+str(ss_wrong_shape))
_buckets = [(60,60)]
def read_data(source_path, target_path, opt):
"""
From tensorflow tutorial translate.py
Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not opt.max_train_data_size or counter < opt.max_train_data_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if opt.minlen <len(source_ids) < min(source_size, opt.maxlen) and opt.minlen <len(target_ids) < min(target_size, opt.maxlen):
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def read_pair_data_full(src_f, tgt_f, dic_f, train_prop = 0.9, max_num=None, rev_src=False, rev_tgt = False, is_text_src = False, is_text_tgt = False, p_f = '../data/', from_p = True):
#train, val = [], []
if from_p:
p_f = src_f[:-3] + str(max_num) + '.p'
if os.path.exists(p_f):
with open(p_f, 'rb') as pfile:
train, val, test, wordtoix, ixtoword = cPickle.load(pfile)
return train, val, test, wordtoix, ixtoword
wordtoix, ixtoword = {}, {}
print "Start reading dic file . . ."
if os.path.exists(dic_f):
print("loading Dictionary")
counter=0
with codecs.open(dic_f,"r",'utf-8') as f:
s=f.readline()
while s:
s=s.rstrip('\n').rstrip("\r")
#print("s==",s)
wordtoix[s]=counter
ixtoword[counter]=s
counter+=1
s=f.readline()
def shift_id(x):
return x
src, tgt = [], []
print "Start reading src file . . ."
with codecs.open(src_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count, max_l = 0, 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_src:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
max_l = max(max_l, len(tokens))
if not rev_src: # reverse source
src.append(tokens)
else :
src.append(tokens[::-1])
#pdb.set_trace()
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Source cnt: " + str(count) + " maxLen: " + str(max_l)
print "Start reading tgt file . . ."
with codecs.open(tgt_f,"r",'utf-8') as f:
line = f.readline().rstrip("\n").rstrip("\r")
count = 0
#max_length_fact=0
while line and (not max_num or count<max_num):
count+=1
if is_text_tgt:
tokens=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in line.split()]
else:
tokens=[shift_id(int(x)) for x in line.split()]
if not rev_tgt: # reverse source
tgt.append(tokens)
else :
tgt.append(tokens[::-1])
line = f.readline().rstrip("\n").rstrip("\r")
if np.mod(count,100000)==0:
print count
print "Target cnt: " + str(count) + " maxLen: " + str(max_l)
assert(len(src)==len(tgt))
all_pairs = np.array(zip(*[tgt, src]))
if not train_prop:
train , val, test = all_pairs, [], []
else:
idx = np.random.choice(len(all_pairs), int(np.floor(train_prop*len(all_pairs))))
rem_idx = np.array(list(set(range(len(all_pairs)))-set(idx)))
#v_idx = np.random.choice(rem_idx, int(np.floor(0.5*len(rem_idx))))
v_idx = np.random.choice(rem_idx, len(rem_idx)-2000)
t_idx = np.array(list(set(rem_idx)-set(v_idx)))
#pdb.set_trace()
train, val, test = all_pairs[idx], all_pairs[v_idx], all_pairs[t_idx]
if from_p:
with open(p_f, 'wb') as pfile:
cPickle.dump([train, val, test, wordtoix, ixtoword], pfile)
| if l_x < maxlen-2:
new_seqs_x.append(s_x)
else:
#new_seqs_x.append(s_x[l_x-maxlen+1:])
new_seqs_x.append(s_x[:maxlen-2]+[2]) | conditional_block | |
utils.py | =[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in l.split()] + [2]
conv.append(sent)
# bp()
test.append(conv)
return test
def tensors_key_in_file(file_name):
"""Return tensors key in a checkpoint file.
Args:
file_name: Name of the checkpoint file.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
return reader.get_variable_to_shape_map()
except Exception as e: # pylint: disable=broad-except
print(str(e))
return None
def get_minibatches_idx(n, minibatch_size, shuffle=False):
idx_list = np.arange(n, dtype="int32")
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
# if (minibatch_start != n):
# # Make a minibatch out of what is left
# minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# def normalizing_L1(x, axis):
# norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
# normalized = x / (norm)
# return normalized
def normalizing(x, axis):
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
normalized = x / (norm)
return normalized
def normalizing_sum(x, axis):
# sum(x) == 1
sum_prob = tf.reduce_sum(x, axis=axis, keep_dims=True)
normalized = x / sum_prob
return normalized
def _p(pp, name):
return '%s_%s' % (pp, name)
def dropout(X, trng, p=0.):
if p != 0:
retain_prob = 1 - p
X = X / retain_prob * trng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
return X
""" used for initialization of the parameters. """
def ortho_weight(ndim):
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype(config.floatX)
def uniform_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype(config.floatX)
def normal_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.randn(nin, nout) * scale
return W.astype(config.floatX)
def zero_bias(ndim):
b = np.zeros((ndim,))
return b.astype(config.floatX)
"""auxiliary function for KDE"""
def log_mean_exp(A,b,sigma):
a=-0.5*((A-theano.tensor.tile(b,[A.shape[0],1]))**2).sum(1)/(sigma**2)
max_=a.max()
return max_+theano.tensor.log(theano.tensor.exp(a-theano.tensor.tile(max_,a.shape[0])).mean())
'''calculate KDE'''
def cal_nkde(X,mu,sigma):
s1,updates=theano.scan(lambda i,s: s+log_mean_exp(mu,X[i,:],sigma), sequences=[theano.tensor.arange(X.shape[0])],outputs_info=[np.asarray(0.,dtype="float32")])
E=s1[-1]
Z=mu.shape[0]*theano.tensor.log(sigma*np.sqrt(np.pi*2))
return (Z-E)/mu.shape[0]
def cal_relevance(generated, reference, embedding): # embedding V* E
generated = [[g] for g in generated]
reference = [[s] for s in reference]
#bp()
relevance_score = [0.0,0.0,0.0]
relevance_score[0] = greedy_match(reference, generated, embedding)
relevance_score[1] = average_score(reference, generated, embedding)
relevance_score[2] = extrema_score(reference, generated, embedding)
return relevance_score
def cal_BLEU(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2],score[1:]):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
return BLEUscore
def cal_BLEU_4(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2,3],score):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
BLEUscore[3] = BLEUscore[3]/len(generated)
return BLEUscore
def cal_BLEU_4_nltk(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
reference = [[s] for s in reference]
#bp()
chencherry = SmoothingFunction()
# Note: please keep smoothing turned on, because there is a bug in NLTK without smoothing (see below).
if is_corpus:
return nltk.translate.bleu_score.corpus_bleu(reference, generated, smoothing_function=chencherry.method2) # smoothing options: 0-7
else:
return np.mean([nltk.translate.bleu_score.sentence_bleu(r, g, smoothing_function=chencherry.method2) for r,g in zip(reference, generated)]) # smoothing options: 0-7
def cal_entropy(generated):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
etp_score = [0.0,0.0,0.0,0.0]
div_score = [0.0,0.0,0.0,0.0]
counter = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)]
for gg in generated:
g = gg.rstrip('2').split()
for n in range(4):
for idx in range(len(g)-n):
ngram = ' '.join(g[idx:idx+n+1])
counter[n][ngram] += 1
for n in range(4):
total = sum(counter[n].values()) +1e-10
for v in counter[n].values():
etp_score[n] += - (v+0.0) /total * (np.log(v+0.0) - np.log(total))
div_score[n] = (len(counter[n].values())+0.0) /total
return etp_score, div_score
def prepare_for_bleu(sentence):
sent=[x for x in sentence if x!=0]
while len(sent)<4:
sent.append(0)
#sent = ' '.join([ixtoword[x] for x in sent])
sent = ' '.join([str(x) for x in sent])
return sent
def _clip_gradients_seperate_norm(grads_and_vars, clip_gradients):
| """Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients = [clip_ops.clip_by_norm(grad, clip_gradients) for grad in gradients]
return list(zip(clipped_gradients, variables)) | identifier_body | |
utils.py | test file . . ."
test = []
with codecs.open(test_file,"r",'utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip("\n").rstrip("\r").split('\t')
conv = []
for l in line:
sent=[wordtoix[x] if x in wordtoix else dp.UNK_ID for x in l.split()] + [2]
conv.append(sent)
# bp()
test.append(conv)
return test
def tensors_key_in_file(file_name):
"""Return tensors key in a checkpoint file.
Args:
file_name: Name of the checkpoint file.
"""
try:
reader = pywrap_tensorflow.NewCheckpointReader(file_name)
return reader.get_variable_to_shape_map()
except Exception as e: # pylint: disable=broad-except
print(str(e))
return None
def get_minibatches_idx(n, minibatch_size, shuffle=False):
idx_list = np.arange(n, dtype="int32")
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range(n // minibatch_size):
minibatches.append(idx_list[minibatch_start:
minibatch_start + minibatch_size])
minibatch_start += minibatch_size
# if (minibatch_start != n):
# # Make a minibatch out of what is left
# minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)), minibatches)
# def normalizing_L1(x, axis):
# norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
# normalized = x / (norm)
# return normalized
def normalizing(x, axis):
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True))
normalized = x / (norm)
return normalized
def normalizing_sum(x, axis):
# sum(x) == 1
sum_prob = tf.reduce_sum(x, axis=axis, keep_dims=True)
normalized = x / sum_prob
return normalized
def _p(pp, name):
return '%s_%s' % (pp, name)
def dropout(X, trng, p=0.):
if p != 0:
retain_prob = 1 - p
X = X / retain_prob * trng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
return X
""" used for initialization of the parameters. """
def ortho_weight(ndim):
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype(config.floatX)
def uniform_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.uniform(low=-scale, high=scale, size=(nin, nout))
return W.astype(config.floatX)
def normal_weight(nin,nout=None, scale=0.05):
if nout == None:
nout = nin
W = np.random.randn(nin, nout) * scale
return W.astype(config.floatX)
def zero_bias(ndim):
b = np.zeros((ndim,))
return b.astype(config.floatX)
"""auxiliary function for KDE"""
def log_mean_exp(A,b,sigma):
a=-0.5*((A-theano.tensor.tile(b,[A.shape[0],1]))**2).sum(1)/(sigma**2)
max_=a.max()
return max_+theano.tensor.log(theano.tensor.exp(a-theano.tensor.tile(max_,a.shape[0])).mean())
'''calculate KDE'''
def cal_nkde(X,mu,sigma):
s1,updates=theano.scan(lambda i,s: s+log_mean_exp(mu,X[i,:],sigma), sequences=[theano.tensor.arange(X.shape[0])],outputs_info=[np.asarray(0.,dtype="float32")])
E=s1[-1]
Z=mu.shape[0]*theano.tensor.log(sigma*np.sqrt(np.pi*2))
return (Z-E)/mu.shape[0]
def cal_relevance(generated, reference, embedding): # embedding V* E
generated = [[g] for g in generated]
reference = [[s] for s in reference]
#bp()
relevance_score = [0.0,0.0,0.0]
relevance_score[0] = greedy_match(reference, generated, embedding)
relevance_score[1] = average_score(reference, generated, embedding)
relevance_score[2] = extrema_score(reference, generated, embedding)
return relevance_score
def cal_BLEU(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2],score[1:]):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
return BLEUscore
def cal_BLEU_4(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
BLEUscore = [0.0,0.0,0.0,0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]} , {0: [g]})
#print g, score
for i, s in zip([0,1,2,3],score):
BLEUscore[i]+=s
#BLEUscore += nltk.translate.bleu_score.sentence_bleu(reference, g, weight)
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
BLEUscore[3] = BLEUscore[3]/len(generated)
return BLEUscore
def cal_BLEU_4_nltk(generated, reference, is_corpus = False):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
reference = [[s] for s in reference]
#bp()
chencherry = SmoothingFunction()
# Note: please keep smoothing turned on, because there is a bug in NLTK without smoothing (see below).
if is_corpus:
return nltk.translate.bleu_score.corpus_bleu(reference, generated, smoothing_function=chencherry.method2) # smoothing options: 0-7
else:
return np.mean([nltk.translate.bleu_score.sentence_bleu(r, g, smoothing_function=chencherry.method2) for r,g in zip(reference, generated)]) # smoothing options: 0-7
def cal_entropy(generated):
#print 'in BLEU score calculation'
#the maximum is bigram, so assign the weight into 2 half.
etp_score = [0.0,0.0,0.0,0.0]
div_score = [0.0,0.0,0.0,0.0]
counter = [defaultdict(int),defaultdict(int),defaultdict(int),defaultdict(int)]
for gg in generated:
g = gg.rstrip('2').split()
for n in range(4):
for idx in range(len(g)-n):
ngram = ' '.join(g[idx:idx+n+1])
counter[n][ngram] += 1
for n in range(4):
total = sum(counter[n].values()) +1e-10
for v in counter[n].values():
etp_score[n] += - (v+0.0) /total * (np.log(v+0.0) - np.log(total))
div_score[n] = (len(counter[n].values())+0.0) /total
return etp_score, div_score
def prepare_for_bleu(sentence):
sent=[x for x in sentence if x!=0]
while len(sent)<4:
sent.append(0)
#sent = ' '.join([ixtoword[x] for x in sent]) | sent = ' '.join([str(x) for x in sent])
return sent
| random_line_split | |
formatter.rs | ' | '%' | 's' | '?' => true,
_ => false,
}
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
let (_, rest) = s.split_at(pos);
let mut consumed: usize = 0;
for b in rest {
match *b as char {
'0'..='9' => {}
_ => break,
};
consumed += 1;
}
if consumed == 0 {
(0, None)
} else {
let (intstr, _) = rest.split_at(consumed);
let val = unsafe {
// I think I can be reasonably sure that 0-9 chars are utf8 :)
match str::from_utf8_unchecked(intstr).parse::<i64>() {
Ok(v) => Some(v),
Err(_) => None,
}
};
(consumed, val)
}
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
struct FmtPy {
pub fill: char,
pub align: char,
pub alternate: bool,
pub sign: char,
pub width: i64,
pub thousands: bool,
pub precision: i64,
pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
// The rest of this was pretty much strait up copied from python's format parser
// All credit goes to python source file: formatter_unicode.c
//
let mut format = FmtPy {
fill: ' ',
align: '\0',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: '\0',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let rest = rest.as_bytes();
let mut align_specified = false;
let mut fill_specified = false;
let end: usize = rest.len();
let mut pos: usize = 0;
// If the second char is an alignment token,
// then fake_fill as fill
if end - pos >= 1 + fake_fill.len_utf8()
&& is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
{
format.align = rest[pos + fake_fill.len_utf8()] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 1 + fake_fill.len_utf8();
} else if end - pos >= 1 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos += fake_fill.len_utf8();
}
// Parse the various sign options
if end - pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos += 1;
}
// If the next character is #, we're in alternate mode. This only
// applies to integers.
if end - pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos += 1;
}
// The special case for 0-padding (backwards compat)
if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if !align_specified {
format.align = '=';
}
pos += 1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing width".to_string(),
))
}
Some(v) => {
format.width = v;
}
}
}
// Comma signifies add thousands separators
if end - pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos += 1;
}
// Parse field precision
if end - pos > 0 && rest[pos] as char == '.' {
pos += 1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing precision".to_string(),
))
}
Some(v) => {
format.precision = v;
}
}
} else {
// Not having a precision after a dot is an error.
if consumed == 0 {
return Err(FmtError::Invalid(
"Format specifier missing precision".to_string(),
));
}
}
pos += consumed;
}
// Finally, parse the type field.
if end - pos > 1 {
// More than one char remain, invalid format specifier.
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end - pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::TypeError(msg));
}
// pos+=1;
}
// Do as much validating as we can, just by looking at the format
// specifier. Do not take into account what type of formatting
// we're doing (int, float, string).
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
/// create Formatter from format string
pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
let mut found_colon = false;
let mut chars = s.chars();
let mut c = match chars.next() {
Some(':') | None => {
return Err(FmtError::Invalid("must specify identifier".to_string()))
}
Some(c) => c,
};
let mut consumed = 0;
// find the identifier
loop {
consumed += c.len_utf8();
if c == ':' {
found_colon = true;
break;
}
c = match chars.next() {
Some(c) => c,
None => {
break;
}
};
}
let (identifier, rest) = s.split_at(consumed);
let identifier = if found_colon {
let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
i
} else {
identifier
};
let format = parse_like_python(rest)?;
Ok(Formatter {
key: identifier,
fill: format.fill,
align: match format.align {
'\0' => Alignment::Unspecified,
'<' => Alignment::Left,
'^' => Alignment::Center,
'>' => Alignment::Right,
'=' => Alignment::Equal,
_ => unreachable!(),
},
sign: match format.sign {
'\0' => Sign::Unspecified,
'+' => Sign::Plus,
'-' => Sign::Minus,
' ' => Sign::Space,
_ => unreachable!(),
},
alternate: format.alternate,
width: match format.width {
-1 => None,
_ => Some(format.width as usize),
},
thousands: format.thousands,
precision: match format.precision {
-1 => None,
_ => Some(format.precision as usize),
},
ty: match format.ty {
'\0' => None,
_ => Some(format.ty),
},
buff: buff,
pattern: s,
})
}
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
/// fill getter
pub fn fill(&self) -> char {
self.fill
}
/// align getter
pub fn align(&self) -> Alignment {
self.align.clone()
}
// provide default for unspecified alignment
pub fn set_default_align(&mut self, align: Alignment) {
if self.align == Alignment::Unspecified {
self.align = align
}
}
/// width getter
pub fn width(&self) -> Option<usize> | {
self.width
} | identifier_body | |
formatter.rs | | 'x' | 'X' | 'e' | 'E' | 'f' | 'F' | '%' | 's' | '?' => true,
_ => false,
}
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
let (_, rest) = s.split_at(pos);
let mut consumed: usize = 0;
for b in rest {
match *b as char {
'0'..='9' => {}
_ => break,
};
consumed += 1;
}
if consumed == 0 {
(0, None)
} else {
let (intstr, _) = rest.split_at(consumed);
let val = unsafe {
// I think I can be reasonably sure that 0-9 chars are utf8 :)
match str::from_utf8_unchecked(intstr).parse::<i64>() {
Ok(v) => Some(v),
Err(_) => None,
}
};
(consumed, val)
}
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
struct FmtPy {
pub fill: char,
pub align: char,
pub alternate: bool,
pub sign: char,
pub width: i64,
pub thousands: bool,
pub precision: i64,
pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
// The rest of this was pretty much strait up copied from python's format parser
// All credit goes to python source file: formatter_unicode.c
//
let mut format = FmtPy {
fill: ' ',
align: '\0',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: '\0',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let rest = rest.as_bytes();
let mut align_specified = false;
let mut fill_specified = false;
let end: usize = rest.len();
let mut pos: usize = 0;
// If the second char is an alignment token,
// then fake_fill as fill
if end - pos >= 1 + fake_fill.len_utf8()
&& is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
{
format.align = rest[pos + fake_fill.len_utf8()] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 1 + fake_fill.len_utf8();
} else if end - pos >= 1 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos += fake_fill.len_utf8();
}
// Parse the various sign options
if end - pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos += 1;
} | if end - pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos += 1;
}
// The special case for 0-padding (backwards compat)
if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if !align_specified {
format.align = '=';
}
pos += 1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing width".to_string(),
))
}
Some(v) => {
format.width = v;
}
}
}
// Comma signifies add thousands separators
if end - pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos += 1;
}
// Parse field precision
if end - pos > 0 && rest[pos] as char == '.' {
pos += 1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing precision".to_string(),
))
}
Some(v) => {
format.precision = v;
}
}
} else {
// Not having a precision after a dot is an error.
if consumed == 0 {
return Err(FmtError::Invalid(
"Format specifier missing precision".to_string(),
));
}
}
pos += consumed;
}
// Finally, parse the type field.
if end - pos > 1 {
// More than one char remain, invalid format specifier.
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end - pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::TypeError(msg));
}
// pos+=1;
}
// Do as much validating as we can, just by looking at the format
// specifier. Do not take into account what type of formatting
// we're doing (int, float, string).
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
/// create Formatter from format string
pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
let mut found_colon = false;
let mut chars = s.chars();
let mut c = match chars.next() {
Some(':') | None => {
return Err(FmtError::Invalid("must specify identifier".to_string()))
}
Some(c) => c,
};
let mut consumed = 0;
// find the identifier
loop {
consumed += c.len_utf8();
if c == ':' {
found_colon = true;
break;
}
c = match chars.next() {
Some(c) => c,
None => {
break;
}
};
}
let (identifier, rest) = s.split_at(consumed);
let identifier = if found_colon {
let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
i
} else {
identifier
};
let format = parse_like_python(rest)?;
Ok(Formatter {
key: identifier,
fill: format.fill,
align: match format.align {
'\0' => Alignment::Unspecified,
'<' => Alignment::Left,
'^' => Alignment::Center,
'>' => Alignment::Right,
'=' => Alignment::Equal,
_ => unreachable!(),
},
sign: match format.sign {
'\0' => Sign::Unspecified,
'+' => Sign::Plus,
'-' => Sign::Minus,
' ' => Sign::Space,
_ => unreachable!(),
},
alternate: format.alternate,
width: match format.width {
-1 => None,
_ => Some(format.width as usize),
},
thousands: format.thousands,
precision: match format.precision {
-1 => None,
_ => Some(format.precision as usize),
},
ty: match format.ty {
'\0' => None,
_ => Some(format.ty),
},
buff: buff,
pattern: s,
})
}
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
/// fill getter
pub fn fill(&self) -> char {
self.fill
}
/// align getter
pub fn align(&self) -> Alignment {
self.align.clone()
}
// provide default for unspecified alignment
pub fn set_default_align(&mut self, align: Alignment) {
if self.align == Alignment::Unspecified {
self.align = align
}
}
|
// If the next character is #, we're in alternate mode. This only
// applies to integers. | random_line_split |
formatter.rs | ::<i64>() {
Ok(v) => Some(v),
Err(_) => None,
}
};
(consumed, val)
}
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
struct FmtPy {
pub fill: char,
pub align: char,
pub alternate: bool,
pub sign: char,
pub width: i64,
pub thousands: bool,
pub precision: i64,
pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
// The rest of this was pretty much strait up copied from python's format parser
// All credit goes to python source file: formatter_unicode.c
//
let mut format = FmtPy {
fill: ' ',
align: '\0',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: '\0',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let rest = rest.as_bytes();
let mut align_specified = false;
let mut fill_specified = false;
let end: usize = rest.len();
let mut pos: usize = 0;
// If the second char is an alignment token,
// then fake_fill as fill
if end - pos >= 1 + fake_fill.len_utf8()
&& is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
{
format.align = rest[pos + fake_fill.len_utf8()] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 1 + fake_fill.len_utf8();
} else if end - pos >= 1 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos += fake_fill.len_utf8();
}
// Parse the various sign options
if end - pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos += 1;
}
// If the next character is #, we're in alternate mode. This only
// applies to integers.
if end - pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos += 1;
}
// The special case for 0-padding (backwards compat)
if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if !align_specified {
format.align = '=';
}
pos += 1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing width".to_string(),
))
}
Some(v) => {
format.width = v;
}
}
}
// Comma signifies add thousands separators
if end - pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos += 1;
}
// Parse field precision
if end - pos > 0 && rest[pos] as char == '.' {
pos += 1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing precision".to_string(),
))
}
Some(v) => {
format.precision = v;
}
}
} else {
// Not having a precision after a dot is an error.
if consumed == 0 {
return Err(FmtError::Invalid(
"Format specifier missing precision".to_string(),
));
}
}
pos += consumed;
}
// Finally, parse the type field.
if end - pos > 1 {
// More than one char remain, invalid format specifier.
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end - pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::TypeError(msg));
}
// pos+=1;
}
// Do as much validating as we can, just by looking at the format
// specifier. Do not take into account what type of formatting
// we're doing (int, float, string).
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
/// create Formatter from format string
pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
let mut found_colon = false;
let mut chars = s.chars();
let mut c = match chars.next() {
Some(':') | None => {
return Err(FmtError::Invalid("must specify identifier".to_string()))
}
Some(c) => c,
};
let mut consumed = 0;
// find the identifier
loop {
consumed += c.len_utf8();
if c == ':' {
found_colon = true;
break;
}
c = match chars.next() {
Some(c) => c,
None => {
break;
}
};
}
let (identifier, rest) = s.split_at(consumed);
let identifier = if found_colon {
let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
i
} else {
identifier
};
let format = parse_like_python(rest)?;
Ok(Formatter {
key: identifier,
fill: format.fill,
align: match format.align {
'\0' => Alignment::Unspecified,
'<' => Alignment::Left,
'^' => Alignment::Center,
'>' => Alignment::Right,
'=' => Alignment::Equal,
_ => unreachable!(),
},
sign: match format.sign {
'\0' => Sign::Unspecified,
'+' => Sign::Plus,
'-' => Sign::Minus,
' ' => Sign::Space,
_ => unreachable!(),
},
alternate: format.alternate,
width: match format.width {
-1 => None,
_ => Some(format.width as usize),
},
thousands: format.thousands,
precision: match format.precision {
-1 => None,
_ => Some(format.precision as usize),
},
ty: match format.ty {
'\0' => None,
_ => Some(format.ty),
},
buff: buff,
pattern: s,
})
}
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
/// fill getter
pub fn fill(&self) -> char {
self.fill
}
/// align getter
pub fn align(&self) -> Alignment {
self.align.clone()
}
// provide default for unspecified alignment
pub fn set_default_align(&mut self, align: Alignment) {
if self.align == Alignment::Unspecified {
self.align = align
}
}
/// width getter
pub fn width(&self) -> Option<usize> {
self.width
}
/// thousands getter
pub fn thousands(&self) -> bool {
self.thousands
}
/// precision getter
pub fn precision(&self) -> Option<usize> {
self.precision
}
/// set precision to None, used for formatting int, float, etc
pub fn set_precision(&mut self, precision: Option<usize>) {
self.precision = precision;
}
/// sign getter
pub fn sign(&self) -> Sign {
self.sign.clone()
}
/// sign plus getter
/// here because it is in fmt::Formatter
pub fn sign_plus(&self) -> bool {
self.sign == Sign::Plus
}
/// sign minus getter
/// here because it is in fmt::Formatter
pub fn sign_minus(&self) -> bool {
self.sign == Sign::Minus
}
/// alternate getter
pub fn | alternate | identifier_name | |
formatter.rs | | 'x' | 'X' | 'e' | 'E' | 'f' | 'F' | '%' | 's' | '?' => true,
_ => false,
}
}
// get an integer from pos, returning the number of bytes
// consumed and the integer
fn get_integer(s: &[u8], pos: usize) -> (usize, Option<i64>) {
let (_, rest) = s.split_at(pos);
let mut consumed: usize = 0;
for b in rest {
match *b as char {
'0'..='9' => {}
_ => break,
};
consumed += 1;
}
if consumed == 0 {
(0, None)
} else {
let (intstr, _) = rest.split_at(consumed);
let val = unsafe {
// I think I can be reasonably sure that 0-9 chars are utf8 :)
match str::from_utf8_unchecked(intstr).parse::<i64>() {
Ok(v) => Some(v),
Err(_) => None,
}
};
(consumed, val)
}
}
#[derive(Debug)]
/// The format struct as it is defined in the python source
struct FmtPy {
pub fill: char,
pub align: char,
pub alternate: bool,
pub sign: char,
pub width: i64,
pub thousands: bool,
pub precision: i64,
pub ty: char,
}
fn parse_like_python(rest: &str) -> Result<FmtPy> {
// The rest of this was pretty much strait up copied from python's format parser
// All credit goes to python source file: formatter_unicode.c
//
let mut format = FmtPy {
fill: ' ',
align: '\0',
alternate: false,
sign: '\0',
width: -1,
thousands: false,
precision: -1,
ty: '\0',
};
let mut chars = rest.chars();
let fake_fill = match chars.next() {
Some(c) => c,
None => return Ok(format),
};
// from now on all format characters MUST be valid
// ASCII characters (fill and identifier were the
// only ones that weren't.
// Therefore we can use bytes for the rest
let rest = rest.as_bytes();
let mut align_specified = false;
let mut fill_specified = false;
let end: usize = rest.len();
let mut pos: usize = 0;
// If the second char is an alignment token,
// then fake_fill as fill
if end - pos >= 1 + fake_fill.len_utf8()
&& is_alignment_token(rest[pos + fake_fill.len_utf8()] as char)
{
format.align = rest[pos + fake_fill.len_utf8()] as char;
format.fill = fake_fill;
fill_specified = true;
align_specified = true;
pos += 1 + fake_fill.len_utf8();
} else if end - pos >= 1 && is_alignment_token(fake_fill) {
format.align = fake_fill;
pos += fake_fill.len_utf8();
}
// Parse the various sign options
if end - pos >= 1 && is_sign_element(rest[pos] as char) {
format.sign = rest[pos] as char;
pos += 1;
}
// If the next character is #, we're in alternate mode. This only
// applies to integers.
if end - pos >= 1 && rest[pos] as char == '#' {
format.alternate = true;
pos += 1;
}
// The special case for 0-padding (backwards compat)
if !fill_specified && end - pos >= 1 && rest[pos] == '0' as u8 {
format.fill = '0';
if !align_specified {
format.align = '=';
}
pos += 1;
}
// check to make sure that val is good
let (consumed, val) = get_integer(rest, pos);
pos += consumed;
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing width".to_string(),
))
}
Some(v) => {
format.width = v;
}
}
}
// Comma signifies add thousands separators
if end - pos > 0 && rest[pos] as char == ',' {
format.thousands = true;
pos += 1;
}
// Parse field precision
if end - pos > 0 && rest[pos] as char == '.' {
pos += 1;
let (consumed, val) = get_integer(rest, pos);
if consumed != 0 {
match val {
None => {
return Err(FmtError::Invalid(
"overflow error when parsing precision".to_string(),
))
}
Some(v) => |
}
} else {
// Not having a precision after a dot is an error.
if consumed == 0 {
return Err(FmtError::Invalid(
"Format specifier missing precision".to_string(),
));
}
}
pos += consumed;
}
// Finally, parse the type field.
if end - pos > 1 {
// More than one char remain, invalid format specifier.
return Err(FmtError::Invalid("Invalid format specifier".to_string()));
}
if end - pos == 1 {
format.ty = rest[pos] as char;
if !is_type_element(format.ty) {
let mut msg = String::new();
write!(msg, "Invalid type specifier: {:?}", format.ty).unwrap();
return Err(FmtError::TypeError(msg));
}
// pos+=1;
}
// Do as much validating as we can, just by looking at the format
// specifier. Do not take into account what type of formatting
// we're doing (int, float, string).
if format.thousands {
match format.ty {
'd' | 'e' | 'f' | 'g' | 'E' | 'G' | '%' | 'F' | '\0' => {} /* These are allowed. See PEP 378.*/
_ => {
let mut msg = String::new();
write!(msg, "Invalid comma type: {}", format.ty).unwrap();
return Err(FmtError::Invalid(msg));
}
}
}
Ok(format)
}
impl<'a, 'b> Formatter<'a, 'b> {
/// create Formatter from format string
pub fn from_str(s: &'a str, buff: &'b mut String) -> Result<Formatter<'a, 'b>> {
let mut found_colon = false;
let mut chars = s.chars();
let mut c = match chars.next() {
Some(':') | None => {
return Err(FmtError::Invalid("must specify identifier".to_string()))
}
Some(c) => c,
};
let mut consumed = 0;
// find the identifier
loop {
consumed += c.len_utf8();
if c == ':' {
found_colon = true;
break;
}
c = match chars.next() {
Some(c) => c,
None => {
break;
}
};
}
let (identifier, rest) = s.split_at(consumed);
let identifier = if found_colon {
let (i, _) = identifier.split_at(identifier.len() - 1); // get rid of ':'
i
} else {
identifier
};
let format = parse_like_python(rest)?;
Ok(Formatter {
key: identifier,
fill: format.fill,
align: match format.align {
'\0' => Alignment::Unspecified,
'<' => Alignment::Left,
'^' => Alignment::Center,
'>' => Alignment::Right,
'=' => Alignment::Equal,
_ => unreachable!(),
},
sign: match format.sign {
'\0' => Sign::Unspecified,
'+' => Sign::Plus,
'-' => Sign::Minus,
' ' => Sign::Space,
_ => unreachable!(),
},
alternate: format.alternate,
width: match format.width {
-1 => None,
_ => Some(format.width as usize),
},
thousands: format.thousands,
precision: match format.precision {
-1 => None,
_ => Some(format.precision as usize),
},
ty: match format.ty {
'\0' => None,
_ => Some(format.ty),
},
buff: buff,
pattern: s,
})
}
/// call this to re-write the original format string verbatum
/// back to the output
pub fn skip(mut self) -> Result<()> {
self.buff.push('{');
self.write_str(self.pattern).unwrap();
self.buff.push('}');
Ok(())
}
/// fill getter
pub fn fill(&self) -> char {
self.fill
}
/// align getter
pub fn align(&self) -> Alignment {
self.align.clone()
}
// provide default for unspecified alignment
pub fn set_default_align(&mut self, align: Alignment) {
if self.align == Alignment::Unspecified {
self.align = align
}
| {
format.precision = v;
} | conditional_block |
server.go | )
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "sync/atomic"
import "os"
import "syscall"
import "encoding/gob"
const Debug = false
var log_mu sync.Mutex
func (sm *ShardMaster) Logf(format string, a ...interface{}) {
if !Debug {
return
}
log_mu.Lock()
defer log_mu.Unlock()
me := sm.me
fmt.Printf("\x1b[%dm", (me%6)+31)
fmt.Printf("SM#%d : ", me)
fmt.Printf(format+"\n", a...)
fmt.Printf("\x1b[0m")
}
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead int32 // for testing
unreliable int32 // for testing
px *paxos.Paxos
opReqChan chan OpReq
lastDummySeq int //Seq of last time we launched a dummy op to fill a hole
activeGIDs map[int64]bool //If inactive remove from map instead of setting to false
lastConfig int
configs []Config // indexed by config num
}
type Op struct {
OpID int64
Type OpType
GID int64 //Used by all Ops but Query
Servers []string //Used by Join
Shard int //Used by move
Num int //Used by Query
}
type OpType int
const (
JoinOp OpType = iota + 1
LeaveOp
MoveOp
QueryOp
Dummy
)
type OpReq struct {
op Op
replyChan chan Config
}
func (sm *ShardMaster) sequentialApplier() {
seq := 1
for !sm.isdead() {
select {
case opreq := <-sm.opReqChan:
op := opreq.op
sm.Logf("Got operation through channel")
seq = sm.addToPaxos(seq, op)
sm.Logf("Operation added to paxos log at %d", seq)
if op.Type == QueryOp {
if op.Num < 0 {
//Returning latest config
opreq.replyChan <- sm.configs[sm.lastConfig]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", sm.lastConfig, seq)
} else {
opreq.replyChan <- sm.configs[op.Num]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", op.Num, seq)
}
} else {
opreq.replyChan <- Config{}
}
case <-time.After(50 * time.Millisecond):
sm.Logf("Ping")
seq = sm.ping(seq)
}
sm.Logf("Calling Done(%d)", seq-2)
sm.px.Done(seq - 1)
}
}
//Takes the last non-applied seq and returns the new one
func (sm *ShardMaster) ping(seq int) int {
//TODO: Is this a good dummy OP?
dummyOp := Op{}
for !sm.isdead() {
fate, val := sm.px.Status(seq)
if fate == paxos.Decided {
sm.applyOp(val.(Op))
seq++
continue
}
if sm.px.Max() > seq && seq > sm.lastDummySeq {
sm.px.Start(seq, dummyOp)
sm.waitForPaxos(seq)
sm.lastDummySeq = seq
} else {
return seq
}
}
sm.Logf("ERRRRORR: Ping fallthrough, we are dying! Return seq -1 ")
return -1
}
func (sm *ShardMaster) addToPaxos(seq int, op Op) (retseq int) {
for !sm.isdead() {
//Suggest OP as next seq
sm.px.Start(seq, op)
val, err := sm.waitForPaxos(seq)
if err != nil {
sm.Logf("ERRRROROOROROO!!!")
continue
}
sm.applyOp(val.(Op))
seq++
//Did work?
if val.(Op).OpID == op.OpID {
sm.Logf("Applied operation in log at seq %d", seq-1)
return seq
} else {
sm.Logf("Somebody else took seq %d before us, applying it and trying again", seq-1)
}
}
return -1
}
func (sm *ShardMaster) waitForPaxos(seq int) (val interface{}, err error) {
var status paxos.Fate
to := 10 * time.Millisecond
for {
status, val = sm.px.Status(seq)
if status == paxos.Decided {
err = nil
return
}
if status == paxos.Forgotten || sm.isdead() {
err = fmt.Errorf("We are dead or waiting for something forgotten. Server shutting down?")
sm.Logf("We are dead or waiting for something forgotten. Server shutting down?")
return
}
sm.Logf("Still waiting for paxos: %d", seq)
time.Sleep(to)
if to < 3*time.Second {
to *= 2
} else {
err = fmt.Errorf("Wait for paxos timeout!1")
return
}
}
}
func (sm *ShardMaster) applyOp(op Op) {
sm.Logf("Applying op to database")
switch op.Type {
case JoinOp:
sm.Logf("Join, you guys!")
sm.ApplyJoin(op.GID, op.Servers)
case LeaveOp:
sm.ApplyLeave(op.GID)
sm.Logf("Leave op applied!")
case MoveOp:
sm.ApplyMove(op.GID, op.Shard)
sm.Logf("Move op applied!")
case QueryOp:
//Do nothing
case Dummy:
//Do nothing
}
}
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
newConfig := sm.makeNewConfig()
newConfig.Shards[Shard] = GID
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
sm.activeGIDs[GID] = true
newConfig := sm.makeNewConfig()
newConfig.Groups[GID] = Servers
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyLeave(GID int64) {
delete(sm.activeGIDs, GID)
newConfig := sm.makeNewConfig()
delete(newConfig.Groups, GID)
for i, group := range newConfig.Shards {
if group == GID {
newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
}
}
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
|
import (
"math/rand"
"net"
"time" | random_line_split | |
server.go | 2
} else {
err = fmt.Errorf("Wait for paxos timeout!1")
return
}
}
}
func (sm *ShardMaster) applyOp(op Op) {
sm.Logf("Applying op to database")
switch op.Type {
case JoinOp:
sm.Logf("Join, you guys!")
sm.ApplyJoin(op.GID, op.Servers)
case LeaveOp:
sm.ApplyLeave(op.GID)
sm.Logf("Leave op applied!")
case MoveOp:
sm.ApplyMove(op.GID, op.Shard)
sm.Logf("Move op applied!")
case QueryOp:
//Do nothing
case Dummy:
//Do nothing
}
}
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
newConfig := sm.makeNewConfig()
newConfig.Shards[Shard] = GID
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
sm.activeGIDs[GID] = true
newConfig := sm.makeNewConfig()
newConfig.Groups[GID] = Servers
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyLeave(GID int64) {
delete(sm.activeGIDs, GID)
newConfig := sm.makeNewConfig()
delete(newConfig.Groups, GID)
for i, group := range newConfig.Shards {
if group == GID {
newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
}
}
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
continue
}
minGUID, minGroupSize = getMin(groupToShards)
maxGUID, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d (GUID: %d) ", minGroupSize, minGUID)
sm.Logf("max %d (GUID: %d) ", maxGroupSize, maxGUID)
maxCanGive := maxGroupSize - minShardsPerGroup
minNeeds := minShardsPerGroup - minGroupSize
shardsToMove := minNeeds
if maxCanGive < minNeeds {
shardsToMove = maxCanGive
}
sm.Logf("Moving %d shards: minNeeds %d, maxCanGive %d!", shardsToMove, minNeeds, maxCanGive)
for i := 0; i < shardsToMove; i++ {
moveshard := groupToShards[maxGUID][i]
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
groupToShards[maxGUID] = sliceDel(groupToShards[maxGUID], i)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
}
}
func getMax(groupToShards map[int64][]int) (GUID int64, nShards int) {
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) >= nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func getMin(groupToShards map[int64][]int) (GUID int64, nShards int) {
nShards = 2147483647 //Max signed int32 int is 32 or 64, so this will fit
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) < nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func sliceDel(a []int, i int) []int {
return append(a[:i], a[i+1:]...)
}
func sliceDelInt64(a []int64, i int) []int64 {
return append(a[:i], a[i+1:]...)
}
func (sm *ShardMaster) makeNewConfig() Config {
oldConfig := sm.configs[sm.lastConfig]
newConfig := Config{Groups: make(map[int64][]string)}
newConfig.Num = oldConfig.Num + 1
sm.lastConfig = newConfig.Num
newConfig.Shards = oldConfig.Shards //TODO: Does this work?
for k, v := range oldConfig.Groups {
newConfig.Groups[k] = v
}
return newConfig
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
op := Op{Type: JoinOp, OpID: rand.Int63(), GID: args.GID, Servers: args.Servers}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
op := Op{Type: LeaveOp, OpID: rand.Int63(), GID: args.GID}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
op := Op{Type: MoveOp, OpID: rand.Int63(), GID: args.GID, Shard: args.Shard}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
<-opReq.replyChan
sm.Logf("Got return!")
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error | {
op := Op{Type: QueryOp, OpID: rand.Int63(), Num: args.Num}
opReq := OpReq{op, make(chan Config, 1)}
sm.opReqChan <- opReq
sm.Logf("Waiting on return channel!")
reply.Config = <-opReq.replyChan
sm.Logf("Got return!")
return nil
} | identifier_body | |
server.go | [op.Num]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", op.Num, seq)
}
} else {
opreq.replyChan <- Config{}
}
case <-time.After(50 * time.Millisecond):
sm.Logf("Ping")
seq = sm.ping(seq)
}
sm.Logf("Calling Done(%d)", seq-2)
sm.px.Done(seq - 1)
}
}
//Takes the last non-applied seq and returns the new one
func (sm *ShardMaster) | (seq int) int {
//TODO: Is this a good dummy OP?
dummyOp := Op{}
for !sm.isdead() {
fate, val := sm.px.Status(seq)
if fate == paxos.Decided {
sm.applyOp(val.(Op))
seq++
continue
}
if sm.px.Max() > seq && seq > sm.lastDummySeq {
sm.px.Start(seq, dummyOp)
sm.waitForPaxos(seq)
sm.lastDummySeq = seq
} else {
return seq
}
}
sm.Logf("ERRRRORR: Ping fallthrough, we are dying! Return seq -1 ")
return -1
}
func (sm *ShardMaster) addToPaxos(seq int, op Op) (retseq int) {
for !sm.isdead() {
//Suggest OP as next seq
sm.px.Start(seq, op)
val, err := sm.waitForPaxos(seq)
if err != nil {
sm.Logf("ERRRROROOROROO!!!")
continue
}
sm.applyOp(val.(Op))
seq++
//Did work?
if val.(Op).OpID == op.OpID {
sm.Logf("Applied operation in log at seq %d", seq-1)
return seq
} else {
sm.Logf("Somebody else took seq %d before us, applying it and trying again", seq-1)
}
}
return -1
}
func (sm *ShardMaster) waitForPaxos(seq int) (val interface{}, err error) {
var status paxos.Fate
to := 10 * time.Millisecond
for {
status, val = sm.px.Status(seq)
if status == paxos.Decided {
err = nil
return
}
if status == paxos.Forgotten || sm.isdead() {
err = fmt.Errorf("We are dead or waiting for something forgotten. Server shutting down?")
sm.Logf("We are dead or waiting for something forgotten. Server shutting down?")
return
}
sm.Logf("Still waiting for paxos: %d", seq)
time.Sleep(to)
if to < 3*time.Second {
to *= 2
} else {
err = fmt.Errorf("Wait for paxos timeout!1")
return
}
}
}
func (sm *ShardMaster) applyOp(op Op) {
sm.Logf("Applying op to database")
switch op.Type {
case JoinOp:
sm.Logf("Join, you guys!")
sm.ApplyJoin(op.GID, op.Servers)
case LeaveOp:
sm.ApplyLeave(op.GID)
sm.Logf("Leave op applied!")
case MoveOp:
sm.ApplyMove(op.GID, op.Shard)
sm.Logf("Move op applied!")
case QueryOp:
//Do nothing
case Dummy:
//Do nothing
}
}
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
newConfig := sm.makeNewConfig()
newConfig.Shards[Shard] = GID
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
sm.activeGIDs[GID] = true
newConfig := sm.makeNewConfig()
newConfig.Groups[GID] = Servers
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyLeave(GID int64) {
delete(sm.activeGIDs, GID)
newConfig := sm.makeNewConfig()
delete(newConfig.Groups, GID)
for i, group := range newConfig.Shards {
if group == GID {
newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
}
}
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
continue
}
minGUID, minGroupSize = getMin(groupToShards)
maxGUID, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d (GUID: %d) ", minGroupSize, minGUID)
sm.Logf("max %d (GUID: %d) ", maxGroupSize, maxGUID)
maxCanGive := maxGroupSize - minShardsPerGroup
minNeeds := minShardsPerGroup - minGroupSize
shardsToMove := minNeeds
if maxCanGive < minNeeds {
shardsToMove = maxCanGive
}
sm.Logf("Moving %d shards: minNeeds %d, maxCanGive %d!", shardsToMove, minNeeds, maxCanGive)
for i := 0; i < shardsToMove; i++ {
moveshard := groupToShards[maxGUID][i]
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
groupToShards[maxGUID] = sliceDel(groupToShards[maxGUID], i)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
}
}
func getMax(groupToShards map[int64][]int) (GUID int64, nShards int) {
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) >= nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func getMin(groupToShards map[int64][]int) (GUID int64, nShards int) {
nShards = 2147483647 //Max signed int32 int is 32 or 64, so this will fit
for guid, shards | ping | identifier_name |
server.go | [op.Num]
sm.Logf("Query applied! Feeding value config nr %d through channel. %d", op.Num, seq)
}
} else {
opreq.replyChan <- Config{}
}
case <-time.After(50 * time.Millisecond):
sm.Logf("Ping")
seq = sm.ping(seq)
}
sm.Logf("Calling Done(%d)", seq-2)
sm.px.Done(seq - 1)
}
}
//Takes the last non-applied seq and returns the new one
func (sm *ShardMaster) ping(seq int) int {
//TODO: Is this a good dummy OP?
dummyOp := Op{}
for !sm.isdead() {
fate, val := sm.px.Status(seq)
if fate == paxos.Decided {
sm.applyOp(val.(Op))
seq++
continue
}
if sm.px.Max() > seq && seq > sm.lastDummySeq | else {
return seq
}
}
sm.Logf("ERRRRORR: Ping fallthrough, we are dying! Return seq -1 ")
return -1
}
func (sm *ShardMaster) addToPaxos(seq int, op Op) (retseq int) {
for !sm.isdead() {
//Suggest OP as next seq
sm.px.Start(seq, op)
val, err := sm.waitForPaxos(seq)
if err != nil {
sm.Logf("ERRRROROOROROO!!!")
continue
}
sm.applyOp(val.(Op))
seq++
//Did work?
if val.(Op).OpID == op.OpID {
sm.Logf("Applied operation in log at seq %d", seq-1)
return seq
} else {
sm.Logf("Somebody else took seq %d before us, applying it and trying again", seq-1)
}
}
return -1
}
func (sm *ShardMaster) waitForPaxos(seq int) (val interface{}, err error) {
var status paxos.Fate
to := 10 * time.Millisecond
for {
status, val = sm.px.Status(seq)
if status == paxos.Decided {
err = nil
return
}
if status == paxos.Forgotten || sm.isdead() {
err = fmt.Errorf("We are dead or waiting for something forgotten. Server shutting down?")
sm.Logf("We are dead or waiting for something forgotten. Server shutting down?")
return
}
sm.Logf("Still waiting for paxos: %d", seq)
time.Sleep(to)
if to < 3*time.Second {
to *= 2
} else {
err = fmt.Errorf("Wait for paxos timeout!1")
return
}
}
}
func (sm *ShardMaster) applyOp(op Op) {
sm.Logf("Applying op to database")
switch op.Type {
case JoinOp:
sm.Logf("Join, you guys!")
sm.ApplyJoin(op.GID, op.Servers)
case LeaveOp:
sm.ApplyLeave(op.GID)
sm.Logf("Leave op applied!")
case MoveOp:
sm.ApplyMove(op.GID, op.Shard)
sm.Logf("Move op applied!")
case QueryOp:
//Do nothing
case Dummy:
//Do nothing
}
}
func (sm *ShardMaster) ApplyMove(GID int64, Shard int) {
newConfig := sm.makeNewConfig()
newConfig.Shards[Shard] = GID
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyJoin(GID int64, Servers []string) {
sm.activeGIDs[GID] = true
newConfig := sm.makeNewConfig()
newConfig.Groups[GID] = Servers
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) ApplyLeave(GID int64) {
delete(sm.activeGIDs, GID)
newConfig := sm.makeNewConfig()
delete(newConfig.Groups, GID)
for i, group := range newConfig.Shards {
if group == GID {
newConfig.Shards[i] = 0 //Set to invalid group. Will be distributed by the rebalance
}
}
sm.rebalanceShards(&newConfig)
sm.configs = append(sm.configs, newConfig)
}
func (sm *ShardMaster) rebalanceShards(newConfig *Config) {
nShards := len(newConfig.Shards)
nGroups := 0
for _, _ = range newConfig.Groups {
nGroups++
}
groupToShards := make(map[int64][]int)
groupToShards[0] = []int{}
for GUID, _ := range sm.activeGIDs {
groupToShards[GUID] = []int{}
}
for i, v := range newConfig.Shards {
GUID := v
groupToShards[GUID] = append(groupToShards[GUID], i)
}
minGUID, minGroupSize := getMin(groupToShards)
maxGUID, maxGroupSize := getMax(groupToShards)
minShardsPerGroup := nShards / nGroups
maxShardsPerGroup := minShardsPerGroup
if nShards%nGroups > 0 {
maxShardsPerGroup += 1
}
for len(groupToShards[0]) > 0 || minGroupSize < minShardsPerGroup || maxGroupSize > maxShardsPerGroup {
sm.Logf("Rebalance iteration! ")
sm.Logf("%d > 0! ", len(groupToShards[0]))
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
shardsInInvalidGroup := len(groupToShards[0])
if shardsInInvalidGroup > 0 {
for i := 0; i < shardsInInvalidGroup; i++ {
sm.Logf("Rebalance 0 iteration!")
moveshard := groupToShards[0][0] //Remove the first on
groupToShards[0] = sliceDel(groupToShards[0], 0)
minGUID, _ := getMin(groupToShards)
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
continue
}
minGUID, minGroupSize = getMin(groupToShards)
maxGUID, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d (GUID: %d) ", minGroupSize, minGUID)
sm.Logf("max %d (GUID: %d) ", maxGroupSize, maxGUID)
maxCanGive := maxGroupSize - minShardsPerGroup
minNeeds := minShardsPerGroup - minGroupSize
shardsToMove := minNeeds
if maxCanGive < minNeeds {
shardsToMove = maxCanGive
}
sm.Logf("Moving %d shards: minNeeds %d, maxCanGive %d!", shardsToMove, minNeeds, maxCanGive)
for i := 0; i < shardsToMove; i++ {
moveshard := groupToShards[maxGUID][i]
groupToShards[minGUID] = append(groupToShards[minGUID], moveshard)
groupToShards[maxGUID] = sliceDel(groupToShards[maxGUID], i)
newConfig.Shards[moveshard] = minGUID
sm.Logf("Moving shard %d to group %d", moveshard, minGUID)
}
_, minGroupSize = getMin(groupToShards)
_, maxGroupSize = getMax(groupToShards)
sm.Logf("min %d < %d (GUID: %d)! ", minGroupSize, minShardsPerGroup, minGUID)
sm.Logf("max %d > %d!(GUID: %d) ", maxGroupSize, maxShardsPerGroup, maxGUID)
}
}
func getMax(groupToShards map[int64][]int) (GUID int64, nShards int) {
for guid, shards := range groupToShards {
if guid == 0 {
//GUID 0 is invalid and should not be counted
continue
}
if len(shards) >= nShards {
GUID = guid
nShards = len(shards)
}
}
return
}
func getMin(groupToShards map[int64][]int) (GUID int64, nShards int) {
nShards = 2147483647 //Max signed int32 int is 32 or 64, so this will fit
for guid, shards | {
sm.px.Start(seq, dummyOp)
sm.waitForPaxos(seq)
sm.lastDummySeq = seq
} | conditional_block |
scripts_LA_2.py | 3()
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
links = process_links(graph, node, features, in_order=True)
names = ['capacity', 'length', 'fftt']
color = 3 * (linkToCity[:, 1] == city)
color = color + 10 * (features[:, 0] > 900.)
weight = (features[:, 0] <= 900.) + 3. * (features[:, 0] > 900.)
geojson_link(links, names, color, weight)
def process_LA_net_attack(thres, beta):
process_net_attack('data/LA_net.txt',
'data/LA_net_attack.csv', thres, beta)
def load_LA_4():
graph = np.loadtxt('data/LA_net_attack.csv', delimiter=',', skiprows=1)
demand = np.loadtxt('data/LA_od_3.csv', delimiter=',', skiprows=1)
node = np.loadtxt('data/LA_node.csv', delimiter=',')
# features = table in the format [[capacity, length, FreeFlowTime]]
features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attack_2(alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
|
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows= | a = 60.0 | conditional_block |
scripts_LA_2.py | 3()
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
links = process_links(graph, node, features, in_order=True)
names = ['capacity', 'length', 'fftt']
color = 3 * (linkToCity[:, 1] == city)
color = color + 10 * (features[:, 0] > 900.)
weight = (features[:, 0] <= 900.) + 3. * (features[:, 0] > 900.)
geojson_link(links, names, color, weight)
def process_LA_net_attack(thres, beta):
process_net_attack('data/LA_net.txt',
'data/LA_net_attack.csv', thres, beta)
def load_LA_4():
graph = np.loadtxt('data/LA_net_attack.csv', delimiter=',', skiprows=1)
demand = np.loadtxt('data/LA_od_3.csv', delimiter=',', skiprows=1)
node = np.loadtxt('data/LA_node.csv', delimiter=',')
# features = table in the format [[capacity, length, FreeFlowTime]]
features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attack_2(alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
|
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
a = 60.0
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows= | net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features | identifier_body |
scripts_LA_2.py | 3()
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
links = process_links(graph, node, features, in_order=True)
names = ['capacity', 'length', 'fftt']
color = 3 * (linkToCity[:, 1] == city)
color = color + 10 * (features[:, 0] > 900.)
weight = (features[:, 0] <= 900.) + 3. * (features[:, 0] > 900.)
geojson_link(links, names, color, weight)
def process_LA_net_attack(thres, beta):
process_net_attack('data/LA_net.txt',
'data/LA_net_attack.csv', thres, beta)
def load_LA_4():
graph = np.loadtxt('data/LA_net_attack.csv', delimiter=',', skiprows=1)
demand = np.loadtxt('data/LA_od_3.csv', delimiter=',', skiprows=1)
node = np.loadtxt('data/LA_node.csv', delimiter=',')
# features = table in the format [[capacity, length, FreeFlowTime]]
features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def | (alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
a = 60.0
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows | LA_metrics_attack_2 | identifier_name |
scripts_LA_2.py | features = extract_features('data/LA_net.txt')
# increase capacities of these two links because they have a travel time
# in equilibrium that that is too big
features[10787, 0] = features[10787, 0] * 1.5
graph[10787, -1] = graph[10787, -1] / (1.5**4)
features[3348, :] = features[3348, :] * 1.2
graph[3348, -1] = graph[3348, -1] / (1.2**4)
# divide demand going to node 106 by 10 because too large
for i in range(demand.shape[0]):
if demand[i, 1] == 106.:
demand[i, 2] = demand[i, 2] / 10.
return graph, demand, node, features
def LA_parametric_study_attack(alphas, thres, betas):
for beta in betas:
net2, d, node, features = LA_metrics_attacks_all(beta, thres)
parametric_study_3(alphas, beta, net2, d, node, features,
1000., 3000., 'data/LA/test_attack_{}_{}.csv',
stop=1e-2)
# beta is the coefficient of reduction of capacity: capacity = beta*capacity
# load_LA_4() loads the modified network
def LA_metrics_attack(alphas, input, output, beta):
net, d, node, features = load_LA_4()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
net2, small_capacity = multiply_cognitive_cost(
net, features, beta, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attack_2(alphas, input, output, thres, beta):
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(net, features, 1000., 3000.)
save_metrics(alphas, net, net2, d, features, small_capacity, input,
output, skiprows=1,
length_unit='Meter', time_unit='Second')
def LA_metrics_attacks_city(beta, thres, city):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# extract the mapping from links to cities
linkToCity = np.genfromtxt('data/LA/link_to_cities.csv', delimiter=',',
skiprows=1, dtype='str')
print linkToCity
links_affected = np.logical_and(
linkToCity[:, 1] == city, features[:, 0] < thres)
print np.sum(links_affected)
# modify all small capacity links in GLendale
net2 = modify_capacity(net, links_affected, beta)
print net2
def LA_metrics_attacks_all(beta, thres):
net, d, node, features = load_LA_3()
# import pdb; pdb.set_trace()
d[:, 2] = d[:, 2] / 4000.
# modify all small capacity links
links_affected = (features[:, 0] < thres)
net2 = modify_capacity(net, links_affected, beta)
return net2, d, node, features
def compute_metrics_beta(alpha, beta, f, net, d, feat, subset, out, row,
fs=None, net2=None,
length_unit='Mile', time_unit='Minute'):
'''
Save in the numpy array 'out' at the specific 'row' the following metrics
- average cost for non-routed
- average cost for routed
- average cost
- average cost on a subset (e.g. local routes)
- average cost outside of a subset (e.g. non-local routes)
- total gas emissions
- total gas emissions on a subset (e.g. local routes)
- total gas emissions outside of a subset (e.g. non-local routes)
- total flow in the network
- total flow in the network on a subset (e.g. local routes)
- total flow in the network outside of a subset (e.g. non-local routes)
'''
if length_unit == 'Meter':
lengths = feat[:, 1] / 1609.34 # convert into miles
elif length_unit == 'Mile':
lengths = feat[:, 1]
if time_unit == 'Minute':
a = 60.0
elif time_unit == 'Second':
a = 3600.
b = 60. / a
speed = a * np.divide(lengths, np.maximum(cost(f, net), 10e-8))
co2 = np.multiply(gas_emission(speed), lengths)
out[row, 0] = alpha
out[row, 1] = beta
out[row, 4] = b * average_cost(f, net, d)
out[row, 5] = b * average_cost_subset(f, net, d, subset)
out[row, 6] = out[row, 3] - out[row, 4]
out[row, 7] = co2.dot(f) / f.dot(lengths)
out[row, 8] = np.multiply(co2, subset).dot(f) / f.dot(lengths)
out[row, 9] = out[row, 6] - out[row, 7]
out[row, 10] = np.sum(np.multiply(f, lengths)) * 4000.
out[row, 11] = np.sum(np.multiply(np.multiply(f, lengths), subset)) * 4000.
out[row, 12] = out[row, 9] - out[row, 10]
if alpha == 0.0:
out[row, 2] = b * average_cost(f, net, d)
out[row, 3] = b * average_cost_all_or_nothing(f, net, d)
return
if alpha == 1.0:
L = all_or_nothing_assignment(cost(f, net2), net, d)
out[row, 2] = b * cost(f, net).dot(L) / np.sum(d[:, 2])
out[row, 3] = b * average_cost(f, net, d)
return
out[row, 2] = b * cost(f, net).dot(fs[:, 0]) / \
np.sum((1 - alpha) * d[:, 2])
out[row, 3] = b * cost(f, net).dot(fs[:, 1]) / np.sum(alpha * d[:, 2])
def save_metrics_beta_LA(alphas, betas, thres, input, output, skiprows=0,
length_unit='Mile', time_unit='Minute'):
out = np.zeros((len(alphas) * len(betas), 13))
for beta in betas:
net, d, node, features = LA_metrics_attacks_all(beta, thres)
net2, small_capacity = multiply_cognitive_cost(
net, features, 1000., 3000.)
subset = small_capacity
a = 0
if alphas[0] == 0.0:
alpha = 0.0
print 'compute for nr = {}, r = {}'.format(
1 - alphas[0], alphas[0])
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(0.0, beta, f, net, d, features,
subset, out, 0,
length_unit=length_unit, time_unit=time_unit)
a = 1
b = 1 if alphas[-1] == 1.0 else 0
for i, alpha in enumerate(alphas[a:len(alphas) - b]):
print 'compute for nr = {}, r = {}'.format(1 - alpha, alpha)
fs = np.loadtxt(input.format(int(alpha * 100),
int(beta * 100)), delimiter=',',
skiprows=skiprows)
f = np.sum(fs, axis=1)
compute_metrics_beta(alpha, beta, f, net, d, features, subset,
out, i + a, fs=fs,
length_unit=length_unit, time_unit=time_unit)
if alphas[-1] == 1.0:
alpha = 1.0 | print 'compute for nr = {}, r = {}'.format(
1 - alphas[-1], alphas[-1])
fs = np.loadtxt(input.format(int(alpha * 100), | random_line_split | |
align.rs | }
fn align(&self, x: &[u8], y: &[u8], mode: InternalMode) -> Vec<Op> {
if x[..] == y[..] {
return vec![Op::Match; x.len()];
}
if self.band == Banded::Normal {
RustBio.align(self, mode, x, y)
} else {
align_banded(self, mode, x, y)
}
}
/// Aligns x to y as a whole
fn align_whole(
&self,
x: FileContent,
y: FileContent,
mode: InternalMode,
sender: Sender<AlignedMessage>,
) {
let alignment = self.align(&x, &y, mode);
let _ = sender.send(AlignedMessage::Append(
AlignElement::from_array(&alignment, &x, &y, 0, 0).0,
));
}
fn align_with_selection(
&self,
files: [FileContent; 2],
selection: (Range<usize>, bool),
end: bool,
sender: Sender<AlignedMessage>,
) {
let (select, right) = selection;
let full_pattern = &files[right as usize].clone();
let pattern = &files[right as usize].clone()[select.clone()];
let text = &files[(!right) as usize].clone()[..];
let alignment = self.align(pattern, text, InternalMode::Semiglobal);
let (alignment, textaddr) = ops_pattern_subrange(&alignment);
let (mut array, pattern_end, text_end) =
AlignElement::from_array(alignment, full_pattern, text, select.start, textaddr);
let (start_addr, end_addr) = if right {
array.iter_mut().for_each(|x| *x = x.mirror());
((textaddr, select.start), (text_end, pattern_end))
} else {
((select.start, textaddr), (pattern_end, text_end))
};
let (prepend, append) = if end {
let ap = array.pop().into_iter().collect();
(array, ap)
} else {
(Vec::new(), array)
};
if sender.send(AlignedMessage::Append(append)).is_err() {
return;
}
if sender.send(AlignedMessage::Prepend(prepend)).is_err() {
return;
}
let blocksize = if let AlignMode::Blockwise(s) = self.mode {
s
} else {
usize::MAX
};
let files2 = files.clone();
let sender2 = sender.clone();
let algo = *self;
std::thread::spawn(move || {
algo.align_end(
files2[0].clone(),
files2[1].clone(),
end_addr,
blocksize,
sender2,
);
});
self.align_front(
files[0].clone(),
files[1].clone(),
start_addr,
blocksize,
sender,
);
}
/// Blockwise alignment in the ascending address direction
pub fn align_end(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
// we want to have the beginning of our two arrays aligned at the same place
// since we start from a previous alignment or a cursor
while xaddr < x.len() && yaddr < y.len() {
// align at most block_size bytes from each sequence
let end_aligned = self.align(
&x[xaddr..(xaddr + block_size).min(x.len())],
&y[yaddr..(yaddr + block_size).min(y.len())],
self.mode.into(),
);
// we only actually append at most half of the block size since we make sure gaps crossing
// block boundaries are better detected
let ops = &end_aligned[0..end_aligned.len().min(block_size / 2)];
// we will not progress like this, so might as well quit
if ops.is_empty() {
break;
}
let (end, new_xaddr, new_yaddr) = AlignElement::from_array(ops, &x, &y, xaddr, yaddr);
if sender.send(AlignedMessage::Append(end)).is_err() {
return;
}
xaddr = new_xaddr;
yaddr = new_yaddr;
}
let clip = if x.len() == xaddr {
Op::Yclip(y.len() - yaddr)
} else if y.len() == yaddr {
Op::Xclip(x.len() - xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, xaddr, yaddr).0;
let _ = sender.send(AlignedMessage::Append(leftover));
}
/// Same as align_end, but in the other direction
pub fn align_front(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
while xaddr > 0 && yaddr > 0 {
let lower_xaddr = xaddr.saturating_sub(block_size);
let lower_yaddr = yaddr.saturating_sub(block_size);
let aligned = self.align(
&x[lower_xaddr..xaddr],
&y[lower_yaddr..yaddr],
self.mode.into(),
);
// unlike in align_end, we create the Alignelement from the whole array and then cut it
// in half. This is because the addresses returned from from_array are at the end, which
// we already know, so we instead take the start addresses from the array itself
let (end, _, _) = AlignElement::from_array(&aligned, &x, &y, lower_xaddr, lower_yaddr);
let real_end = Vec::from(&end[end.len().saturating_sub(block_size / 2)..end.len()]);
// if this is empty, we will not progress, so send the leftover out and quit after that
if real_end.is_empty() {
break;
}
let first = real_end.first().unwrap();
xaddr = first.xaddr;
yaddr = first.yaddr;
if sender.send(AlignedMessage::Prepend(real_end)).is_err() {
return;
}
}
let clip = if xaddr == 0 {
Op::Yclip(yaddr)
} else if yaddr == 0 {
Op::Xclip(xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, 0, 0).0;
let _ = sender.send(AlignedMessage::Prepend(leftover));
}
}
/// Representation of the alignment that saves the original addresses of the bytes.
/// This has some space overhead, but alignment is slow enough for that not to matter in most cases.
#[derive(Clone, Copy, Debug)]
pub struct AlignElement {
pub xaddr: usize,
pub xbyte: Option<u8>,
pub yaddr: usize,
pub ybyte: Option<u8>,
}
impl AlignElement {
/// mirrors the values
pub fn mirror(&self) -> AlignElement {
AlignElement {
xaddr: self.yaddr,
xbyte: self.ybyte,
yaddr: self.xaddr,
ybyte: self.xbyte,
}
}
/// Creates a vector out of `AlignElement`s from the operations outputted by rust-bio.
/// Also outputs the addresses at the end of the array.
fn from_array(
r: &[Op],
x: &[u8],
y: &[u8],
mut xaddr: usize,
mut yaddr: usize,
) -> (Vec<AlignElement>, usize, usize) {
let mut v = Vec::new();
for op in r {
match op {
Op::Match | Op::Subst => {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: Some(y[yaddr]),
});
xaddr += 1;
yaddr += 1;
}
Op::Ins => {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: None,
});
xaddr += 1;
}
Op::Del => {
v.push(AlignElement {
xaddr,
xbyte: None,
yaddr,
ybyte: Some(y[yaddr]),
});
yaddr += 1;
}
Op::Xclip(size) => {
v.extend((xaddr..xaddr + size).map(|s| AlignElement {
xaddr: s,
xbyte: Some(x[s]),
yaddr,
ybyte: None,
}));
xaddr += size
}
Op::Yclip(size) => {
v.extend((yaddr..yaddr + size).map(|s| AlignElement {
xaddr,
xbyte: None,
yaddr: s,
ybyte: Some(y[s]), | })); | random_line_split | |
align.rs | {
Local,
Global,
Semiglobal,
}
impl From<AlignMode> for InternalMode {
fn from(value: AlignMode) -> Self {
match value {
AlignMode::Local => InternalMode::Local,
AlignMode::Global | AlignMode::Blockwise(_) => InternalMode::Global,
}
}
}
trait Align {
fn align(&self, algo: &AlignAlgorithm, mode: InternalMode, x: &[u8], y: &[u8]) -> Vec<Op>;
}
/// Determines whether to use the banded variant of the algorithm with given k-mer length
/// and window size
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Banded {
Normal,
Banded { kmer: usize, window: usize },
}
/// Contains parameters to run the alignment algorithm with
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct AlignAlgorithm {
pub gap_open: i32,
pub gap_extend: i32,
pub mismatch_score: i32,
pub match_score: i32,
pub mode: AlignMode,
pub band: Banded,
}
impl Default for AlignAlgorithm {
fn default() -> Self {
AlignAlgorithm {
gap_open: -5,
gap_extend: -1,
mismatch_score: -1,
match_score: 1,
mode: AlignMode::Blockwise(DEFAULT_BLOCKSIZE),
band: Banded::Normal,
}
}
}
impl AlignAlgorithm {
/// This function starts the threads for the alignment, which send the data over the sender.
/// It should then immediately return.
pub fn start_align(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
sender: Sender<AlignedMessage>,
) {
let algo = *self;
match self.mode {
AlignMode::Local => {
// we only need one thread
std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Local, sender));
}
AlignMode::Global => {
std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Global, sender));
}
AlignMode::Blockwise(blocksize) => {
// for Blockwise, we need one thread for each direction from the cursor
// Clone the data for the second thread here
let x_cp = x.clone();
let y_cp = y.clone();
let sender_cp = sender.clone();
std::thread::spawn(move || algo.align_end(x, y, addr, blocksize, sender));
std::thread::spawn(move || {
algo.align_front(x_cp, y_cp, addr, blocksize, sender_cp)
});
}
}
}
pub fn start_align_with_selection(
&self,
files: [FileContent; 2],
selection: [Option<Range<usize>>; 2],
addr: [usize; 2],
sender: Sender<AlignedMessage>,
) {
let (selected, right, end) = match selection.clone() {
[None, None] | [Some(_), Some(_)] => {
let [file0, file1] = files;
// if both or none are selected, just do the normal process
return self.start_align(file0, file1, (addr[0], addr[1]), sender);
}
[Some(x), None] | [None, Some(x)] => {
if x.is_empty() {
// selection is empty, does not really make sense to do glocal alignment
let [file0, file1] = files;
return self.start_align(file0, file1, (addr[0], addr[1]), sender);
}
let right = selection[1].is_some();
(
x.clone(),
selection[1].is_some(),
addr[right as usize] != x.start,
)
}
};
let algo = *self;
std::thread::spawn(move || {
algo.align_with_selection(files, (selected, right), end, sender)
});
}
fn align(&self, x: &[u8], y: &[u8], mode: InternalMode) -> Vec<Op> {
if x[..] == y[..] {
return vec![Op::Match; x.len()];
}
if self.band == Banded::Normal {
RustBio.align(self, mode, x, y)
} else {
align_banded(self, mode, x, y)
}
}
/// Aligns x to y as a whole
fn align_whole(
&self,
x: FileContent,
y: FileContent,
mode: InternalMode,
sender: Sender<AlignedMessage>,
) {
let alignment = self.align(&x, &y, mode);
let _ = sender.send(AlignedMessage::Append(
AlignElement::from_array(&alignment, &x, &y, 0, 0).0,
));
}
fn align_with_selection(
&self,
files: [FileContent; 2],
selection: (Range<usize>, bool),
end: bool,
sender: Sender<AlignedMessage>,
) {
let (select, right) = selection;
let full_pattern = &files[right as usize].clone();
let pattern = &files[right as usize].clone()[select.clone()];
let text = &files[(!right) as usize].clone()[..];
let alignment = self.align(pattern, text, InternalMode::Semiglobal);
let (alignment, textaddr) = ops_pattern_subrange(&alignment);
let (mut array, pattern_end, text_end) =
AlignElement::from_array(alignment, full_pattern, text, select.start, textaddr);
let (start_addr, end_addr) = if right {
array.iter_mut().for_each(|x| *x = x.mirror());
((textaddr, select.start), (text_end, pattern_end))
} else {
((select.start, textaddr), (pattern_end, text_end))
};
let (prepend, append) = if end {
let ap = array.pop().into_iter().collect();
(array, ap)
} else {
(Vec::new(), array)
};
if sender.send(AlignedMessage::Append(append)).is_err() {
return;
}
if sender.send(AlignedMessage::Prepend(prepend)).is_err() {
return;
}
let blocksize = if let AlignMode::Blockwise(s) = self.mode {
s
} else {
usize::MAX
};
let files2 = files.clone();
let sender2 = sender.clone();
let algo = *self;
std::thread::spawn(move || {
algo.align_end(
files2[0].clone(),
files2[1].clone(),
end_addr,
blocksize,
sender2,
);
});
self.align_front(
files[0].clone(),
files[1].clone(),
start_addr,
blocksize,
sender,
);
}
/// Blockwise alignment in the ascending address direction
pub fn align_end(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
// we want to have the beginning of our two arrays aligned at the same place
// since we start from a previous alignment or a cursor
while xaddr < x.len() && yaddr < y.len() {
// align at most block_size bytes from each sequence
let end_aligned = self.align(
&x[xaddr..(xaddr + block_size).min(x.len())],
&y[yaddr..(yaddr + block_size).min(y.len())],
self.mode.into(),
);
// we only actually append at most half of the block size since we make sure gaps crossing
// block boundaries are better detected
let ops = &end_aligned[0..end_aligned.len().min(block_size / 2)];
// we will not progress like this, so might as well quit
if ops.is_empty() {
break;
}
let (end, new_xaddr, new_yaddr) = AlignElement::from_array(ops, &x, &y, xaddr, yaddr);
if sender.send(AlignedMessage::Append(end)).is_err() {
return;
}
xaddr = new_xaddr;
yaddr = new_yaddr;
}
let clip = if x.len() == xaddr {
Op::Yclip(y.len() - yaddr)
} else if y.len() == yaddr {
Op::Xclip(x.len() - xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, xaddr, yaddr).0;
let _ = sender.send(AlignedMessage::Append(leftover));
}
/// Same as align_end, but in the other direction
pub fn align_front(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
while x | InternalMode | identifier_name | |
align.rs | addr[0], addr[1]), sender);
}
[Some(x), None] | [None, Some(x)] => {
if x.is_empty() {
// selection is empty, does not really make sense to do glocal alignment
let [file0, file1] = files;
return self.start_align(file0, file1, (addr[0], addr[1]), sender);
}
let right = selection[1].is_some();
(
x.clone(),
selection[1].is_some(),
addr[right as usize] != x.start,
)
}
};
let algo = *self;
std::thread::spawn(move || {
algo.align_with_selection(files, (selected, right), end, sender)
});
}
fn align(&self, x: &[u8], y: &[u8], mode: InternalMode) -> Vec<Op> {
if x[..] == y[..] {
return vec![Op::Match; x.len()];
}
if self.band == Banded::Normal {
RustBio.align(self, mode, x, y)
} else {
align_banded(self, mode, x, y)
}
}
/// Aligns x to y as a whole
fn align_whole(
&self,
x: FileContent,
y: FileContent,
mode: InternalMode,
sender: Sender<AlignedMessage>,
) {
let alignment = self.align(&x, &y, mode);
let _ = sender.send(AlignedMessage::Append(
AlignElement::from_array(&alignment, &x, &y, 0, 0).0,
));
}
fn align_with_selection(
&self,
files: [FileContent; 2],
selection: (Range<usize>, bool),
end: bool,
sender: Sender<AlignedMessage>,
) {
let (select, right) = selection;
let full_pattern = &files[right as usize].clone();
let pattern = &files[right as usize].clone()[select.clone()];
let text = &files[(!right) as usize].clone()[..];
let alignment = self.align(pattern, text, InternalMode::Semiglobal);
let (alignment, textaddr) = ops_pattern_subrange(&alignment);
let (mut array, pattern_end, text_end) =
AlignElement::from_array(alignment, full_pattern, text, select.start, textaddr);
let (start_addr, end_addr) = if right {
array.iter_mut().for_each(|x| *x = x.mirror());
((textaddr, select.start), (text_end, pattern_end))
} else {
((select.start, textaddr), (pattern_end, text_end))
};
let (prepend, append) = if end {
let ap = array.pop().into_iter().collect();
(array, ap)
} else {
(Vec::new(), array)
};
if sender.send(AlignedMessage::Append(append)).is_err() {
return;
}
if sender.send(AlignedMessage::Prepend(prepend)).is_err() {
return;
}
let blocksize = if let AlignMode::Blockwise(s) = self.mode {
s
} else {
usize::MAX
};
let files2 = files.clone();
let sender2 = sender.clone();
let algo = *self;
std::thread::spawn(move || {
algo.align_end(
files2[0].clone(),
files2[1].clone(),
end_addr,
blocksize,
sender2,
);
});
self.align_front(
files[0].clone(),
files[1].clone(),
start_addr,
blocksize,
sender,
);
}
/// Blockwise alignment in the ascending address direction
pub fn align_end(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
// we want to have the beginning of our two arrays aligned at the same place
// since we start from a previous alignment or a cursor
while xaddr < x.len() && yaddr < y.len() {
// align at most block_size bytes from each sequence
let end_aligned = self.align(
&x[xaddr..(xaddr + block_size).min(x.len())],
&y[yaddr..(yaddr + block_size).min(y.len())],
self.mode.into(),
);
// we only actually append at most half of the block size since we make sure gaps crossing
// block boundaries are better detected
let ops = &end_aligned[0..end_aligned.len().min(block_size / 2)];
// we will not progress like this, so might as well quit
if ops.is_empty() {
break;
}
let (end, new_xaddr, new_yaddr) = AlignElement::from_array(ops, &x, &y, xaddr, yaddr);
if sender.send(AlignedMessage::Append(end)).is_err() {
return;
}
xaddr = new_xaddr;
yaddr = new_yaddr;
}
let clip = if x.len() == xaddr {
Op::Yclip(y.len() - yaddr)
} else if y.len() == yaddr {
Op::Xclip(x.len() - xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, xaddr, yaddr).0;
let _ = sender.send(AlignedMessage::Append(leftover));
}
/// Same as align_end, but in the other direction
pub fn align_front(
&self,
x: FileContent,
y: FileContent,
addr: (usize, usize),
block_size: usize,
sender: Sender<AlignedMessage>,
) {
let (mut xaddr, mut yaddr) = addr;
while xaddr > 0 && yaddr > 0 {
let lower_xaddr = xaddr.saturating_sub(block_size);
let lower_yaddr = yaddr.saturating_sub(block_size);
let aligned = self.align(
&x[lower_xaddr..xaddr],
&y[lower_yaddr..yaddr],
self.mode.into(),
);
// unlike in align_end, we create the Alignelement from the whole array and then cut it
// in half. This is because the addresses returned from from_array are at the end, which
// we already know, so we instead take the start addresses from the array itself
let (end, _, _) = AlignElement::from_array(&aligned, &x, &y, lower_xaddr, lower_yaddr);
let real_end = Vec::from(&end[end.len().saturating_sub(block_size / 2)..end.len()]);
// if this is empty, we will not progress, so send the leftover out and quit after that
if real_end.is_empty() {
break;
}
let first = real_end.first().unwrap();
xaddr = first.xaddr;
yaddr = first.yaddr;
if sender.send(AlignedMessage::Prepend(real_end)).is_err() {
return;
}
}
let clip = if xaddr == 0 {
Op::Yclip(yaddr)
} else if yaddr == 0 {
Op::Xclip(xaddr)
} else {
return;
};
let leftover = AlignElement::from_array(&[clip], &x, &y, 0, 0).0;
let _ = sender.send(AlignedMessage::Prepend(leftover));
}
}
/// Representation of the alignment that saves the original addresses of the bytes.
/// This has some space overhead, but alignment is slow enough for that not to matter in most cases.
#[derive(Clone, Copy, Debug)]
pub struct AlignElement {
pub xaddr: usize,
pub xbyte: Option<u8>,
pub yaddr: usize,
pub ybyte: Option<u8>,
}
impl AlignElement {
/// mirrors the values
pub fn mirror(&self) -> AlignElement {
AlignElement {
xaddr: self.yaddr,
xbyte: self.ybyte,
yaddr: self.xaddr,
ybyte: self.xbyte,
}
}
/// Creates a vector out of `AlignElement`s from the operations outputted by rust-bio.
/// Also outputs the addresses at the end of the array.
fn from_array(
r: &[Op],
x: &[u8],
y: &[u8],
mut xaddr: usize,
mut yaddr: usize,
) -> (Vec<AlignElement>, usize, usize) {
let mut v = Vec::new();
for op in r {
match op {
Op::Match | Op::Subst => {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: Some(y[yaddr]),
});
xaddr += 1;
yaddr += 1;
}
Op::Ins => | {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: None,
});
xaddr += 1;
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.