file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
lib.rs | Self::IOError(_) => write!(f, "An IO error happened"),
Self::InvalidHeaderMagic(value) => write!(f, "The header is invalid. It should either be PKDPX or AT4PX. The actual value of this header (in base 10) is {:?}", value),
Self::InvalidDecompressedLength => write!(f, "The decompressed lenght doesn't correspond to what is indicated in the file"),
Self::FileToCompressTooLong(lenght) => write!(f, "The file to compress is too long (real size: {}, max size: 256*256)", lenght)
}
}
}
impl From<io::Error> for PXError {
fn from(err: io::Error) -> Self {
Self::IOError(err)
}
}
#[derive(Debug)]
struct ControlFlags {
value: [u8; 9],
}
impl ControlFlags {
fn new(value: [u8; 9]) -> ControlFlags {
ControlFlags { value }
}
fn find(&self, nb_high: u8) -> Option<usize> {
for v in 0..self.value.len() {
if self.value[v] == nb_high {
return Some(v);
}
}
None
}
}
fn px_read_u16<T: Read>(file: &mut T) -> Result<u16, PXError> {
let mut buf = [0; 2];
file.read_exact(&mut buf)?;
Ok(u16::from_le_bytes(buf))
}
fn px_read_u32<T: Read>(file: &mut T) -> Result<u32, PXError> {
let mut buf = [0; 4];
file.read_exact(&mut buf)?;
Ok(u32::from_le_bytes(buf))
}
fn px_read_u8<T: Read>(file: &mut T) -> Result<u8, PXError> {
let mut buf = [0];
file.read_exact(&mut buf)?;
Ok(buf[0])
}
/// decompress a pkdpx or at4px file. It take as input a Bytes buffer, and return a decompressed buffer (or an error)
///
/// If atomatically determine if it is a pkdpx or an at4px based on the header
/// If the file isn't the good lenght, it check if what is missing is a padding of a sir0. If it isn't, it return an error.
pub fn decompress_px<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> {
debug!("decompressing a px-compressed file file");
file.seek(SeekFrom::Start(0))?;
let mut header_5 = [0; 5];
file.read_exact(&mut header_5)?;
let container_lenght = px_read_u16(&mut file)?;
let mut control_flags_buffer = [0; 9];
file.read_exact(&mut control_flags_buffer)?;
let control_flags = ControlFlags::new(control_flags_buffer);
if &header_5 == b"PKDPX" {
let decompressed_lenght = px_read_u32(&mut file)?;
Ok(decompress_px_raw(
file,
control_flags,
decompressed_lenght,
container_lenght,
20,
)?)
} else if &header_5 == b"AT4PX" {
let decompressed_lenght = px_read_u16(&mut file)? as u32;
Ok(decompress_px_raw(
file,
control_flags,
decompressed_lenght,
container_lenght,
18,
)?)
} else {
Err(PXError::InvalidHeaderMagic(header_5))
}
}
fn decompress_px_raw<T: Read + Seek>(
mut file: T,
control_flags: ControlFlags,
decompressed_lenght: u32,
container_lenght: u16,
header_lenght: u64,
) -> Result<Vec<u8>, PXError> {
let mut result = Vec::new();
let current_file_position = file.seek(SeekFrom::Current(0))?;
let current_file_len = file.seek(SeekFrom::End(0))?;
let mut raw_file = Partition::new(
file,
current_file_position,
current_file_len - current_file_position,
)
.unwrap();
trace!("starting decompression ...");
'main: loop {
let mut bit_num = 0;
let byte_info = px_read_u8(&mut raw_file)?;
trace!("command byte: 0x{:x}", byte_info);
while bit_num < 8 {
let this_bit = get_bit(byte_info, bit_num).unwrap();
let this_byte = px_read_u8(&mut raw_file)?;
if this_bit {
trace!("bit is 1: pushing 0x{:2x}", this_byte);
result.push(this_byte);
} else {
let nb_high: u8 = this_byte >> 4;
let nb_low: u8 = this_byte << 4 >> 4;
match control_flags.find(nb_high) {
Some(ctrlflagindex) => {
let byte_to_add = match ctrlflagindex {
0 => {
let byte1 = (nb_low << 4) + nb_low;
(byte1, byte1)
}
_ => {
let mut nybbleval = nb_low;
match ctrlflagindex {
1 => nybbleval += 1,
5 => nybbleval -= 1,
_ => (),
};
let mut nybbles = (nybbleval, nybbleval, nybbleval, nybbleval);
match ctrlflagindex {
1 => nybbles.0 -= 1,
2 => nybbles.1 -= 1,
3 => nybbles.2 -= 1,
4 => nybbles.3 -= 1,
5 => nybbles.0 += 1,
6 => nybbles.1 += 1,
7 => nybbles.2 += 1,
8 => nybbles.3 += 1,
_ => panic!(),
}
((nybbles.0 << 4) + nybbles.1, (nybbles.2 << 4) + nybbles.3)
}
};
trace!("bit is 0: ctrlflagindex is {:x}, nb_high is {:x}, nb_low is {:x}, adding 0x{:2x}{:2x}", ctrlflagindex, nb_high, nb_low, byte_to_add.0, byte_to_add.1);
result.push(byte_to_add.0);
result.push(byte_to_add.1);
}
None => {
let new_byte = px_read_u8(&mut raw_file)?;
let offset_rel: i16 =
-0x1000 + (((nb_low as i16) * 256) + (new_byte as i16));
let offset = (offset_rel as i32) + (result.len() as i32);
let lenght = (nb_high as i32) + 3;
trace!("bit is 0: pushing from past, relative offset is {}, lenght is {} (nb_low:{}, nb_high:{}, new_byte:0x{:2x})", offset_rel, lenght, nb_low, nb_high, new_byte);
// the old, good looking code
/*result.seek(offset as u64);
for c in result.read(lenght as u64)? {
result.add_a_byte(c)?;
}*/
//TODO: check for panic
for c in offset..(offset + lenght) {
result.push(result[c as usize])
}
}
}
};
bit_num += 1;
if result.len() >= decompressed_lenght as usize {
break 'main;
};
}
trace!("current output size : {}", result.len());
}
trace!("decoding loop finished.");
trace!(
"expected container lenght: {}, read: {}",
container_lenght,
raw_file.seek(SeekFrom::Current(0))? + 20
);
trace!(
"expected decompressed lenght: {}, real decompressed lenght: {}",
decompressed_lenght,
result.len()
);
if container_lenght as u64 != raw_file.seek(SeekFrom::Current(0))? + header_lenght {
return Err(PXError::InvalidDecompressedLength);
};
Ok(result)
}
/// check if a file is a px-compressed filed (PKDPX or AT4PX) .
/// return true if it is one, false otherwise.
///
/// It doesn't do extensive test and don't guaranty that the file is a valid PKDPX (only check the header)
/// Also doesn't save the position of the cursor in the file
pub fn | <F: Read + Seek>(file: &mut F) -> Result<bool, PXError> {
if file.seek(SeekFrom::End(0))? < 4 {
return Ok(false);
};
file.seek(SeekFrom::Start(0))?;
let mut header_5 = [0; 5];
file.read_exact(&mut header_5)?;
if &header_5 == b"PKDPX" {
return Ok(true);
};
if &header_5 == b"AT4PX | is_px | identifier_name |
lib.rs | Self::IOError(_) => write!(f, "An IO error happened"),
Self::InvalidHeaderMagic(value) => write!(f, "The header is invalid. It should either be PKDPX or AT4PX. The actual value of this header (in base 10) is {:?}", value),
Self::InvalidDecompressedLength => write!(f, "The decompressed lenght doesn't correspond to what is indicated in the file"),
Self::FileToCompressTooLong(lenght) => write!(f, "The file to compress is too long (real size: {}, max size: 256*256)", lenght)
}
}
}
impl From<io::Error> for PXError {
fn from(err: io::Error) -> Self {
Self::IOError(err)
}
}
#[derive(Debug)]
struct ControlFlags {
value: [u8; 9],
}
impl ControlFlags {
fn new(value: [u8; 9]) -> ControlFlags {
ControlFlags { value }
}
fn find(&self, nb_high: u8) -> Option<usize> {
for v in 0..self.value.len() {
if self.value[v] == nb_high {
return Some(v);
}
}
None
}
}
fn px_read_u16<T: Read>(file: &mut T) -> Result<u16, PXError> {
let mut buf = [0; 2];
file.read_exact(&mut buf)?;
Ok(u16::from_le_bytes(buf))
}
fn px_read_u32<T: Read>(file: &mut T) -> Result<u32, PXError> {
let mut buf = [0; 4];
file.read_exact(&mut buf)?;
Ok(u32::from_le_bytes(buf))
}
fn px_read_u8<T: Read>(file: &mut T) -> Result<u8, PXError> {
let mut buf = [0];
file.read_exact(&mut buf)?;
Ok(buf[0])
}
/// decompress a pkdpx or at4px file. It take as input a Bytes buffer, and return a decompressed buffer (or an error)
///
/// If atomatically determine if it is a pkdpx or an at4px based on the header
/// If the file isn't the good lenght, it check if what is missing is a padding of a sir0. If it isn't, it return an error.
pub fn decompress_px<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> {
debug!("decompressing a px-compressed file file");
file.seek(SeekFrom::Start(0))?;
let mut header_5 = [0; 5];
file.read_exact(&mut header_5)?;
let container_lenght = px_read_u16(&mut file)?;
let mut control_flags_buffer = [0; 9];
file.read_exact(&mut control_flags_buffer)?;
let control_flags = ControlFlags::new(control_flags_buffer);
if &header_5 == b"PKDPX" {
let decompressed_lenght = px_read_u32(&mut file)?;
Ok(decompress_px_raw(
file,
control_flags,
decompressed_lenght,
container_lenght,
20,
)?)
} else if &header_5 == b"AT4PX" {
let decompressed_lenght = px_read_u16(&mut file)? as u32;
Ok(decompress_px_raw(
file,
control_flags,
decompressed_lenght,
container_lenght,
18,
)?)
} else {
Err(PXError::InvalidHeaderMagic(header_5))
}
}
fn decompress_px_raw<T: Read + Seek>(
mut file: T,
control_flags: ControlFlags,
decompressed_lenght: u32,
container_lenght: u16,
header_lenght: u64,
) -> Result<Vec<u8>, PXError> {
let mut result = Vec::new();
let current_file_position = file.seek(SeekFrom::Current(0))?;
let current_file_len = file.seek(SeekFrom::End(0))?;
let mut raw_file = Partition::new(
file,
current_file_position,
current_file_len - current_file_position,
)
.unwrap();
trace!("starting decompression ...");
'main: loop {
let mut bit_num = 0;
let byte_info = px_read_u8(&mut raw_file)?;
trace!("command byte: 0x{:x}", byte_info);
while bit_num < 8 {
let this_bit = get_bit(byte_info, bit_num).unwrap();
let this_byte = px_read_u8(&mut raw_file)?;
if this_bit {
trace!("bit is 1: pushing 0x{:2x}", this_byte);
result.push(this_byte);
} else {
let nb_high: u8 = this_byte >> 4;
let nb_low: u8 = this_byte << 4 >> 4;
match control_flags.find(nb_high) {
Some(ctrlflagindex) => {
let byte_to_add = match ctrlflagindex {
0 => {
let byte1 = (nb_low << 4) + nb_low;
(byte1, byte1)
}
_ => {
let mut nybbleval = nb_low;
match ctrlflagindex {
1 => nybbleval += 1,
5 => nybbleval -= 1,
_ => (),
};
let mut nybbles = (nybbleval, nybbleval, nybbleval, nybbleval);
match ctrlflagindex {
1 => nybbles.0 -= 1,
2 => nybbles.1 -= 1,
3 => nybbles.2 -= 1,
4 => nybbles.3 -= 1,
5 => nybbles.0 += 1,
6 => nybbles.1 += 1,
7 => nybbles.2 += 1,
8 => nybbles.3 += 1,
_ => panic!(),
}
((nybbles.0 << 4) + nybbles.1, (nybbles.2 << 4) + nybbles.3)
}
};
trace!("bit is 0: ctrlflagindex is {:x}, nb_high is {:x}, nb_low is {:x}, adding 0x{:2x}{:2x}", ctrlflagindex, nb_high, nb_low, byte_to_add.0, byte_to_add.1);
result.push(byte_to_add.0);
result.push(byte_to_add.1); | }
None => {
let new_byte = px_read_u8(&mut raw_file)?;
let offset_rel: i16 =
-0x1000 + (((nb_low as i16) * 256) + (new_byte as i16));
let offset = (offset_rel as i32) + (result.len() as i32);
let lenght = (nb_high as i32) + 3;
trace!("bit is 0: pushing from past, relative offset is {}, lenght is {} (nb_low:{}, nb_high:{}, new_byte:0x{:2x})", offset_rel, lenght, nb_low, nb_high, new_byte);
// the old, good looking code
/*result.seek(offset as u64);
for c in result.read(lenght as u64)? {
result.add_a_byte(c)?;
}*/
//TODO: check for panic
for c in offset..(offset + lenght) {
result.push(result[c as usize])
}
}
}
};
bit_num += 1;
if result.len() >= decompressed_lenght as usize {
break 'main;
};
}
trace!("current output size : {}", result.len());
}
trace!("decoding loop finished.");
trace!(
"expected container lenght: {}, read: {}",
container_lenght,
raw_file.seek(SeekFrom::Current(0))? + 20
);
trace!(
"expected decompressed lenght: {}, real decompressed lenght: {}",
decompressed_lenght,
result.len()
);
if container_lenght as u64 != raw_file.seek(SeekFrom::Current(0))? + header_lenght {
return Err(PXError::InvalidDecompressedLength);
};
Ok(result)
}
/// check if a file is a px-compressed filed (PKDPX or AT4PX) .
/// return true if it is one, false otherwise.
///
/// It doesn't do extensive test and don't guaranty that the file is a valid PKDPX (only check the header)
/// Also doesn't save the position of the cursor in the file
pub fn is_px<F: Read + Seek>(file: &mut F) -> Result<bool, PXError> {
if file.seek(SeekFrom::End(0))? < 4 {
return Ok(false);
};
file.seek(SeekFrom::Start(0))?;
let mut header_5 = [0; 5];
file.read_exact(&mut header_5)?;
if &header_5 == b"PKDPX" {
return Ok(true);
};
if &header_5 == b"AT4PX" | random_line_split | |
lib.rs | Self::IOError(_) => write!(f, "An IO error happened"),
Self::InvalidHeaderMagic(value) => write!(f, "The header is invalid. It should either be PKDPX or AT4PX. The actual value of this header (in base 10) is {:?}", value),
Self::InvalidDecompressedLength => write!(f, "The decompressed lenght doesn't correspond to what is indicated in the file"),
Self::FileToCompressTooLong(lenght) => write!(f, "The file to compress is too long (real size: {}, max size: 256*256)", lenght)
}
}
}
impl From<io::Error> for PXError {
fn from(err: io::Error) -> Self {
Self::IOError(err)
}
}
#[derive(Debug)]
struct ControlFlags {
value: [u8; 9],
}
impl ControlFlags {
fn new(value: [u8; 9]) -> ControlFlags {
ControlFlags { value }
}
fn find(&self, nb_high: u8) -> Option<usize> {
for v in 0..self.value.len() {
if self.value[v] == nb_high {
return Some(v);
}
}
None
}
}
fn px_read_u16<T: Read>(file: &mut T) -> Result<u16, PXError> {
let mut buf = [0; 2];
file.read_exact(&mut buf)?;
Ok(u16::from_le_bytes(buf))
}
fn px_read_u32<T: Read>(file: &mut T) -> Result<u32, PXError> {
let mut buf = [0; 4];
file.read_exact(&mut buf)?;
Ok(u32::from_le_bytes(buf))
}
fn px_read_u8<T: Read>(file: &mut T) -> Result<u8, PXError> {
let mut buf = [0];
file.read_exact(&mut buf)?;
Ok(buf[0])
}
/// decompress a pkdpx or at4px file. It take as input a Bytes buffer, and return a decompressed buffer (or an error)
///
/// If atomatically determine if it is a pkdpx or an at4px based on the header
/// If the file isn't the good lenght, it check if what is missing is a padding of a sir0. If it isn't, it return an error.
pub fn decompress_px<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> | )?)
} else if &header_5 == b"AT4PX" {
let decompressed_lenght = px_read_u16(&mut file)? as u32;
Ok(decompress_px_raw(
file,
control_flags,
decompressed_lenght,
container_lenght,
18,
)?)
} else {
Err(PXError::InvalidHeaderMagic(header_5))
}
}
fn decompress_px_raw<T: Read + Seek>(
mut file: T,
control_flags: ControlFlags,
decompressed_lenght: u32,
container_lenght: u16,
header_lenght: u64,
) -> Result<Vec<u8>, PXError> {
let mut result = Vec::new();
let current_file_position = file.seek(SeekFrom::Current(0))?;
let current_file_len = file.seek(SeekFrom::End(0))?;
let mut raw_file = Partition::new(
file,
current_file_position,
current_file_len - current_file_position,
)
.unwrap();
trace!("starting decompression ...");
'main: loop {
let mut bit_num = 0;
let byte_info = px_read_u8(&mut raw_file)?;
trace!("command byte: 0x{:x}", byte_info);
while bit_num < 8 {
let this_bit = get_bit(byte_info, bit_num).unwrap();
let this_byte = px_read_u8(&mut raw_file)?;
if this_bit {
trace!("bit is 1: pushing 0x{:2x}", this_byte);
result.push(this_byte);
} else {
let nb_high: u8 = this_byte >> 4;
let nb_low: u8 = this_byte << 4 >> 4;
match control_flags.find(nb_high) {
Some(ctrlflagindex) => {
let byte_to_add = match ctrlflagindex {
0 => {
let byte1 = (nb_low << 4) + nb_low;
(byte1, byte1)
}
_ => {
let mut nybbleval = nb_low;
match ctrlflagindex {
1 => nybbleval += 1,
5 => nybbleval -= 1,
_ => (),
};
let mut nybbles = (nybbleval, nybbleval, nybbleval, nybbleval);
match ctrlflagindex {
1 => nybbles.0 -= 1,
2 => nybbles.1 -= 1,
3 => nybbles.2 -= 1,
4 => nybbles.3 -= 1,
5 => nybbles.0 += 1,
6 => nybbles.1 += 1,
7 => nybbles.2 += 1,
8 => nybbles.3 += 1,
_ => panic!(),
}
((nybbles.0 << 4) + nybbles.1, (nybbles.2 << 4) + nybbles.3)
}
};
trace!("bit is 0: ctrlflagindex is {:x}, nb_high is {:x}, nb_low is {:x}, adding 0x{:2x}{:2x}", ctrlflagindex, nb_high, nb_low, byte_to_add.0, byte_to_add.1);
result.push(byte_to_add.0);
result.push(byte_to_add.1);
}
None => {
let new_byte = px_read_u8(&mut raw_file)?;
let offset_rel: i16 =
-0x1000 + (((nb_low as i16) * 256) + (new_byte as i16));
let offset = (offset_rel as i32) + (result.len() as i32);
let lenght = (nb_high as i32) + 3;
trace!("bit is 0: pushing from past, relative offset is {}, lenght is {} (nb_low:{}, nb_high:{}, new_byte:0x{:2x})", offset_rel, lenght, nb_low, nb_high, new_byte);
// the old, good looking code
/*result.seek(offset as u64);
for c in result.read(lenght as u64)? {
result.add_a_byte(c)?;
}*/
//TODO: check for panic
for c in offset..(offset + lenght) {
result.push(result[c as usize])
}
}
}
};
bit_num += 1;
if result.len() >= decompressed_lenght as usize {
break 'main;
};
}
trace!("current output size : {}", result.len());
}
trace!("decoding loop finished.");
trace!(
"expected container lenght: {}, read: {}",
container_lenght,
raw_file.seek(SeekFrom::Current(0))? + 20
);
trace!(
"expected decompressed lenght: {}, real decompressed lenght: {}",
decompressed_lenght,
result.len()
);
if container_lenght as u64 != raw_file.seek(SeekFrom::Current(0))? + header_lenght {
return Err(PXError::InvalidDecompressedLength);
};
Ok(result)
}
/// check if a file is a px-compressed filed (PKDPX or AT4PX) .
/// return true if it is one, false otherwise.
///
/// It doesn't do extensive test and don't guaranty that the file is a valid PKDPX (only check the header)
/// Also doesn't save the position of the cursor in the file
pub fn is_px<F: Read + Seek>(file: &mut F) -> Result<bool, PXError> {
if file.seek(SeekFrom::End(0))? < 4 {
return Ok(false);
};
file.seek(SeekFrom::Start(0))?;
let mut header_5 = [0; 5];
file.read_exact(&mut header_5)?;
if &header_5 == b"PKDPX" {
return Ok(true);
};
if &header_5 == b"AT4PX | {
debug!("decompressing a px-compressed file file");
file.seek(SeekFrom::Start(0))?;
let mut header_5 = [0; 5];
file.read_exact(&mut header_5)?;
let container_lenght = px_read_u16(&mut file)?;
let mut control_flags_buffer = [0; 9];
file.read_exact(&mut control_flags_buffer)?;
let control_flags = ControlFlags::new(control_flags_buffer);
if &header_5 == b"PKDPX" {
let decompressed_lenght = px_read_u32(&mut file)?;
Ok(decompress_px_raw(
file,
control_flags,
decompressed_lenght,
container_lenght,
20, | identifier_body |
contributions-infromation-form.component.ts | .module";
import {CustomStoreService} from "../../shared/services/custom-store.service";
import {CapitalRepairNotifyService} from "../../shared/services/capital-repair-notify.service";
import {ContributionsInformationMistakeService} from "../../shared/services/contributions-information-mistake.service";
import {DxiGroupComponent, DxiGroupModule} from "devextreme-angular/ui/nested";
import {getContent} from "../../shared/tools/contrib-info-act";
import saveAs from "file-saver"
@Component({
selector: 'app-contributions-information-form',
templateUrl: './contributions-information-form.component.html',
styleUrls: ['./contributions-information-form.component.scss']
})
export class ContributionsInfromationFormComponent implements OnInit {
SubmitType = SubmitType;
history: any = {};
file_backend_url = environment.file_url
@ViewChild("form", {static: false}) form: DxFormComponent;
@ViewChild("mistakes", {static: false}) mistakes: DxSelectBoxComponent;
get uploadAuthorization() {
return 'Token ' + this.auth.token;
};
get uploadUrl() {
return `${environment.file_url}create/`
}
get delta(): number {
return Number((this.contrib_info.assessed_contributions_total - this.contrib_info.received_contributions_total).toFixed(2))
}
id = '';
contrib_info: ContributionsInformation = new ContributionsInformation();
clean_contrib_info = new ContributionsInformation();
acceptButtonVisibility = false;
rejectButtonVisibility = false;
sendForApprovalButtonVisibility = false;
saveButtonVisibility = false;
contribInfoDataSource: any = {};
mistakesDataSource: any = {};
get comment_visibility() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get mistakes_visibility() {
return this.comment_visibility || this.contrib_info.mistakes.length > 0
}
get skip_verification() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get dateIsReadOnly() {
if (this.auth.current_user) {
return !this.auth.current_user.is_staff
} else {
return false
}
};
addDays(date, days) {
const result = new Date(date);
result.setDate(result.getDate() + days);
return result;
}
formatDate(date) {
const ye = new Intl.DateTimeFormat('en', {year: 'numeric'}).format(date);
const mo = new Intl.DateTimeFormat('en', {month: '2-digit'}).format(date);
const da = new Intl.DateTimeFormat('en', {day: '2-digit'}).format(date);
return `${ye}-${mo}-${da}`;
}
constructor(private route: ActivatedRoute,
private router: Router,
private contribInfoService: ContributionsInformationService,
private notifyService: CapitalRepairNotifyService,
private contribInfoMistakesService: ContributionsInformationMistakeService,
private _location: Location,
public auth: AuthService,
private customStoreService: CustomStoreService) {
this.contribInfoDataSource = customStoreService.getSearchCustomStore(notifyService);
this.contribInfoDataSource.pageSize(10);
//отображаются только согласованые уведомления, у которых не указана ороанизация Фонд кап. ремонта ПК(5902990563)
//и по которым недавно не подавались сведения о взносах
const today = new Date();
const t2 = this.formatDate(this.addDays(today, -10));
const t1 = this.formatDate(today);
//this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563'], 'and',
// ['!', [['contributionsinformation__date', '>', t2], 'and', ['contributionsinformation__date', '<=', t1]]]]);
this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563']]);
this.mistakesDataSource = customStoreService.getSearchCustomStore(contribInfoMistakesService);
}
act() {
const data = getContent(this.contrib_info.notify, this.contrib_info.mistakes)
generate(`${environment.backend_url}/media/templates/act.docx`, data).then(a => {
saveAs(a, "file.docx")
})
//window.location.href=`/api/v1/cr/contrib_info/generate_act/${this.id}/`;
}
setPermissions(user) {
if (this.contrib_info.notify.organization.id == this.auth.current_user.organization.id || user.is_staff) {
if (this.c | }
} else {
this.sendForApprovalButtonVisibility = false;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
this.saveButtonVisibility = false;
}
}
}
ngOnInit() {
this.route.params.subscribe((params: Params) => {
this.id = params.id;
if (this.id !== '0') {
this.contribInfoService.retrieve(this.id).subscribe(res => {
this.contrib_info = res;
this.contrib_info = JSON.parse(JSON.stringify(res));
this.setPermissions(this.auth.current_user);
}
);
if (this.auth.current_user.is_staff) {
this.contribInfoService.getHistory(this.id).subscribe(res => {
this.history = res;
});
}
} else {
const a = new Date();
this.contrib_info.date = `${a.getFullYear()}-${a.getMonth() + 1}-${a.getDate()}`;
this.sendForApprovalButtonVisibility = true;
this.saveButtonVisibility = true;
}
})
}
back() {
if (this._location.getState()['navigationId'] > 1) {
this._location.back();
} else {
this.router.navigate(['/pages/contrib-info']);
}
}
onMistakeSelected(e) {
if (e) {
console.log(e);
this.contrib_info.mistakes.push(e);
this.mistakes.writeValue(undefined)
}
}
mistakeDelete(mistake) {
const index = this.contrib_info.mistakes.findIndex(f => f.id == mistake.id);
if (index > -1) {
this.contrib_info.mistakes.splice(index, 1);
}
}
onFormSubmit(e) {
let is_form_valid = true;
let is_credit_org_valid = true;
let is_house_valid = true;
let is_files_attached = true;
this.contrib_info.delta_total = this.delta;
if (e != SubmitType.Exclusion && !this.skip_verification) {
is_form_valid = this.form.instance.validate().isValid;
if (this.saveButtonVisibility) {
is_files_attached = this.contrib_info.files.length !== 0;
}
}
if (is_form_valid &&
is_credit_org_valid &&
is_house_valid &&
is_files_attached) {
switch (e) {
case SubmitType.Sending: {
this.contrib_info.status.id = NotifyStatus.Approving;
break;
}
case SubmitType.Rejecting: {
if (this.auth.current_user.is_staff) {
this.contrib_info.status.id = NotifyStatus.Rejected;
} else {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Accepting: {
this.contrib_info.status.id = NotifyStatus.Approved;
break;
}
case SubmitType.Saving: {
if (this.contrib_info.status.id == 0) {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Exclusion: {
this.contrib_info.status.id = NotifyStatus.Excluded;
break;
}
}
if (this.id != '0') {
let n = getDifference(this.contrib_info, this.clean_contrib_info);
if (n) {
if (this.contrib_info.files.length == 0) {
n[0].files = 'empty'
} else {
n[0].files = this.contrib_info.files
}
if (this.contrib_info.mistakes.length == 0) {
n[0].mistakes = 'empty'
} else {
n[0].mistakes = this.contrib_info.mistakes
}
this.contribInfoService.update(this.id, n[0]).subscribe(res => {
notify({
message: "Форма сохранена",
position: {
my: "center top",
at: | ontrib_info.status.id == NotifyStatus.Approving) {
this.acceptButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.saveButtonVisibility = false;
this.rejectButtonVisibility = true;
if (user.is_staff) {
this.acceptButtonVisibility = true;
}
} else if (this.contrib_info.status.id == NotifyStatus.Editing || this.contrib_info.status.id == NotifyStatus.Rejected) {
this.saveButtonVisibility = true;
this.sendForApprovalButtonVisibility = true;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
} else if (this.contrib_info.status.id == NotifyStatus.Approved) {
this.saveButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
if (user.is_staff) {
this.saveButtonVisibility = true; | identifier_body |
contributions-infromation-form.component.ts | ipes.module";
import {CustomStoreService} from "../../shared/services/custom-store.service";
import {CapitalRepairNotifyService} from "../../shared/services/capital-repair-notify.service";
import {ContributionsInformationMistakeService} from "../../shared/services/contributions-information-mistake.service";
import {DxiGroupComponent, DxiGroupModule} from "devextreme-angular/ui/nested";
import {getContent} from "../../shared/tools/contrib-info-act";
import saveAs from "file-saver"
@Component({
selector: 'app-contributions-information-form',
templateUrl: './contributions-information-form.component.html',
styleUrls: ['./contributions-information-form.component.scss']
})
export class ContributionsInfromationFormComponent implements OnInit {
SubmitType = SubmitType;
history: any = {};
file_backend_url = environment.file_url
@ViewChild("form", {static: false}) form: DxFormComponent;
@ViewChild("mistakes", {static: false}) mistakes: DxSelectBoxComponent;
get uploadAuthorization() {
return 'Token ' + this.auth.token;
};
get uploadUrl() {
return `${environment.file_url}create/`
}
get delta(): number {
return Number((this.contrib_info.assessed_contributions_total - this.contrib_info.received_contributions_total).toFixed(2))
}
id = '';
contrib_info: ContributionsInformation = new ContributionsInformation();
clean_contrib_info = new ContributionsInformation();
acceptButtonVisibility = false;
rejectButtonVisibility = false;
sendForApprovalButtonVisibility = false;
saveButtonVisibility = false;
contribInfoDataSource: any = {};
mistakesDataSource: any = {};
get comment_visibility() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get mistakes_visibility() {
return this.comment_visibility || this.contrib_info.mistakes.length > 0
}
get skip_verification() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get dateIsReadOnly() {
if (this.auth.current_user) {
return !this.auth.current_user.is_staff
} else {
return false
}
};
addDays(date, days) {
const result = new Date(date);
result.setDate(result.getDate() + days);
return result;
}
formatDate(date) {
const ye = new Intl.DateTimeFormat('en', {year: 'numeric'}).format(date);
const mo = new Intl.DateTimeFormat('en', {month: '2-digit'}).format(date);
const da = new Intl.DateTimeFormat('en', {day: '2-digit'}).format(date);
return `${ye}-${mo}-${da}`;
}
constructor(private route: ActivatedRoute,
private router: Router,
private contribInfoService: ContributionsInformationService,
private notifyService: CapitalRepairNotifyService,
private contribInfoMistakesService: ContributionsInformationMistakeService,
private _location: Location,
public auth: AuthService,
private customStoreService: CustomStoreService) {
this.contribInfoDataSource = customStoreService.getSearchCustomStore(notifyService);
this.contribInfoDataSource.pageSize(10);
//отображаются только согласованые уведомления, у которых не указана ороанизация Фонд кап. ремонта ПК(5902990563)
//и по которым недавно не подавались сведения о взносах
const today = new Date();
const t2 = this.formatDate(this.addDays(today, -10));
const t1 = this.formatDate(today);
//this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563'], 'and',
// ['!', [['contributionsinformation__date', '>', t2], 'and', ['contributionsinformation__date', '<=', t1]]]]);
this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563']]);
this.mistakesDataSource = customStoreService.getSearchCustomStore(contribInfoMistakesService);
}
act() {
const data = getContent(this.contrib_info.notify, this.contrib_info.mistakes)
generate(`${environment.backend_url}/media/templates/act.docx`, data).then(a => {
saveAs(a, "file.docx")
})
//window.location.href=`/api/v1/cr/contrib_info/generate_act/${this.id}/`;
}
setPermissions(user) {
if (this.contrib_info.notify.organization.id == this.auth.current_user.organization.id || user.is_staff) {
if (this.contrib_info.status.id == NotifyStatus.Approving) {
this.acceptButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.saveButtonVisibility = false;
this.rejectButtonVisibility = true;
if (user.is_staff) {
this.acceptButtonVisibility = true;
}
} else if (this.contrib_info.status.id == NotifyStatus.Editing || this.contrib_info.status.id == NotifyStatus.Rejected) {
this.saveButtonVisibility = true;
this.sendForApprovalButtonVisibility = true;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
} else if (this.contrib_info.status.id == NotifyStatus.Approved) {
this.saveButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
if (user.is_staff) {
this.saveButtonVisibility = true;
}
} else {
this.sendForApprovalButtonVisibility = false;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
this.saveButtonVisibility = false;
}
}
}
ngOnInit() {
this.route.params.subscribe((params: Params) => {
this.id = params.id;
if (this.id !== '0') {
| contribInfoService.retrieve(this.id).subscribe(res => {
this.contrib_info = res;
this.contrib_info = JSON.parse(JSON.stringify(res));
this.setPermissions(this.auth.current_user);
}
);
if (this.auth.current_user.is_staff) {
this.contribInfoService.getHistory(this.id).subscribe(res => {
this.history = res;
});
}
} else {
const a = new Date();
this.contrib_info.date = `${a.getFullYear()}-${a.getMonth() + 1}-${a.getDate()}`;
this.sendForApprovalButtonVisibility = true;
this.saveButtonVisibility = true;
}
})
}
back() {
if (this._location.getState()['navigationId'] > 1) {
this._location.back();
} else {
this.router.navigate(['/pages/contrib-info']);
}
}
onMistakeSelected(e) {
if (e) {
console.log(e);
this.contrib_info.mistakes.push(e);
this.mistakes.writeValue(undefined)
}
}
mistakeDelete(mistake) {
const index = this.contrib_info.mistakes.findIndex(f => f.id == mistake.id);
if (index > -1) {
this.contrib_info.mistakes.splice(index, 1);
}
}
onFormSubmit(e) {
let is_form_valid = true;
let is_credit_org_valid = true;
let is_house_valid = true;
let is_files_attached = true;
this.contrib_info.delta_total = this.delta;
if (e != SubmitType.Exclusion && !this.skip_verification) {
is_form_valid = this.form.instance.validate().isValid;
if (this.saveButtonVisibility) {
is_files_attached = this.contrib_info.files.length !== 0;
}
}
if (is_form_valid &&
is_credit_org_valid &&
is_house_valid &&
is_files_attached) {
switch (e) {
case SubmitType.Sending: {
this.contrib_info.status.id = NotifyStatus.Approving;
break;
}
case SubmitType.Rejecting: {
if (this.auth.current_user.is_staff) {
this.contrib_info.status.id = NotifyStatus.Rejected;
} else {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Accepting: {
this.contrib_info.status.id = NotifyStatus.Approved;
break;
}
case SubmitType.Saving: {
if (this.contrib_info.status.id == 0) {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Exclusion: {
this.contrib_info.status.id = NotifyStatus.Excluded;
break;
}
}
if (this.id != '0') {
let n = getDifference(this.contrib_info, this.clean_contrib_info);
if (n) {
if (this.contrib_info.files.length == 0) {
n[0].files = 'empty'
} else {
n[0].files = this.contrib_info.files
}
if (this.contrib_info.mistakes.length == 0) {
n[0].mistakes = 'empty'
} else {
n[0].mistakes = this.contrib_info.mistakes
}
this.contribInfoService.update(this.id, n[0]).subscribe(res => {
notify({
message: "Форма сохранена",
position: {
my: "center top",
at: | this. | identifier_name |
contributions-infromation-form.component.ts | ipes.module";
import {CustomStoreService} from "../../shared/services/custom-store.service";
import {CapitalRepairNotifyService} from "../../shared/services/capital-repair-notify.service";
import {ContributionsInformationMistakeService} from "../../shared/services/contributions-information-mistake.service";
import {DxiGroupComponent, DxiGroupModule} from "devextreme-angular/ui/nested";
import {getContent} from "../../shared/tools/contrib-info-act";
import saveAs from "file-saver"
@Component({
selector: 'app-contributions-information-form',
templateUrl: './contributions-information-form.component.html',
styleUrls: ['./contributions-information-form.component.scss']
})
export class ContributionsInfromationFormComponent implements OnInit {
SubmitType = SubmitType;
history: any = {};
file_backend_url = environment.file_url
@ViewChild("form", {static: false}) form: DxFormComponent;
@ViewChild("mistakes", {static: false}) mistakes: DxSelectBoxComponent;
get uploadAuthorization() {
return 'Token ' + this.auth.token;
};
get uploadUrl() {
return `${environment.file_url}create/`
}
get delta(): number {
return Number((this.contrib_info.assessed_contributions_total - this.contrib_info.received_contributions_total).toFixed(2))
}
id = '';
contrib_info: ContributionsInformation = new ContributionsInformation();
clean_contrib_info = new ContributionsInformation();
acceptButtonVisibility = false;
rejectButtonVisibility = false;
sendForApprovalButtonVisibility = false;
saveButtonVisibility = false;
contribInfoDataSource: any = {};
mistakesDataSource: any = {};
get comment_visibility() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get mistakes_visibility() {
return this.comment_visibility || this.contrib_info.mistakes.length > 0
}
get skip_verification() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get dateIsReadOnly() {
if (this.auth.current_user) {
return !this.auth.current_user.is_staff
} else {
return false
}
};
addDays(date, days) {
const result = new Date(date);
result.setDate(result.getDate() + days);
return result;
}
formatDate(date) {
const ye = new Intl.DateTimeFormat('en', {year: 'numeric'}).format(date);
const mo = new Intl.DateTimeFormat('en', {month: '2-digit'}).format(date);
const da = new Intl.DateTimeFormat('en', {day: '2-digit'}).format(date);
return `${ye}-${mo}-${da}`;
}
constructor(private route: ActivatedRoute,
private router: Router,
private contribInfoService: ContributionsInformationService,
private notifyService: CapitalRepairNotifyService,
private contribInfoMistakesService: ContributionsInformationMistakeService,
private _location: Location,
public auth: AuthService,
private customStoreService: CustomStoreService) {
this.contribInfoDataSource = customStoreService.getSearchCustomStore(notifyService);
this.contribInfoDataSource.pageSize(10);
//отображаются только согласованые уведомления, у которых не указана ороанизация Фонд кап. ремонта ПК(5902990563)
//и по которым недавно не подавались сведения о взносах
const today = new Date();
const t2 = this.formatDate(this.addDays(today, -10));
const t1 = this.formatDate(today);
//this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563'], 'and',
// ['!', [['contributionsinformation__date', '>', t2], 'and', ['contributionsinformation__date', '<=', t1]]]]);
this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563']]);
this.mistakesDataSource = customStoreService.getSearchCustomStore(contribInfoMistakesService);
}
act() {
const data = getContent(this.contrib_info.notify, this.contrib_info.mistakes)
generate(`${environment.backend_url}/media/templates/act.docx`, data).then(a => {
saveAs(a, "file.docx")
})
//window.location.href=`/api/v1/cr/contrib_info/generate_act/${this.id}/`;
}
setPermissions(user) {
if (this.contrib_info.notify.organization.id == this.auth.current_user.organization.id || user.is_staff) {
if (this.contrib_info.status.id == NotifyStatus.Approving) {
this.acceptButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.saveButtonVisibility = false;
this.rejectButtonVisibility = true;
if (user.is_staff) {
this.acceptButtonVisibility = true;
}
} else if (this.contrib_info.status.id == NotifyStatus.Editing || this.contrib_info.status.id == NotifyStatus.Rejected) {
this.saveButtonVisibility = true;
this.sendForApprovalButtonVisibility = true;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
} else if (this.contrib_info.status.id == NotifyStatus.Approved) {
this.saveButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
if (user.is_staff) {
this.saveButtonVisibility = true;
}
} else {
this.sendForApprovalButtonVisibility = false;
| ceptButtonVisibility = false;
this.saveButtonVisibility = false;
}
}
}
ngOnInit() {
this.route.params.subscribe((params: Params) => {
this.id = params.id;
if (this.id !== '0') {
this.contribInfoService.retrieve(this.id).subscribe(res => {
this.contrib_info = res;
this.contrib_info = JSON.parse(JSON.stringify(res));
this.setPermissions(this.auth.current_user);
}
);
if (this.auth.current_user.is_staff) {
this.contribInfoService.getHistory(this.id).subscribe(res => {
this.history = res;
});
}
} else {
const a = new Date();
this.contrib_info.date = `${a.getFullYear()}-${a.getMonth() + 1}-${a.getDate()}`;
this.sendForApprovalButtonVisibility = true;
this.saveButtonVisibility = true;
}
})
}
back() {
if (this._location.getState()['navigationId'] > 1) {
this._location.back();
} else {
this.router.navigate(['/pages/contrib-info']);
}
}
onMistakeSelected(e) {
if (e) {
console.log(e);
this.contrib_info.mistakes.push(e);
this.mistakes.writeValue(undefined)
}
}
mistakeDelete(mistake) {
const index = this.contrib_info.mistakes.findIndex(f => f.id == mistake.id);
if (index > -1) {
this.contrib_info.mistakes.splice(index, 1);
}
}
onFormSubmit(e) {
let is_form_valid = true;
let is_credit_org_valid = true;
let is_house_valid = true;
let is_files_attached = true;
this.contrib_info.delta_total = this.delta;
if (e != SubmitType.Exclusion && !this.skip_verification) {
is_form_valid = this.form.instance.validate().isValid;
if (this.saveButtonVisibility) {
is_files_attached = this.contrib_info.files.length !== 0;
}
}
if (is_form_valid &&
is_credit_org_valid &&
is_house_valid &&
is_files_attached) {
switch (e) {
case SubmitType.Sending: {
this.contrib_info.status.id = NotifyStatus.Approving;
break;
}
case SubmitType.Rejecting: {
if (this.auth.current_user.is_staff) {
this.contrib_info.status.id = NotifyStatus.Rejected;
} else {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Accepting: {
this.contrib_info.status.id = NotifyStatus.Approved;
break;
}
case SubmitType.Saving: {
if (this.contrib_info.status.id == 0) {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Exclusion: {
this.contrib_info.status.id = NotifyStatus.Excluded;
break;
}
}
if (this.id != '0') {
let n = getDifference(this.contrib_info, this.clean_contrib_info);
if (n) {
if (this.contrib_info.files.length == 0) {
n[0].files = 'empty'
} else {
n[0].files = this.contrib_info.files
}
if (this.contrib_info.mistakes.length == 0) {
n[0].mistakes = 'empty'
} else {
n[0].mistakes = this.contrib_info.mistakes
}
this.contribInfoService.update(this.id, n[0]).subscribe(res => {
notify({
message: "Форма сохранена",
position: {
my: "center top",
at: | this.rejectButtonVisibility = false;
this.ac | conditional_block |
contributions-infromation-form.component.ts | ipes.module";
import {CustomStoreService} from "../../shared/services/custom-store.service";
import {CapitalRepairNotifyService} from "../../shared/services/capital-repair-notify.service";
import {ContributionsInformationMistakeService} from "../../shared/services/contributions-information-mistake.service";
import {DxiGroupComponent, DxiGroupModule} from "devextreme-angular/ui/nested";
import {getContent} from "../../shared/tools/contrib-info-act";
import saveAs from "file-saver"
@Component({
selector: 'app-contributions-information-form',
templateUrl: './contributions-information-form.component.html',
styleUrls: ['./contributions-information-form.component.scss']
})
export class ContributionsInfromationFormComponent implements OnInit {
SubmitType = SubmitType;
history: any = {};
file_backend_url = environment.file_url
@ViewChild("form", {static: false}) form: DxFormComponent;
@ViewChild("mistakes", {static: false}) mistakes: DxSelectBoxComponent;
get uploadAuthorization() {
return 'Token ' + this.auth.token;
};
get uploadUrl() {
return `${environment.file_url}create/`
}
get delta(): number {
return Number((this.contrib_info.assessed_contributions_total - this.contrib_info.received_contributions_total).toFixed(2))
}
id = '';
contrib_info: ContributionsInformation = new ContributionsInformation();
clean_contrib_info = new ContributionsInformation();
acceptButtonVisibility = false;
rejectButtonVisibility = false;
sendForApprovalButtonVisibility = false;
saveButtonVisibility = false;
contribInfoDataSource: any = {};
mistakesDataSource: any = {};
get comment_visibility() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get mistakes_visibility() {
return this.comment_visibility || this.contrib_info.mistakes.length > 0
}
get skip_verification() {
return this.auth.current_user.permissions.findIndex(p => p.codename == 'view_comment2') > 0
}
get dateIsReadOnly() {
if (this.auth.current_user) {
return !this.auth.current_user.is_staff
} else {
return false
}
};
addDays(date, days) {
const result = new Date(date);
result.setDate(result.getDate() + days);
return result;
}
formatDate(date) {
const ye = new Intl.DateTimeFormat('en', {year: 'numeric'}).format(date);
const mo = new Intl.DateTimeFormat('en', {month: '2-digit'}).format(date);
const da = new Intl.DateTimeFormat('en', {day: '2-digit'}).format(date);
return `${ye}-${mo}-${da}`;
}
constructor(private route: ActivatedRoute,
private router: Router,
private contribInfoService: ContributionsInformationService,
private notifyService: CapitalRepairNotifyService,
private contribInfoMistakesService: ContributionsInformationMistakeService,
private _location: Location,
public auth: AuthService,
private customStoreService: CustomStoreService) {
this.contribInfoDataSource = customStoreService.getSearchCustomStore(notifyService);
this.contribInfoDataSource.pageSize(10); | //и по которым недавно не подавались сведения о взносах
const today = new Date();
const t2 = this.formatDate(this.addDays(today, -10));
const t1 = this.formatDate(today);
//this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563'], 'and',
// ['!', [['contributionsinformation__date', '>', t2], 'and', ['contributionsinformation__date', '<=', t1]]]]);
this.contribInfoDataSource.filter([['status.id', '=', '3'], 'and', ['organization.inn', '<>', '5902990563']]);
this.mistakesDataSource = customStoreService.getSearchCustomStore(contribInfoMistakesService);
}
act() {
const data = getContent(this.contrib_info.notify, this.contrib_info.mistakes)
generate(`${environment.backend_url}/media/templates/act.docx`, data).then(a => {
saveAs(a, "file.docx")
})
//window.location.href=`/api/v1/cr/contrib_info/generate_act/${this.id}/`;
}
setPermissions(user) {
if (this.contrib_info.notify.organization.id == this.auth.current_user.organization.id || user.is_staff) {
if (this.contrib_info.status.id == NotifyStatus.Approving) {
this.acceptButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.saveButtonVisibility = false;
this.rejectButtonVisibility = true;
if (user.is_staff) {
this.acceptButtonVisibility = true;
}
} else if (this.contrib_info.status.id == NotifyStatus.Editing || this.contrib_info.status.id == NotifyStatus.Rejected) {
this.saveButtonVisibility = true;
this.sendForApprovalButtonVisibility = true;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
} else if (this.contrib_info.status.id == NotifyStatus.Approved) {
this.saveButtonVisibility = false;
this.sendForApprovalButtonVisibility = false;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
if (user.is_staff) {
this.saveButtonVisibility = true;
}
} else {
this.sendForApprovalButtonVisibility = false;
this.rejectButtonVisibility = false;
this.acceptButtonVisibility = false;
this.saveButtonVisibility = false;
}
}
}
ngOnInit() {
this.route.params.subscribe((params: Params) => {
this.id = params.id;
if (this.id !== '0') {
this.contribInfoService.retrieve(this.id).subscribe(res => {
this.contrib_info = res;
this.contrib_info = JSON.parse(JSON.stringify(res));
this.setPermissions(this.auth.current_user);
}
);
if (this.auth.current_user.is_staff) {
this.contribInfoService.getHistory(this.id).subscribe(res => {
this.history = res;
});
}
} else {
const a = new Date();
this.contrib_info.date = `${a.getFullYear()}-${a.getMonth() + 1}-${a.getDate()}`;
this.sendForApprovalButtonVisibility = true;
this.saveButtonVisibility = true;
}
})
}
back() {
if (this._location.getState()['navigationId'] > 1) {
this._location.back();
} else {
this.router.navigate(['/pages/contrib-info']);
}
}
onMistakeSelected(e) {
if (e) {
console.log(e);
this.contrib_info.mistakes.push(e);
this.mistakes.writeValue(undefined)
}
}
mistakeDelete(mistake) {
const index = this.contrib_info.mistakes.findIndex(f => f.id == mistake.id);
if (index > -1) {
this.contrib_info.mistakes.splice(index, 1);
}
}
onFormSubmit(e) {
let is_form_valid = true;
let is_credit_org_valid = true;
let is_house_valid = true;
let is_files_attached = true;
this.contrib_info.delta_total = this.delta;
if (e != SubmitType.Exclusion && !this.skip_verification) {
is_form_valid = this.form.instance.validate().isValid;
if (this.saveButtonVisibility) {
is_files_attached = this.contrib_info.files.length !== 0;
}
}
if (is_form_valid &&
is_credit_org_valid &&
is_house_valid &&
is_files_attached) {
switch (e) {
case SubmitType.Sending: {
this.contrib_info.status.id = NotifyStatus.Approving;
break;
}
case SubmitType.Rejecting: {
if (this.auth.current_user.is_staff) {
this.contrib_info.status.id = NotifyStatus.Rejected;
} else {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Accepting: {
this.contrib_info.status.id = NotifyStatus.Approved;
break;
}
case SubmitType.Saving: {
if (this.contrib_info.status.id == 0) {
this.contrib_info.status.id = NotifyStatus.Editing;
}
break;
}
case SubmitType.Exclusion: {
this.contrib_info.status.id = NotifyStatus.Excluded;
break;
}
}
if (this.id != '0') {
let n = getDifference(this.contrib_info, this.clean_contrib_info);
if (n) {
if (this.contrib_info.files.length == 0) {
n[0].files = 'empty'
} else {
n[0].files = this.contrib_info.files
}
if (this.contrib_info.mistakes.length == 0) {
n[0].mistakes = 'empty'
} else {
n[0].mistakes = this.contrib_info.mistakes
}
this.contribInfoService.update(this.id, n[0]).subscribe(res => {
notify({
message: "Форма сохранена",
position: {
my: "center top",
at: "center | //отображаются только согласованые уведомления, у которых не указана ороанизация Фонд кап. ремонта ПК(5902990563) | random_line_split |
film.rs | , crop_pixel_bounds);
// Allocate film image storage
let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize];
// TODO: filmPixelMemory
// Precompute filter weight table
let mut offset = 0;
let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH];
for y in 0..FILTER_TABLE_WIDTH {
for x in 0..FILTER_TABLE_WIDTH {
let p = Point2f::new(
(x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float,
(y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float
);
filter_table[offset] = filt.evaluate(&p);
offset += 1;
}
}
Self {
full_resolution: *resolution,
diagonal: diagonal * 0.001,
filter: filt,
filename: filename.to_owned(),
scale,
max_sample_luminance,
cropped_pixel_bounds: crop_pixel_bounds,
pixels: RwLock::new(pixels),
filter_table
}
}
pub fn get_sample_bounds(&self) -> Bounds2i {
let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) +
Vector2f::new(0.5, 0.5) - self.filter.radius()).floor();
let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) -
Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil();
Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2))
}
pub fn get_physical_extent(&self) -> Bounds2f {
let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float;
let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt();
let y = aspect * x;
Bounds2f::new(
&Point2f::new(-x / 2.0, -y / 2.0),
&Point2f::new(x / 2.0, y / 2.0)
)
}
pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile {
// Bound image pixels that samples in sampleBounds contribute to
let half_pixel = Vector2f::new(0.5, 0.5);
let float_bounds = Bounds2f {
p_min: Point2f::from(sample_bounds.p_min),
p_max: Point2f::from(sample_bounds.p_max)
};
let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil();
let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor();
let p0 = Point2i::from(p0f);
let p1 = Point2i::from(p1f) + Point2i::new(1, 1);
let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds);
FilmTile::new(&tile_bounds, &self.filter.radius(),
&self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance)
}
pub fn merge_film_tile(&self, tile: &mut FilmTile) {
// TODO: ProfilePhase
let mut pixels = self.pixels.write().unwrap();
info!("Merging film tile {}", tile.pixel_bounds);
for p in &tile.get_pixel_bounds() {
// Merge pixel into Film::pixels
let tile_pixel = tile.get_pixel(&p);
let offset = self.get_pixel(&p);
let merge_pixel = &mut pixels[offset];
let xyz = tile_pixel.contrib_sum.to_xyz();
for i in 0..3 {
merge_pixel.xyz[i] += xyz[i];
}
merge_pixel.filter_weight_sum += tile_pixel.filter_weight_sum;
}
}
fn get_pixel(&self, p: &Point2i) -> usize {
assert!(self.cropped_pixel_bounds.inside_exclusive(p));
let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x;
let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width;
offset as usize
}
pub fn set_image(&self, img: &[Spectrum]) {
let npixels = self.cropped_pixel_bounds.area() as usize;
let mut pixels = self.pixels.write().unwrap();
for i in 0..npixels {
let p = &mut pixels[i];
p.xyz = img[i].to_xyz();
p.filter_weight_sum = 1.0;
p.splat_xyz[0] = AtomicFloat::new(0.0);
p.splat_xyz[1] = AtomicFloat::new(0.0);
p.splat_xyz[2] = AtomicFloat::new(0.0);
}
}
pub fn add_splat(&self, p: &Point2f, mut v: Spectrum) {
// TODO: ProfilePhase
if v.has_nans() {
error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y);
return;
} else if v.y() < 0.0 {
error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y);
return
} else if v.y().is_infinite() |
let pi = Point2i::from(p.floor());
if !self.cropped_pixel_bounds.inside_exclusive(&pi) { return; }
if v.y() > self.max_sample_luminance {
v *= self.max_sample_luminance / v.y();
}
let mut pixels = self.pixels.write().unwrap();
let xyz = v.to_xyz();
let offset = self.get_pixel(&pi);
let pixel = &mut pixels[offset];
for i in 0..3 {
pixel.splat_xyz[i].add(xyz[i]);
}
}
pub fn write_image(&self, splat_scale: Float) -> Result<()> {
// Convert image to RGB and compute final pixel values
info!("Converting image to RGB and computing final weighted pixel values");
let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize];
let mut offset: usize;
for p in &self.cropped_pixel_bounds {
// Convert pixel XYZ color to RGB
offset = self.get_pixel(&p);
let pixel = &self.pixels.read().unwrap()[offset];
let start = offset * 3;
let xyz = xyz_to_rgb(pixel.xyz);
rgb[start] = xyz[0];
rgb[start + 1] = xyz[1];
rgb[start + 2] = xyz[2];
// Normalize pixel with weight sum
let filter_weight_sum = pixel.filter_weight_sum;
if filter_weight_sum != 0.0 {
let invwt = 1.0 / filter_weight_sum;
rgb[start] = (rgb[start] * invwt).max(0.0);
rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0);
rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0);
}
// splate value at pixel
let splat_xyz: [Float; 3] = [
pixel.splat_xyz[0].clone().into(),
pixel.splat_xyz[1].clone().into(),
pixel.splat_xyz[2].clone().into()
];
let splat_rgb = xyz_to_rgb(splat_xyz);
rgb[start] += splat_scale * splat_rgb[0];
rgb[start + 1] += splat_scale * splat_rgb[1];
rgb[start + 2] += splat_scale * splat_rgb[2];
// Scale pixel value by scale
rgb[start] *= self.scale;
rgb[start + 1] *= self.scale;
rgb[start + 2] *= self.scale;
}
info!("Writing image {} with bounds {}", self.filename.display(), self.cropped_pixel_bounds);
// TODO: WriteImage
write_image(&self.filename, &rgb, &self.cropped_pixel_bounds, &self.full_resolution)
}
}
pub struct FilmTile<'a> {
pub pixel_bounds : Bounds2i,
filter_radius : Vector2f,
inv_filter_radius : Vector2f,
filter_table : &'a[Float],
filter_table_size : usize,
pixels : Vec<FilmTilePixel>,
max_sample_luminance: Float
}
impl<'a> FilmTile<'a> {
pub fn new(pixel_bounds: &Bounds2i, filter_radius: &Vector2f, filter_table: &'a[Float],
filter_table_size: usize, max_sample_luminance: Float) -> Self {
Self {
filter_table,
filter_table_size,
max_sample_luminance,
pixel_bounds: *pixel_bounds,
filter_radius: *filter_radius,
inv_filter_radius | {
error!("Ignoring slatted spectrum with infinite luminance at ({}, {})", p.x, p.y);
return;
} | conditional_block |
film.rs | , crop_pixel_bounds);
// Allocate film image storage
let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize];
// TODO: filmPixelMemory
// Precompute filter weight table
let mut offset = 0;
let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH];
for y in 0..FILTER_TABLE_WIDTH {
for x in 0..FILTER_TABLE_WIDTH {
let p = Point2f::new(
(x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float,
(y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float
);
filter_table[offset] = filt.evaluate(&p);
offset += 1;
}
}
Self {
full_resolution: *resolution,
diagonal: diagonal * 0.001,
filter: filt,
filename: filename.to_owned(),
scale,
max_sample_luminance,
cropped_pixel_bounds: crop_pixel_bounds,
pixels: RwLock::new(pixels),
filter_table
}
}
pub fn get_sample_bounds(&self) -> Bounds2i {
let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) +
Vector2f::new(0.5, 0.5) - self.filter.radius()).floor();
let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) -
Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil();
Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2))
}
pub fn get_physical_extent(&self) -> Bounds2f {
let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float;
let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt();
let y = aspect * x;
Bounds2f::new(
&Point2f::new(-x / 2.0, -y / 2.0),
&Point2f::new(x / 2.0, y / 2.0)
)
}
pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile {
// Bound image pixels that samples in sampleBounds contribute to
let half_pixel = Vector2f::new(0.5, 0.5);
let float_bounds = Bounds2f {
p_min: Point2f::from(sample_bounds.p_min),
p_max: Point2f::from(sample_bounds.p_max)
};
let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil();
let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor();
let p0 = Point2i::from(p0f);
let p1 = Point2i::from(p1f) + Point2i::new(1, 1);
let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds);
FilmTile::new(&tile_bounds, &self.filter.radius(),
&self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance)
}
pub fn merge_film_tile(&self, tile: &mut FilmTile) {
// TODO: ProfilePhase
let mut pixels = self.pixels.write().unwrap();
info!("Merging film tile {}", tile.pixel_bounds);
for p in &tile.get_pixel_bounds() {
// Merge pixel into Film::pixels
let tile_pixel = tile.get_pixel(&p);
let offset = self.get_pixel(&p);
let merge_pixel = &mut pixels[offset];
let xyz = tile_pixel.contrib_sum.to_xyz();
for i in 0..3 {
merge_pixel.xyz[i] += xyz[i];
}
merge_pixel.filter_weight_sum += tile_pixel.filter_weight_sum;
}
}
fn get_pixel(&self, p: &Point2i) -> usize {
assert!(self.cropped_pixel_bounds.inside_exclusive(p));
let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x;
let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width;
offset as usize
}
pub fn set_image(&self, img: &[Spectrum]) {
let npixels = self.cropped_pixel_bounds.area() as usize;
let mut pixels = self.pixels.write().unwrap();
for i in 0..npixels {
let p = &mut pixels[i];
p.xyz = img[i].to_xyz();
p.filter_weight_sum = 1.0;
p.splat_xyz[0] = AtomicFloat::new(0.0);
p.splat_xyz[1] = AtomicFloat::new(0.0);
p.splat_xyz[2] = AtomicFloat::new(0.0);
}
}
pub fn | (&self, p: &Point2f, mut v: Spectrum) {
// TODO: ProfilePhase
if v.has_nans() {
error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y);
return;
} else if v.y() < 0.0 {
error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y);
return
} else if v.y().is_infinite() {
error!("Ignoring slatted spectrum with infinite luminance at ({}, {})", p.x, p.y);
return;
}
let pi = Point2i::from(p.floor());
if !self.cropped_pixel_bounds.inside_exclusive(&pi) { return; }
if v.y() > self.max_sample_luminance {
v *= self.max_sample_luminance / v.y();
}
let mut pixels = self.pixels.write().unwrap();
let xyz = v.to_xyz();
let offset = self.get_pixel(&pi);
let pixel = &mut pixels[offset];
for i in 0..3 {
pixel.splat_xyz[i].add(xyz[i]);
}
}
pub fn write_image(&self, splat_scale: Float) -> Result<()> {
// Convert image to RGB and compute final pixel values
info!("Converting image to RGB and computing final weighted pixel values");
let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize];
let mut offset: usize;
for p in &self.cropped_pixel_bounds {
// Convert pixel XYZ color to RGB
offset = self.get_pixel(&p);
let pixel = &self.pixels.read().unwrap()[offset];
let start = offset * 3;
let xyz = xyz_to_rgb(pixel.xyz);
rgb[start] = xyz[0];
rgb[start + 1] = xyz[1];
rgb[start + 2] = xyz[2];
// Normalize pixel with weight sum
let filter_weight_sum = pixel.filter_weight_sum;
if filter_weight_sum != 0.0 {
let invwt = 1.0 / filter_weight_sum;
rgb[start] = (rgb[start] * invwt).max(0.0);
rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0);
rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0);
}
// splate value at pixel
let splat_xyz: [Float; 3] = [
pixel.splat_xyz[0].clone().into(),
pixel.splat_xyz[1].clone().into(),
pixel.splat_xyz[2].clone().into()
];
let splat_rgb = xyz_to_rgb(splat_xyz);
rgb[start] += splat_scale * splat_rgb[0];
rgb[start + 1] += splat_scale * splat_rgb[1];
rgb[start + 2] += splat_scale * splat_rgb[2];
// Scale pixel value by scale
rgb[start] *= self.scale;
rgb[start + 1] *= self.scale;
rgb[start + 2] *= self.scale;
}
info!("Writing image {} with bounds {}", self.filename.display(), self.cropped_pixel_bounds);
// TODO: WriteImage
write_image(&self.filename, &rgb, &self.cropped_pixel_bounds, &self.full_resolution)
}
}
pub struct FilmTile<'a> {
pub pixel_bounds : Bounds2i,
filter_radius : Vector2f,
inv_filter_radius : Vector2f,
filter_table : &'a[Float],
filter_table_size : usize,
pixels : Vec<FilmTilePixel>,
max_sample_luminance: Float
}
impl<'a> FilmTile<'a> {
pub fn new(pixel_bounds: &Bounds2i, filter_radius: &Vector2f, filter_table: &'a[Float],
filter_table_size: usize, max_sample_luminance: Float) -> Self {
Self {
filter_table,
filter_table_size,
max_sample_luminance,
pixel_bounds: *pixel_bounds,
filter_radius: *filter_radius,
inv_filter_radius: | add_splat | identifier_name |
film.rs | _window, crop_pixel_bounds);
// Allocate film image storage
let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize];
// TODO: filmPixelMemory
// Precompute filter weight table | let p = Point2f::new(
(x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float,
(y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float
);
filter_table[offset] = filt.evaluate(&p);
offset += 1;
}
}
Self {
full_resolution: *resolution,
diagonal: diagonal * 0.001,
filter: filt,
filename: filename.to_owned(),
scale,
max_sample_luminance,
cropped_pixel_bounds: crop_pixel_bounds,
pixels: RwLock::new(pixels),
filter_table
}
}
pub fn get_sample_bounds(&self) -> Bounds2i {
let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) +
Vector2f::new(0.5, 0.5) - self.filter.radius()).floor();
let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) -
Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil();
Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2))
}
pub fn get_physical_extent(&self) -> Bounds2f {
let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float;
let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt();
let y = aspect * x;
Bounds2f::new(
&Point2f::new(-x / 2.0, -y / 2.0),
&Point2f::new(x / 2.0, y / 2.0)
)
}
pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile {
// Bound image pixels that samples in sampleBounds contribute to
let half_pixel = Vector2f::new(0.5, 0.5);
let float_bounds = Bounds2f {
p_min: Point2f::from(sample_bounds.p_min),
p_max: Point2f::from(sample_bounds.p_max)
};
let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil();
let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor();
let p0 = Point2i::from(p0f);
let p1 = Point2i::from(p1f) + Point2i::new(1, 1);
let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds);
FilmTile::new(&tile_bounds, &self.filter.radius(),
&self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance)
}
pub fn merge_film_tile(&self, tile: &mut FilmTile) {
// TODO: ProfilePhase
let mut pixels = self.pixels.write().unwrap();
info!("Merging film tile {}", tile.pixel_bounds);
for p in &tile.get_pixel_bounds() {
// Merge pixel into Film::pixels
let tile_pixel = tile.get_pixel(&p);
let offset = self.get_pixel(&p);
let merge_pixel = &mut pixels[offset];
let xyz = tile_pixel.contrib_sum.to_xyz();
for i in 0..3 {
merge_pixel.xyz[i] += xyz[i];
}
merge_pixel.filter_weight_sum += tile_pixel.filter_weight_sum;
}
}
fn get_pixel(&self, p: &Point2i) -> usize {
assert!(self.cropped_pixel_bounds.inside_exclusive(p));
let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x;
let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width;
offset as usize
}
pub fn set_image(&self, img: &[Spectrum]) {
let npixels = self.cropped_pixel_bounds.area() as usize;
let mut pixels = self.pixels.write().unwrap();
for i in 0..npixels {
let p = &mut pixels[i];
p.xyz = img[i].to_xyz();
p.filter_weight_sum = 1.0;
p.splat_xyz[0] = AtomicFloat::new(0.0);
p.splat_xyz[1] = AtomicFloat::new(0.0);
p.splat_xyz[2] = AtomicFloat::new(0.0);
}
}
pub fn add_splat(&self, p: &Point2f, mut v: Spectrum) {
// TODO: ProfilePhase
if v.has_nans() {
error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y);
return;
} else if v.y() < 0.0 {
error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y);
return
} else if v.y().is_infinite() {
error!("Ignoring slatted spectrum with infinite luminance at ({}, {})", p.x, p.y);
return;
}
let pi = Point2i::from(p.floor());
if !self.cropped_pixel_bounds.inside_exclusive(&pi) { return; }
if v.y() > self.max_sample_luminance {
v *= self.max_sample_luminance / v.y();
}
let mut pixels = self.pixels.write().unwrap();
let xyz = v.to_xyz();
let offset = self.get_pixel(&pi);
let pixel = &mut pixels[offset];
for i in 0..3 {
pixel.splat_xyz[i].add(xyz[i]);
}
}
pub fn write_image(&self, splat_scale: Float) -> Result<()> {
// Convert image to RGB and compute final pixel values
info!("Converting image to RGB and computing final weighted pixel values");
let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize];
let mut offset: usize;
for p in &self.cropped_pixel_bounds {
// Convert pixel XYZ color to RGB
offset = self.get_pixel(&p);
let pixel = &self.pixels.read().unwrap()[offset];
let start = offset * 3;
let xyz = xyz_to_rgb(pixel.xyz);
rgb[start] = xyz[0];
rgb[start + 1] = xyz[1];
rgb[start + 2] = xyz[2];
// Normalize pixel with weight sum
let filter_weight_sum = pixel.filter_weight_sum;
if filter_weight_sum != 0.0 {
let invwt = 1.0 / filter_weight_sum;
rgb[start] = (rgb[start] * invwt).max(0.0);
rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0);
rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0);
}
// splate value at pixel
let splat_xyz: [Float; 3] = [
pixel.splat_xyz[0].clone().into(),
pixel.splat_xyz[1].clone().into(),
pixel.splat_xyz[2].clone().into()
];
let splat_rgb = xyz_to_rgb(splat_xyz);
rgb[start] += splat_scale * splat_rgb[0];
rgb[start + 1] += splat_scale * splat_rgb[1];
rgb[start + 2] += splat_scale * splat_rgb[2];
// Scale pixel value by scale
rgb[start] *= self.scale;
rgb[start + 1] *= self.scale;
rgb[start + 2] *= self.scale;
}
info!("Writing image {} with bounds {}", self.filename.display(), self.cropped_pixel_bounds);
// TODO: WriteImage
write_image(&self.filename, &rgb, &self.cropped_pixel_bounds, &self.full_resolution)
}
}
pub struct FilmTile<'a> {
pub pixel_bounds : Bounds2i,
filter_radius : Vector2f,
inv_filter_radius : Vector2f,
filter_table : &'a[Float],
filter_table_size : usize,
pixels : Vec<FilmTilePixel>,
max_sample_luminance: Float
}
impl<'a> FilmTile<'a> {
pub fn new(pixel_bounds: &Bounds2i, filter_radius: &Vector2f, filter_table: &'a[Float],
filter_table_size: usize, max_sample_luminance: Float) -> Self {
Self {
filter_table,
filter_table_size,
max_sample_luminance,
pixel_bounds: *pixel_bounds,
filter_radius: *filter_radius,
inv_filter_radius: Vector | let mut offset = 0;
let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH];
for y in 0..FILTER_TABLE_WIDTH {
for x in 0..FILTER_TABLE_WIDTH { | random_line_split |
film.rs |
}
pub struct Film {
pub full_resolution : Point2i,
pub diagonal : Float,
pub filter : Filters,
pub filename : PathBuf,
pub cropped_pixel_bounds: Bounds2i,
pixels : RwLock<Vec<Pixel>>,
filter_table : [Float; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH],
scale : Float,
max_sample_luminance : Float
}
impl Film {
pub fn new(resolution: &Point2i, crop_window: &Bounds2f, filt: Filters, diagonal: Float,
filename: PathBuf, scale: Float, max_sample_luminance: Float) -> Self {
let crop_pixel_bounds = Bounds2i::from_points(
&Point2i::new(
(resolution.x as Float * crop_window.p_min.x).ceil() as isize,
(resolution.y as Float * crop_window.p_min.y).ceil() as isize),
&Point2i::new(
(resolution.x as Float * crop_window.p_max.x).ceil() as isize,
(resolution.y as Float * crop_window.p_max.y).ceil() as isize
)
);
info!("Created film with full resolution {}\
. Crop window of {} -> croppedPixelBounds {}",
resolution, crop_window, crop_pixel_bounds);
// Allocate film image storage
let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize];
// TODO: filmPixelMemory
// Precompute filter weight table
let mut offset = 0;
let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH];
for y in 0..FILTER_TABLE_WIDTH {
for x in 0..FILTER_TABLE_WIDTH {
let p = Point2f::new(
(x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float,
(y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float
);
filter_table[offset] = filt.evaluate(&p);
offset += 1;
}
}
Self {
full_resolution: *resolution,
diagonal: diagonal * 0.001,
filter: filt,
filename: filename.to_owned(),
scale,
max_sample_luminance,
cropped_pixel_bounds: crop_pixel_bounds,
pixels: RwLock::new(pixels),
filter_table
}
}
pub fn get_sample_bounds(&self) -> Bounds2i {
let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) +
Vector2f::new(0.5, 0.5) - self.filter.radius()).floor();
let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) -
Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil();
Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2))
}
pub fn get_physical_extent(&self) -> Bounds2f {
let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float;
let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt();
let y = aspect * x;
Bounds2f::new(
&Point2f::new(-x / 2.0, -y / 2.0),
&Point2f::new(x / 2.0, y / 2.0)
)
}
pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile {
// Bound image pixels that samples in sampleBounds contribute to
let half_pixel = Vector2f::new(0.5, 0.5);
let float_bounds = Bounds2f {
p_min: Point2f::from(sample_bounds.p_min),
p_max: Point2f::from(sample_bounds.p_max)
};
let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil();
let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor();
let p0 = Point2i::from(p0f);
let p1 = Point2i::from(p1f) + Point2i::new(1, 1);
let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds);
FilmTile::new(&tile_bounds, &self.filter.radius(),
&self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance)
}
pub fn merge_film_tile(&self, tile: &mut FilmTile) {
// TODO: ProfilePhase
let mut pixels = self.pixels.write().unwrap();
info!("Merging film tile {}", tile.pixel_bounds);
for p in &tile.get_pixel_bounds() {
// Merge pixel into Film::pixels
let tile_pixel = tile.get_pixel(&p);
let offset = self.get_pixel(&p);
let merge_pixel = &mut pixels[offset];
let xyz = tile_pixel.contrib_sum.to_xyz();
for i in 0..3 {
merge_pixel.xyz[i] += xyz[i];
}
merge_pixel.filter_weight_sum += tile_pixel.filter_weight_sum;
}
}
fn get_pixel(&self, p: &Point2i) -> usize {
assert!(self.cropped_pixel_bounds.inside_exclusive(p));
let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x;
let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width;
offset as usize
}
pub fn set_image(&self, img: &[Spectrum]) {
let npixels = self.cropped_pixel_bounds.area() as usize;
let mut pixels = self.pixels.write().unwrap();
for i in 0..npixels {
let p = &mut pixels[i];
p.xyz = img[i].to_xyz();
p.filter_weight_sum = 1.0;
p.splat_xyz[0] = AtomicFloat::new(0.0);
p.splat_xyz[1] = AtomicFloat::new(0.0);
p.splat_xyz[2] = AtomicFloat::new(0.0);
}
}
pub fn add_splat(&self, p: &Point2f, mut v: Spectrum) {
// TODO: ProfilePhase
if v.has_nans() {
error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y);
return;
} else if v.y() < 0.0 {
error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y);
return
} else if v.y().is_infinite() {
error!("Ignoring slatted spectrum with infinite luminance at ({}, {})", p.x, p.y);
return;
}
let pi = Point2i::from(p.floor());
if !self.cropped_pixel_bounds.inside_exclusive(&pi) { return; }
if v.y() > self.max_sample_luminance {
v *= self.max_sample_luminance / v.y();
}
let mut pixels = self.pixels.write().unwrap();
let xyz = v.to_xyz();
let offset = self.get_pixel(&pi);
let pixel = &mut pixels[offset];
for i in 0..3 {
pixel.splat_xyz[i].add(xyz[i]);
}
}
pub fn write_image(&self, splat_scale: Float) -> Result<()> {
// Convert image to RGB and compute final pixel values
info!("Converting image to RGB and computing final weighted pixel values");
let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize];
let mut offset: usize;
for p in &self.cropped_pixel_bounds {
// Convert pixel XYZ color to RGB
offset = self.get_pixel(&p);
let pixel = &self.pixels.read().unwrap()[offset];
let start = offset * 3;
let xyz = xyz_to_rgb(pixel.xyz);
rgb[start] = xyz[0];
rgb[start + 1] = xyz[1];
rgb[start + 2] = xyz[2];
// Normalize pixel with weight sum
let filter_weight_sum = pixel.filter_weight_sum;
if filter_weight_sum != 0.0 {
let invwt = 1.0 / filter_weight_sum;
rgb[start] = (rgb[start] * invwt).max(0.0);
rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0);
rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0);
}
// splate value at pixel
let splat_xyz: [Float; 3] = [
pixel.splat_xyz[0].clone().into(),
pixel.splat_xyz[1].clone().into(),
pixel.splat_xyz[2 | {
Self {
xyz: [0.0; 3],
filter_weight_sum: 0.0,
splat_xyz: [AtomicFloat::default(), AtomicFloat::default(), AtomicFloat::default()],
_pad: 0.0
}
} | identifier_body | |
auth.rs | (AuthResult::Rejected(resp))
}
fn cookie_params(req: &RequestWrapper) -> &'static str {
if req.is_https() && get_config().is_cors_enabled(&req.request) {
"SameSite=None; Secure"
} else {
"SameSite=Lax"
}
}
impl Authenticator for SharedSecretAuthenticator {
type Credentials = ();
fn authenticate(&self, mut req: RequestWrapper) -> AuthFuture<()> {
// this is part where client can authenticate itself and get token
if req.method() == Method::POST && req.path() == "/authenticate" {
debug!("Authentication request");
let auth = self.secrets.clone();
return Box::pin(async move {
match req.body_bytes().await {
Err(e) => bail!(e),
Ok(b) => {
let content_type = req
.headers()
.get("Content-Type")
.and_then(|v| v.to_str().ok())
.map(|s| s.to_lowercase());
let params = if let Some(ct) = content_type {
if ct.starts_with("application/x-www-form-urlencoded") {
form_urlencoded::parse(b.as_ref())
.into_owned()
.collect::<HashMap<String, String>>()
} else if ct.starts_with("application/json") {
match serde_json::from_slice::<HashMap<String, String>>(&b) {
Ok(m) => m,
Err(e) => {
error!("Invalid JSON: {}", e);
return deny(&req);
}
}
} else {
error!("Invalid content type {}", ct);
return deny(&req);
}
} else {
error!("Content-Type header is missing");
return deny(&req);
};
if let Some(secret) = params.get("secret") {
debug!("Authenticating user");
if auth.auth_token_ok(secret) {
debug!("Authentication success");
let token = auth.new_auth_token();
let resp = Response::builder()
.typed_header(ContentType::text())
.typed_header(ContentLength(token.len() as u64))
.header(
SET_COOKIE,
format!(
"{}={}; Max-Age={}; {}",
COOKIE_NAME,
token,
get_config().token_validity_hours * 3600,
cookie_params(&req)
)
.as_str(),
);
Ok(AuthResult::LoggedIn(resp.body(token.into()).unwrap()))
} else {
error!(
"Invalid authentication: invalid shared secret, client: {:?}",
req.remote_addr()
);
// Let's not return failure immediately, because somebody is using wrong shared secret
// Legitimate user can wait a bit, but for brute force attack it can be advantage not to reply quickly
sleep(Duration::from_millis(500)).await;
deny(&req)
}
} else {
error!(
"Invalid authentication: missing shared secret, client: {:?}",
req.remote_addr()
);
deny(&req)
}
}
}
});
} else {
// And in this part we check token
let mut token = req
.headers()
.typed_get::<Authorization<Bearer>>()
.map(|a| a.0.token().to_owned());
if token.is_none() {
token = req
.headers()
.typed_get::<Cookie>()
.and_then(|c| c.get(COOKIE_NAME).map(borrow::ToOwned::to_owned));
}
if token.is_none() {
error!(
"Invalid access: missing token on path {}, client: {:?}",
req.path(),
req.remote_addr()
);
return Box::pin(future::ready(deny(&req)));
}
if !self.secrets.token_ok(&token.unwrap()) {
error!(
"Invalid access: invalid token on path {}, client: {:?}",
req.path(),
req.remote_addr()
);
return Box::pin(future::ready(deny(&req)));
}
}
// If everything is ok we return credentials (in this case they are just unit type) and we return back request
Box::pin(future::ok(AuthResult::Authenticated {
request: req,
credentials: (),
}))
}
}
impl Secrets {
fn auth_token_ok(&self, token: &str) -> bool {
let parts = token
.split('|')
.filter_map(|s| match BASE64.decode(s.as_bytes()) {
Ok(x) => Some(x),
Err(e) => {
error!(
"Invalid base64 in authentication token {} in string {}",
e, s
);
None
}
})
.collect::<Vec<_>>();
if parts.len() == 2 {
if parts[0].len() != 32 {
error!("Random salt must be 32 bytes");
return false;
}
let mut hash2 = self.shared_secret.clone().into_bytes();
let hash = &parts[1];
hash2.extend(&parts[0]);
let hash2 = digest(&SHA256, &hash2);
return hash2.as_ref() == &hash[..];
} else {
error!("Incorrectly formed login token - {} parts", parts.len())
}
false
}
fn new_auth_token(&self) -> String {
Token::new(self.token_validity_hours, &self.server_secret).into()
}
fn token_ok(&self, token: &str) -> bool {
match token.parse::<Token>() {
Ok(token) => token.is_valid(&self.server_secret),
Err(e) => {
warn!("Invalid token: {}", e);
false
}
}
}
}
#[derive(Clone, PartialEq, Debug)]
struct Token {
random: [u8; 32],
validity: [u8; 8],
signature: [u8; 32],
}
fn prepare_data(r: &[u8; 32], v: [u8; 8]) -> [u8; 40] {
let mut to_sign = [0u8; 40];
to_sign[0..32].copy_from_slice(&r[..]);
to_sign[32..40].copy_from_slice(&v[..]);
to_sign
}
fn now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Invalid system time")
.as_secs()
}
impl Token {
fn new(token_validity_hours: u32, secret: &[u8]) -> Self {
let mut random = [0u8; 32];
let rng = SystemRandom::new();
rng.fill(&mut random)
.expect("Cannot generate random number");
let validity: u64 = now() + u64::from(token_validity_hours) * 3600;
let validity: [u8; 8] = validity.to_be_bytes();
let to_sign = prepare_data(&random, validity);
let key = hmac::Key::new(hmac::HMAC_SHA256, secret);
let sig = hmac::sign(&key, &to_sign);
let slice = sig.as_ref();
assert!(slice.len() == 32);
let mut signature = [0u8; 32];
signature.copy_from_slice(slice);
Token {
random,
validity,
signature,
}
}
fn is_valid(&self, secret: &[u8]) -> bool {
let key = hmac::Key::new(hmac::HMAC_SHA256, secret);
let data = prepare_data(&self.random, self.validity);
if hmac::verify(&key, &data, &self.signature).is_err() {
return false;
};
self.validity() > now()
}
fn validity(&self) -> u64 {
let ts: u64 = unsafe { ::std::mem::transmute_copy(&self.validity) };
u64::from_be(ts)
}
}
impl From<Token> for String {
fn from(token: Token) -> String {
let data = [&token.random[..], &token.validity[..], &token.signature[..]].concat();
BASE64.encode(&data)
}
}
#[derive(Error, Debug, PartialEq)]
enum TokenError {
#[error("Invalid token size")]
InvalidSize,
#[error("Invalid token encoding")]
InvalidEncoding(#[from] ::data_encoding::DecodeError),
}
impl ::std::str::FromStr for Token {
type Err = TokenError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let bytes = BASE64.decode(s.as_bytes())?;
if bytes.len() != 72 {
return Err(TokenError::InvalidSize);
};
let mut random = [0u8; 32];
let mut validity = [0u8; 8];
let mut signature = [0u8; 32];
random.copy_from_slice(&bytes[0..32]);
validity.copy_from_slice(&bytes[32..40]);
signature.copy_from_slice(&bytes[40..72]);
Ok(Token {
random,
validity,
signature,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::init::init_default_config;
use borrow::Cow;
use hyper::{Request, StatusCode};
#[test]
fn | test_token | identifier_name | |
auth.rs | _NAME).is_some())
.unwrap_or(false)
{
resp.headers_mut().append(
SET_COOKIE,
HeaderValue::from_str(&format!(
"{}=; Expires={}; {}",
COOKIE_NAME,
COOKIE_DELETE_DATE, | ); // unwrap is safe as we control
}
Ok(AuthResult::Rejected(resp))
}
fn cookie_params(req: &RequestWrapper) -> &'static str {
if req.is_https() && get_config().is_cors_enabled(&req.request) {
"SameSite=None; Secure"
} else {
"SameSite=Lax"
}
}
impl Authenticator for SharedSecretAuthenticator {
type Credentials = ();
fn authenticate(&self, mut req: RequestWrapper) -> AuthFuture<()> {
// this is part where client can authenticate itself and get token
if req.method() == Method::POST && req.path() == "/authenticate" {
debug!("Authentication request");
let auth = self.secrets.clone();
return Box::pin(async move {
match req.body_bytes().await {
Err(e) => bail!(e),
Ok(b) => {
let content_type = req
.headers()
.get("Content-Type")
.and_then(|v| v.to_str().ok())
.map(|s| s.to_lowercase());
let params = if let Some(ct) = content_type {
if ct.starts_with("application/x-www-form-urlencoded") {
form_urlencoded::parse(b.as_ref())
.into_owned()
.collect::<HashMap<String, String>>()
} else if ct.starts_with("application/json") {
match serde_json::from_slice::<HashMap<String, String>>(&b) {
Ok(m) => m,
Err(e) => {
error!("Invalid JSON: {}", e);
return deny(&req);
}
}
} else {
error!("Invalid content type {}", ct);
return deny(&req);
}
} else {
error!("Content-Type header is missing");
return deny(&req);
};
if let Some(secret) = params.get("secret") {
debug!("Authenticating user");
if auth.auth_token_ok(secret) {
debug!("Authentication success");
let token = auth.new_auth_token();
let resp = Response::builder()
.typed_header(ContentType::text())
.typed_header(ContentLength(token.len() as u64))
.header(
SET_COOKIE,
format!(
"{}={}; Max-Age={}; {}",
COOKIE_NAME,
token,
get_config().token_validity_hours * 3600,
cookie_params(&req)
)
.as_str(),
);
Ok(AuthResult::LoggedIn(resp.body(token.into()).unwrap()))
} else {
error!(
"Invalid authentication: invalid shared secret, client: {:?}",
req.remote_addr()
);
// Let's not return failure immediately, because somebody is using wrong shared secret
// Legitimate user can wait a bit, but for brute force attack it can be advantage not to reply quickly
sleep(Duration::from_millis(500)).await;
deny(&req)
}
} else {
error!(
"Invalid authentication: missing shared secret, client: {:?}",
req.remote_addr()
);
deny(&req)
}
}
}
});
} else {
// And in this part we check token
let mut token = req
.headers()
.typed_get::<Authorization<Bearer>>()
.map(|a| a.0.token().to_owned());
if token.is_none() {
token = req
.headers()
.typed_get::<Cookie>()
.and_then(|c| c.get(COOKIE_NAME).map(borrow::ToOwned::to_owned));
}
if token.is_none() {
error!(
"Invalid access: missing token on path {}, client: {:?}",
req.path(),
req.remote_addr()
);
return Box::pin(future::ready(deny(&req)));
}
if !self.secrets.token_ok(&token.unwrap()) {
error!(
"Invalid access: invalid token on path {}, client: {:?}",
req.path(),
req.remote_addr()
);
return Box::pin(future::ready(deny(&req)));
}
}
// If everything is ok we return credentials (in this case they are just unit type) and we return back request
Box::pin(future::ok(AuthResult::Authenticated {
request: req,
credentials: (),
}))
}
}
impl Secrets {
fn auth_token_ok(&self, token: &str) -> bool {
let parts = token
.split('|')
.filter_map(|s| match BASE64.decode(s.as_bytes()) {
Ok(x) => Some(x),
Err(e) => {
error!(
"Invalid base64 in authentication token {} in string {}",
e, s
);
None
}
})
.collect::<Vec<_>>();
if parts.len() == 2 {
if parts[0].len() != 32 {
error!("Random salt must be 32 bytes");
return false;
}
let mut hash2 = self.shared_secret.clone().into_bytes();
let hash = &parts[1];
hash2.extend(&parts[0]);
let hash2 = digest(&SHA256, &hash2);
return hash2.as_ref() == &hash[..];
} else {
error!("Incorrectly formed login token - {} parts", parts.len())
}
false
}
fn new_auth_token(&self) -> String {
Token::new(self.token_validity_hours, &self.server_secret).into()
}
fn token_ok(&self, token: &str) -> bool {
match token.parse::<Token>() {
Ok(token) => token.is_valid(&self.server_secret),
Err(e) => {
warn!("Invalid token: {}", e);
false
}
}
}
}
#[derive(Clone, PartialEq, Debug)]
struct Token {
random: [u8; 32],
validity: [u8; 8],
signature: [u8; 32],
}
fn prepare_data(r: &[u8; 32], v: [u8; 8]) -> [u8; 40] {
let mut to_sign = [0u8; 40];
to_sign[0..32].copy_from_slice(&r[..]);
to_sign[32..40].copy_from_slice(&v[..]);
to_sign
}
fn now() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Invalid system time")
.as_secs()
}
impl Token {
fn new(token_validity_hours: u32, secret: &[u8]) -> Self {
let mut random = [0u8; 32];
let rng = SystemRandom::new();
rng.fill(&mut random)
.expect("Cannot generate random number");
let validity: u64 = now() + u64::from(token_validity_hours) * 3600;
let validity: [u8; 8] = validity.to_be_bytes();
let to_sign = prepare_data(&random, validity);
let key = hmac::Key::new(hmac::HMAC_SHA256, secret);
let sig = hmac::sign(&key, &to_sign);
let slice = sig.as_ref();
assert!(slice.len() == 32);
let mut signature = [0u8; 32];
signature.copy_from_slice(slice);
Token {
random,
validity,
signature,
}
}
fn is_valid(&self, secret: &[u8]) -> bool {
let key = hmac::Key::new(hmac::HMAC_SHA256, secret);
let data = prepare_data(&self.random, self.validity);
if hmac::verify(&key, &data, &self.signature).is_err() {
return false;
};
self.validity() > now()
}
fn validity(&self) -> u64 {
let ts: u64 = unsafe { ::std::mem::transmute_copy(&self.validity) };
u64::from_be(ts)
}
}
impl From<Token> for String {
fn from(token: Token) -> String {
let data = [&token.random[..], &token.validity[..], &token.signature[..]].concat();
BASE64.encode(&data)
}
}
#[derive(Error, Debug, PartialEq)]
enum TokenError {
#[error("Invalid token size")]
InvalidSize,
#[error("Invalid token encoding")]
InvalidEncoding(#[from] ::data_encoding::DecodeError),
}
impl ::std::str::FromStr for Token {
type Err = TokenError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let bytes = BASE64.decode(s.as_bytes())?;
if bytes.len() != 72 {
return Err(TokenError::InvalidSize);
};
let mut random = [0u8; 32];
let mut validity = [0u8; 8];
let mut signature = [0u8; 32];
random.copy_from_slice(&bytes[0..32]);
validity.copy_from_slice(&bytes[32..40]);
signature | cookie_params(req)
))
.unwrap(), | random_line_split |
wixSelector.ts | ?: Connection
$wScope?: $WScope
itemId?: string
}) => SdkInstance | Array<SdkInstance> | null
$wFactory: (controllerId: string, getInstancesForRole: GetInstanceFunction, repeaterId?: string) => $W
flushOnReadyCallbacks: () => Promise<any>
onPageReady: (onReadyCallback: () => Promise<any>, controllerId: string) => void
create$wRepeaterScope(params: { compId: string; itemId: string }): $WScope
}
const resolveSelectorType = (rawSelector: string): 'role' | 'nickname' | 'type' => {
switch (rawSelector[0]) {
case '@':
return 'role'
case '#':
return 'nickname'
default:
return 'type'
}
}
export default function ({
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
bootstrapData,
}: {
modelsApi: ModelsAPI
getSdkFactoryParams: SdkFactoryParams['getSdkFactoryParams']
controllerEventsFactory: IControllerEvents
sdkInstancesCache: InstanceCacheFactory
componentSdksManager: ComponentSdksManager
logger: PlatformLogger
bootstrapData: BootstrapData
}) {
// Controls whether to queue or execute onReady callbacks.
let isFlushingOnReadyCallbacks = false
let onReadyCallbacks: { [controllerCompId: string]: Array<() => void> } = {}
const create$wRepeaterScope = ({ compId, itemId }: { compId: string; itemId: string }) => ({ type: EVENT_CONTEXT_SCOPE.COMPONENT_SCOPE, id: compId, compId, additionalData: { itemId } })
const create$wGlobalScope = () => ({ type: EVENT_CONTEXT_SCOPE.GLOBAL_SCOPE, additionalData: {} })
// self-exection function, allow caching the calculations of mapping all componentTypes to their sdkType
const resolveCompTypeForSdkType = (() => {
const _cache: Record<string, Record<string, boolean>> = {}
return (sdkType: string, compId: string) => {
const fromCache = () => {
const compType = modelsApi.getCompType(compId)
return compType && _cache[sdkType][compType] ? compType : null
}
if (_cache[sdkType]) {
return fromCache()
}
_cache[sdkType] = componentSdksManager.getSdkTypeToComponentTypes(sdkType).reduce(
(result, _compType) => ({
...result,
[_compType]: true,
}),
{} as Record<string, boolean>
)
return fromCache()
}
})()
const invokeControllerOnReady = (controllerCompId: string) => {
// It's possible to have a controller without an onReady Callback, for example wix code without any $w.onReady().
if (!onReadyCallbacks[controllerCompId]) {
return Promise.resolve()
}
const promises = onReadyCallbacks[controllerCompId].map((onReady) => onReady())
return promises
}
const flushOnReadyCallbacks = async () => {
await componentSdksManager.waitForSdksToLoad()
isFlushingOnReadyCallbacks = true
const onReadyPromise = Promise.all(_.flatMap(modelsApi.getControllers(), invokeControllerOnReady))
onReadyCallbacks = {}
return onReadyPromise
}
function getInstance({
controllerCompId,
compId,
connection,
compType,
role,
$wScope = create$wGlobalScope(),
itemId,
}: {
controllerCompId: string
compId: string
compType: string
connection?: Connection
role: string
$wScope?: $WScope
itemId?: string
}): SdkInstance | Array<SdkInstance> | null {
const compCacheParams: CompCacheParams = {
controllerCompId,
compId: getFullId(compId),
role,
itemId: itemId ?? getItemId(compId),
}
const instanceFromCache = sdkInstancesCache.getSdkInstance(compCacheParams)
if (instanceFromCache) {
return instanceFromCache
}
modelsApi.updateDisplayedIdPropsFromTemplate(compId)
const componentSdkFactory = componentSdksManager.getComponentSdkFactory(compType, { compId, role, controllerCompId })
if (!componentSdkFactory) {
return {}
}
const sdkFactoryParams = getSdkFactoryParams({
$wScope,
compId,
controllerCompId,
connection,
compType,
role,
getInstance,
create$w: () => create$w(controllerCompId),
})
const instance = componentSdkFactory(sdkFactoryParams)
sdkInstancesCache.setSdkInstance(compCacheParams, instance)
return instance
}
function queueOnReadyCallback(onReadyCallback: () => Promise<any>, controllerId: string) {
onReadyCallbacks[controllerId] = onReadyCallbacks[controllerId] || []
onReadyCallbacks[controllerId].push(onReadyCallback)
}
const createInstancesGetter = (controllerId: string): GetInstanceFunction => (role: string) => {
const connections = modelsApi.getConnectionsByCompId(controllerId, role)
return connections.map((connection: Connection) => {
const compId = connection.compId
const compType = modelsApi.getCompType(compId)
if (!compType) {
logger.captureError(new Error('$W Error 2: Failed to find component from connection in structure'), {
tags: {
GetInstanceFunction: true,
},
extra: {
controllerCompId: controllerId,
role,
compId,
structureModel: modelsApi.getStructureModel(),
connection,
currentPageId: bootstrapData.currentPageId,
currentContextId: bootstrapData.currentContextId,
},
})
return {}
}
return getInstance({
controllerCompId: controllerId,
compId,
connection,
role,
compType,
})
})
}
const $wDocument = (controllerId: string) => {
const DocumentSdkFactory = componentSdksManager.getComponentSdkFactory('Document', { compId: 'Document', controllerCompId: controllerId, role: 'Document' })
if (!DocumentSdkFactory) {
return
}
return DocumentSdkFactory(
getSdkFactoryParams({
compId: controllerId,
controllerCompId: controllerId,
compType: 'Document',
role: 'Document',
getInstance,
create$w: () => create$w(controllerId),
$wScope: create$wGlobalScope(),
})
)
}
const $wComponent = (
selector: string,
controllerId: string,
{ getInstancesForRole, findOnlyNestedComponents }: { getInstancesForRole: GetInstanceFunction; findOnlyNestedComponents: boolean }
) => {
const getInstancesForType = (sdkType: string, connections: Array<Connection>): Array<SdkInstance> => {
return connections.reduce((instances, connection) => {
const { compId, role } = connection
const compType = resolveCompTypeForSdkType(sdkType, compId)
if (!compType) {
return instances
}
const instance: SdkInstance | Array<SdkInstance> | null = getInstance({
controllerCompId: controllerId,
compId,
connection, | })
if (_.isArray(instance)) {
instances.push(...instance)
} else if (instance) {
instances.push(instance)
}
return instances
}, [] as Array<SdkInstance>)
}
const getComponentInstances = (slctr: string): Array<SdkInstance> => {
if (resolveSelectorType(slctr) === 'type') {
const connections = _.flatMap(Object.values(modelsApi.getControllerConnections(controllerId)))
return getInstancesForType(slctr, connections)
}
const roleOrId = slctr.slice(1)
return getInstancesForRole(roleOrId, findOnlyNestedComponents)
}
const selectors = selector.split(',').map((s) => s.trim())
const instances = _.chain(selectors)
.map(getComponentInstances)
.flatMap()
.uniqBy('uniqueId') // all SdkInstance have id
.value()
if (selectors.length === 1 && resolveSelectorType(selector) === 'nickname') {
return _.first(instances) || []
}
return instancesObjectFactory(instances)
}
const $wFactory: WixSelector['$wFactory'] = (controllerId: string, getInstancesForRole, repeaterId): $W => {
const wixSelectorInternal = (selector: string, { findOnlyNestedComponents } = { findOnlyNestedComponents: false }) => {
if (selector === 'Document') {
return $wDocument(controllerId)
}
return $wComponent(selector, controllerId, { getInstancesForRole, findOnlyNestedComponents })
}
const $w = (selector: string) => wixSelectorInternal(selector)
const controllerEvents | role,
compType, | random_line_split |
wixSelector.ts | Connection
$wScope?: $WScope
itemId?: string
}) => SdkInstance | Array<SdkInstance> | null
$wFactory: (controllerId: string, getInstancesForRole: GetInstanceFunction, repeaterId?: string) => $W
flushOnReadyCallbacks: () => Promise<any>
onPageReady: (onReadyCallback: () => Promise<any>, controllerId: string) => void
create$wRepeaterScope(params: { compId: string; itemId: string }): $WScope
}
const resolveSelectorType = (rawSelector: string): 'role' | 'nickname' | 'type' => {
switch (rawSelector[0]) {
case '@':
return 'role'
case '#':
return 'nickname'
default:
return 'type'
}
}
export default function ({
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
bootstrapData,
}: {
modelsApi: ModelsAPI
getSdkFactoryParams: SdkFactoryParams['getSdkFactoryParams']
controllerEventsFactory: IControllerEvents
sdkInstancesCache: InstanceCacheFactory
componentSdksManager: ComponentSdksManager
logger: PlatformLogger
bootstrapData: BootstrapData
}) {
// Controls whether to queue or execute onReady callbacks.
let isFlushingOnReadyCallbacks = false
let onReadyCallbacks: { [controllerCompId: string]: Array<() => void> } = {}
const create$wRepeaterScope = ({ compId, itemId }: { compId: string; itemId: string }) => ({ type: EVENT_CONTEXT_SCOPE.COMPONENT_SCOPE, id: compId, compId, additionalData: { itemId } })
const create$wGlobalScope = () => ({ type: EVENT_CONTEXT_SCOPE.GLOBAL_SCOPE, additionalData: {} })
// self-exection function, allow caching the calculations of mapping all componentTypes to their sdkType
const resolveCompTypeForSdkType = (() => {
const _cache: Record<string, Record<string, boolean>> = {}
return (sdkType: string, compId: string) => {
const fromCache = () => {
const compType = modelsApi.getCompType(compId)
return compType && _cache[sdkType][compType] ? compType : null
}
if (_cache[sdkType]) {
return fromCache()
}
_cache[sdkType] = componentSdksManager.getSdkTypeToComponentTypes(sdkType).reduce(
(result, _compType) => ({
...result,
[_compType]: true,
}),
{} as Record<string, boolean>
)
return fromCache()
}
})()
const invokeControllerOnReady = (controllerCompId: string) => {
// It's possible to have a controller without an onReady Callback, for example wix code without any $w.onReady().
if (!onReadyCallbacks[controllerCompId]) {
return Promise.resolve()
}
const promises = onReadyCallbacks[controllerCompId].map((onReady) => onReady())
return promises
}
const flushOnReadyCallbacks = async () => {
await componentSdksManager.waitForSdksToLoad()
isFlushingOnReadyCallbacks = true
const onReadyPromise = Promise.all(_.flatMap(modelsApi.getControllers(), invokeControllerOnReady))
onReadyCallbacks = {}
return onReadyPromise
}
function getInstance({
controllerCompId,
compId,
connection,
compType,
role,
$wScope = create$wGlobalScope(),
itemId,
}: {
controllerCompId: string
compId: string
compType: string
connection?: Connection
role: string
$wScope?: $WScope
itemId?: string
}): SdkInstance | Array<SdkInstance> | null {
const compCacheParams: CompCacheParams = {
controllerCompId,
compId: getFullId(compId),
role,
itemId: itemId ?? getItemId(compId),
}
const instanceFromCache = sdkInstancesCache.getSdkInstance(compCacheParams)
if (instanceFromCache) {
return instanceFromCache
}
modelsApi.updateDisplayedIdPropsFromTemplate(compId)
const componentSdkFactory = componentSdksManager.getComponentSdkFactory(compType, { compId, role, controllerCompId })
if (!componentSdkFactory) {
return {}
}
const sdkFactoryParams = getSdkFactoryParams({
$wScope,
compId,
controllerCompId,
connection,
compType,
role,
getInstance,
create$w: () => create$w(controllerCompId),
})
const instance = componentSdkFactory(sdkFactoryParams)
sdkInstancesCache.setSdkInstance(compCacheParams, instance)
return instance
}
function queueOnReadyCallback(onReadyCallback: () => Promise<any>, controllerId: string) {
onReadyCallbacks[controllerId] = onReadyCallbacks[controllerId] || []
onReadyCallbacks[controllerId].push(onReadyCallback)
}
const createInstancesGetter = (controllerId: string): GetInstanceFunction => (role: string) => {
const connections = modelsApi.getConnectionsByCompId(controllerId, role)
return connections.map((connection: Connection) => {
const compId = connection.compId
const compType = modelsApi.getCompType(compId)
if (!compType) {
logger.captureError(new Error('$W Error 2: Failed to find component from connection in structure'), {
tags: {
GetInstanceFunction: true,
},
extra: {
controllerCompId: controllerId,
role,
compId,
structureModel: modelsApi.getStructureModel(),
connection,
currentPageId: bootstrapData.currentPageId,
currentContextId: bootstrapData.currentContextId,
},
})
return {}
}
return getInstance({
controllerCompId: controllerId,
compId,
connection,
role,
compType,
})
})
}
const $wDocument = (controllerId: string) => {
const DocumentSdkFactory = componentSdksManager.getComponentSdkFactory('Document', { compId: 'Document', controllerCompId: controllerId, role: 'Document' })
if (!DocumentSdkFactory) {
return
}
return DocumentSdkFactory(
getSdkFactoryParams({
compId: controllerId,
controllerCompId: controllerId,
compType: 'Document',
role: 'Document',
getInstance,
create$w: () => create$w(controllerId),
$wScope: create$wGlobalScope(),
})
)
}
const $wComponent = (
selector: string,
controllerId: string,
{ getInstancesForRole, findOnlyNestedComponents }: { getInstancesForRole: GetInstanceFunction; findOnlyNestedComponents: boolean }
) => {
const getInstancesForType = (sdkType: string, connections: Array<Connection>): Array<SdkInstance> => {
return connections.reduce((instances, connection) => {
const { compId, role } = connection
const compType = resolveCompTypeForSdkType(sdkType, compId)
if (!compType) {
return instances
}
const instance: SdkInstance | Array<SdkInstance> | null = getInstance({
controllerCompId: controllerId,
compId,
connection,
role,
compType,
})
if (_.isArray(instance)) {
instances.push(...instance)
} else if (instance) {
instances.push(instance)
}
return instances
}, [] as Array<SdkInstance>)
}
const getComponentInstances = (slctr: string): Array<SdkInstance> => {
if (resolveSelectorType(slctr) === 'type') {
const connections = _.flatMap(Object.values(modelsApi.getControllerConnections(controllerId)))
return getInstancesForType(slctr, connections)
}
const roleOrId = slctr.slice(1)
return getInstancesForRole(roleOrId, findOnlyNestedComponents)
}
const selectors = selector.split(',').map((s) => s.trim())
const instances = _.chain(selectors)
.map(getComponentInstances)
.flatMap()
.uniqBy('uniqueId') // all SdkInstance have id
.value()
if (selectors.length === 1 && resolveSelectorType(selector) === 'nickname') |
return instancesObjectFactory(instances)
}
const $wFactory: WixSelector['$wFactory'] = (controllerId: string, getInstancesForRole, repeaterId): $W => {
const wixSelectorInternal = (selector: string, { findOnlyNestedComponents } = { findOnlyNestedComponents: false }) => {
if (selector === 'Document') {
return $wDocument(controllerId)
}
return $wComponent(selector, controllerId, { getInstancesForRole, findOnlyNestedComponents })
}
const $w = (selector: string) => wixSelectorInternal(selector)
const controller | {
return _.first(instances) || []
} | conditional_block |
wixSelector.ts | Connection
$wScope?: $WScope
itemId?: string
}) => SdkInstance | Array<SdkInstance> | null
$wFactory: (controllerId: string, getInstancesForRole: GetInstanceFunction, repeaterId?: string) => $W
flushOnReadyCallbacks: () => Promise<any>
onPageReady: (onReadyCallback: () => Promise<any>, controllerId: string) => void
create$wRepeaterScope(params: { compId: string; itemId: string }): $WScope
}
const resolveSelectorType = (rawSelector: string): 'role' | 'nickname' | 'type' => {
switch (rawSelector[0]) {
case '@':
return 'role'
case '#':
return 'nickname'
default:
return 'type'
}
}
export default function ({
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
bootstrapData,
}: {
modelsApi: ModelsAPI
getSdkFactoryParams: SdkFactoryParams['getSdkFactoryParams']
controllerEventsFactory: IControllerEvents
sdkInstancesCache: InstanceCacheFactory
componentSdksManager: ComponentSdksManager
logger: PlatformLogger
bootstrapData: BootstrapData
}) {
// Controls whether to queue or execute onReady callbacks.
let isFlushingOnReadyCallbacks = false
let onReadyCallbacks: { [controllerCompId: string]: Array<() => void> } = {}
const create$wRepeaterScope = ({ compId, itemId }: { compId: string; itemId: string }) => ({ type: EVENT_CONTEXT_SCOPE.COMPONENT_SCOPE, id: compId, compId, additionalData: { itemId } })
const create$wGlobalScope = () => ({ type: EVENT_CONTEXT_SCOPE.GLOBAL_SCOPE, additionalData: {} })
// self-exection function, allow caching the calculations of mapping all componentTypes to their sdkType
const resolveCompTypeForSdkType = (() => {
const _cache: Record<string, Record<string, boolean>> = {}
return (sdkType: string, compId: string) => {
const fromCache = () => {
const compType = modelsApi.getCompType(compId)
return compType && _cache[sdkType][compType] ? compType : null
}
if (_cache[sdkType]) {
return fromCache()
}
_cache[sdkType] = componentSdksManager.getSdkTypeToComponentTypes(sdkType).reduce(
(result, _compType) => ({
...result,
[_compType]: true,
}),
{} as Record<string, boolean>
)
return fromCache()
}
})()
const invokeControllerOnReady = (controllerCompId: string) => {
// It's possible to have a controller without an onReady Callback, for example wix code without any $w.onReady().
if (!onReadyCallbacks[controllerCompId]) {
return Promise.resolve()
}
const promises = onReadyCallbacks[controllerCompId].map((onReady) => onReady())
return promises
}
const flushOnReadyCallbacks = async () => {
await componentSdksManager.waitForSdksToLoad()
isFlushingOnReadyCallbacks = true
const onReadyPromise = Promise.all(_.flatMap(modelsApi.getControllers(), invokeControllerOnReady))
onReadyCallbacks = {}
return onReadyPromise
}
function getInstance({
controllerCompId,
compId,
connection,
compType,
role,
$wScope = create$wGlobalScope(),
itemId,
}: {
controllerCompId: string
compId: string
compType: string
connection?: Connection
role: string
$wScope?: $WScope
itemId?: string
}): SdkInstance | Array<SdkInstance> | null {
const compCacheParams: CompCacheParams = {
controllerCompId,
compId: getFullId(compId),
role,
itemId: itemId ?? getItemId(compId),
}
const instanceFromCache = sdkInstancesCache.getSdkInstance(compCacheParams)
if (instanceFromCache) {
return instanceFromCache
}
modelsApi.updateDisplayedIdPropsFromTemplate(compId)
const componentSdkFactory = componentSdksManager.getComponentSdkFactory(compType, { compId, role, controllerCompId })
if (!componentSdkFactory) {
return {}
}
const sdkFactoryParams = getSdkFactoryParams({
$wScope,
compId,
controllerCompId,
connection,
compType,
role,
getInstance,
create$w: () => create$w(controllerCompId),
})
const instance = componentSdkFactory(sdkFactoryParams)
sdkInstancesCache.setSdkInstance(compCacheParams, instance)
return instance
}
function queueOnReadyCallback(onReadyCallback: () => Promise<any>, controllerId: string) |
const createInstancesGetter = (controllerId: string): GetInstanceFunction => (role: string) => {
const connections = modelsApi.getConnectionsByCompId(controllerId, role)
return connections.map((connection: Connection) => {
const compId = connection.compId
const compType = modelsApi.getCompType(compId)
if (!compType) {
logger.captureError(new Error('$W Error 2: Failed to find component from connection in structure'), {
tags: {
GetInstanceFunction: true,
},
extra: {
controllerCompId: controllerId,
role,
compId,
structureModel: modelsApi.getStructureModel(),
connection,
currentPageId: bootstrapData.currentPageId,
currentContextId: bootstrapData.currentContextId,
},
})
return {}
}
return getInstance({
controllerCompId: controllerId,
compId,
connection,
role,
compType,
})
})
}
const $wDocument = (controllerId: string) => {
const DocumentSdkFactory = componentSdksManager.getComponentSdkFactory('Document', { compId: 'Document', controllerCompId: controllerId, role: 'Document' })
if (!DocumentSdkFactory) {
return
}
return DocumentSdkFactory(
getSdkFactoryParams({
compId: controllerId,
controllerCompId: controllerId,
compType: 'Document',
role: 'Document',
getInstance,
create$w: () => create$w(controllerId),
$wScope: create$wGlobalScope(),
})
)
}
const $wComponent = (
selector: string,
controllerId: string,
{ getInstancesForRole, findOnlyNestedComponents }: { getInstancesForRole: GetInstanceFunction; findOnlyNestedComponents: boolean }
) => {
const getInstancesForType = (sdkType: string, connections: Array<Connection>): Array<SdkInstance> => {
return connections.reduce((instances, connection) => {
const { compId, role } = connection
const compType = resolveCompTypeForSdkType(sdkType, compId)
if (!compType) {
return instances
}
const instance: SdkInstance | Array<SdkInstance> | null = getInstance({
controllerCompId: controllerId,
compId,
connection,
role,
compType,
})
if (_.isArray(instance)) {
instances.push(...instance)
} else if (instance) {
instances.push(instance)
}
return instances
}, [] as Array<SdkInstance>)
}
const getComponentInstances = (slctr: string): Array<SdkInstance> => {
if (resolveSelectorType(slctr) === 'type') {
const connections = _.flatMap(Object.values(modelsApi.getControllerConnections(controllerId)))
return getInstancesForType(slctr, connections)
}
const roleOrId = slctr.slice(1)
return getInstancesForRole(roleOrId, findOnlyNestedComponents)
}
const selectors = selector.split(',').map((s) => s.trim())
const instances = _.chain(selectors)
.map(getComponentInstances)
.flatMap()
.uniqBy('uniqueId') // all SdkInstance have id
.value()
if (selectors.length === 1 && resolveSelectorType(selector) === 'nickname') {
return _.first(instances) || []
}
return instancesObjectFactory(instances)
}
const $wFactory: WixSelector['$wFactory'] = (controllerId: string, getInstancesForRole, repeaterId): $W => {
const wixSelectorInternal = (selector: string, { findOnlyNestedComponents } = { findOnlyNestedComponents: false }) => {
if (selector === 'Document') {
return $wDocument(controllerId)
}
return $wComponent(selector, controllerId, { getInstancesForRole, findOnlyNestedComponents })
}
const $w = (selector: string) => wixSelectorInternal(selector)
const controller | {
onReadyCallbacks[controllerId] = onReadyCallbacks[controllerId] || []
onReadyCallbacks[controllerId].push(onReadyCallback)
} | identifier_body |
wixSelector.ts | ?: Connection
$wScope?: $WScope
itemId?: string
}) => SdkInstance | Array<SdkInstance> | null
$wFactory: (controllerId: string, getInstancesForRole: GetInstanceFunction, repeaterId?: string) => $W
flushOnReadyCallbacks: () => Promise<any>
onPageReady: (onReadyCallback: () => Promise<any>, controllerId: string) => void
create$wRepeaterScope(params: { compId: string; itemId: string }): $WScope
}
const resolveSelectorType = (rawSelector: string): 'role' | 'nickname' | 'type' => {
switch (rawSelector[0]) {
case '@':
return 'role'
case '#':
return 'nickname'
default:
return 'type'
}
}
export default function ({
modelsApi,
getSdkFactoryParams,
controllerEventsFactory,
sdkInstancesCache,
componentSdksManager,
logger,
bootstrapData,
}: {
modelsApi: ModelsAPI
getSdkFactoryParams: SdkFactoryParams['getSdkFactoryParams']
controllerEventsFactory: IControllerEvents
sdkInstancesCache: InstanceCacheFactory
componentSdksManager: ComponentSdksManager
logger: PlatformLogger
bootstrapData: BootstrapData
}) {
// Controls whether to queue or execute onReady callbacks.
let isFlushingOnReadyCallbacks = false
let onReadyCallbacks: { [controllerCompId: string]: Array<() => void> } = {}
const create$wRepeaterScope = ({ compId, itemId }: { compId: string; itemId: string }) => ({ type: EVENT_CONTEXT_SCOPE.COMPONENT_SCOPE, id: compId, compId, additionalData: { itemId } })
const create$wGlobalScope = () => ({ type: EVENT_CONTEXT_SCOPE.GLOBAL_SCOPE, additionalData: {} })
// self-exection function, allow caching the calculations of mapping all componentTypes to their sdkType
const resolveCompTypeForSdkType = (() => {
const _cache: Record<string, Record<string, boolean>> = {}
return (sdkType: string, compId: string) => {
const fromCache = () => {
const compType = modelsApi.getCompType(compId)
return compType && _cache[sdkType][compType] ? compType : null
}
if (_cache[sdkType]) {
return fromCache()
}
_cache[sdkType] = componentSdksManager.getSdkTypeToComponentTypes(sdkType).reduce(
(result, _compType) => ({
...result,
[_compType]: true,
}),
{} as Record<string, boolean>
)
return fromCache()
}
})()
const invokeControllerOnReady = (controllerCompId: string) => {
// It's possible to have a controller without an onReady Callback, for example wix code without any $w.onReady().
if (!onReadyCallbacks[controllerCompId]) {
return Promise.resolve()
}
const promises = onReadyCallbacks[controllerCompId].map((onReady) => onReady())
return promises
}
const flushOnReadyCallbacks = async () => {
await componentSdksManager.waitForSdksToLoad()
isFlushingOnReadyCallbacks = true
const onReadyPromise = Promise.all(_.flatMap(modelsApi.getControllers(), invokeControllerOnReady))
onReadyCallbacks = {}
return onReadyPromise
}
function getInstance({
controllerCompId,
compId,
connection,
compType,
role,
$wScope = create$wGlobalScope(),
itemId,
}: {
controllerCompId: string
compId: string
compType: string
connection?: Connection
role: string
$wScope?: $WScope
itemId?: string
}): SdkInstance | Array<SdkInstance> | null {
const compCacheParams: CompCacheParams = {
controllerCompId,
compId: getFullId(compId),
role,
itemId: itemId ?? getItemId(compId),
}
const instanceFromCache = sdkInstancesCache.getSdkInstance(compCacheParams)
if (instanceFromCache) {
return instanceFromCache
}
modelsApi.updateDisplayedIdPropsFromTemplate(compId)
const componentSdkFactory = componentSdksManager.getComponentSdkFactory(compType, { compId, role, controllerCompId })
if (!componentSdkFactory) {
return {}
}
const sdkFactoryParams = getSdkFactoryParams({
$wScope,
compId,
controllerCompId,
connection,
compType,
role,
getInstance,
create$w: () => create$w(controllerCompId),
})
const instance = componentSdkFactory(sdkFactoryParams)
sdkInstancesCache.setSdkInstance(compCacheParams, instance)
return instance
}
function | (onReadyCallback: () => Promise<any>, controllerId: string) {
onReadyCallbacks[controllerId] = onReadyCallbacks[controllerId] || []
onReadyCallbacks[controllerId].push(onReadyCallback)
}
const createInstancesGetter = (controllerId: string): GetInstanceFunction => (role: string) => {
const connections = modelsApi.getConnectionsByCompId(controllerId, role)
return connections.map((connection: Connection) => {
const compId = connection.compId
const compType = modelsApi.getCompType(compId)
if (!compType) {
logger.captureError(new Error('$W Error 2: Failed to find component from connection in structure'), {
tags: {
GetInstanceFunction: true,
},
extra: {
controllerCompId: controllerId,
role,
compId,
structureModel: modelsApi.getStructureModel(),
connection,
currentPageId: bootstrapData.currentPageId,
currentContextId: bootstrapData.currentContextId,
},
})
return {}
}
return getInstance({
controllerCompId: controllerId,
compId,
connection,
role,
compType,
})
})
}
const $wDocument = (controllerId: string) => {
const DocumentSdkFactory = componentSdksManager.getComponentSdkFactory('Document', { compId: 'Document', controllerCompId: controllerId, role: 'Document' })
if (!DocumentSdkFactory) {
return
}
return DocumentSdkFactory(
getSdkFactoryParams({
compId: controllerId,
controllerCompId: controllerId,
compType: 'Document',
role: 'Document',
getInstance,
create$w: () => create$w(controllerId),
$wScope: create$wGlobalScope(),
})
)
}
const $wComponent = (
selector: string,
controllerId: string,
{ getInstancesForRole, findOnlyNestedComponents }: { getInstancesForRole: GetInstanceFunction; findOnlyNestedComponents: boolean }
) => {
const getInstancesForType = (sdkType: string, connections: Array<Connection>): Array<SdkInstance> => {
return connections.reduce((instances, connection) => {
const { compId, role } = connection
const compType = resolveCompTypeForSdkType(sdkType, compId)
if (!compType) {
return instances
}
const instance: SdkInstance | Array<SdkInstance> | null = getInstance({
controllerCompId: controllerId,
compId,
connection,
role,
compType,
})
if (_.isArray(instance)) {
instances.push(...instance)
} else if (instance) {
instances.push(instance)
}
return instances
}, [] as Array<SdkInstance>)
}
const getComponentInstances = (slctr: string): Array<SdkInstance> => {
if (resolveSelectorType(slctr) === 'type') {
const connections = _.flatMap(Object.values(modelsApi.getControllerConnections(controllerId)))
return getInstancesForType(slctr, connections)
}
const roleOrId = slctr.slice(1)
return getInstancesForRole(roleOrId, findOnlyNestedComponents)
}
const selectors = selector.split(',').map((s) => s.trim())
const instances = _.chain(selectors)
.map(getComponentInstances)
.flatMap()
.uniqBy('uniqueId') // all SdkInstance have id
.value()
if (selectors.length === 1 && resolveSelectorType(selector) === 'nickname') {
return _.first(instances) || []
}
return instancesObjectFactory(instances)
}
const $wFactory: WixSelector['$wFactory'] = (controllerId: string, getInstancesForRole, repeaterId): $W => {
const wixSelectorInternal = (selector: string, { findOnlyNestedComponents } = { findOnlyNestedComponents: false }) => {
if (selector === 'Document') {
return $wDocument(controllerId)
}
return $wComponent(selector, controllerId, { getInstancesForRole, findOnlyNestedComponents })
}
const $w = (selector: string) => wixSelectorInternal(selector)
const controller | queueOnReadyCallback | identifier_name |
train.py | parser
def program_config(parser):
# ------ Add new params here ------>
parser.add_argument('--max_seq_len', default=cfg.max_seq_len, type=int)
parser.add_argument('--test_ratio', default=cfg.test_ratio, type=float)
parser.add_argument('--hidden_dim', default=cfg.hidden_dim, type=int)
parser.add_argument('--batch_size', default=cfg.batch_size, type=int)
parser.add_argument('--num_epochs', default=cfg.num_epochs, type=int)
parser.add_argument('--check_interval', default=cfg.check_interval, type=int)
parser.add_argument('--lr', default=cfg.lr, type=float)
parser.add_argument('--sch_factor', default=cfg.sch_factor, type=float)
parser.add_argument('--sch_patience', default=cfg.sch_patience, type=int)
parser.add_argument('--sch_verbose', default=cfg.sch_verbose, type=bool)
parser.add_argument('--device', default=cfg.device, type=str)
parser.add_argument('--emb_model_dir', default=cfg.emb_model_dir, type=str)
parser.add_argument('--lyrics_dir', default=cfg.lyrics_dir, type=str)
parser.add_argument('--pretrained_lm_dir', default=cfg.pretrained_lm_dir, type=str)
parser.add_argument('--save_lm_dir', default=cfg.save_lm_dir, type=str)
parser.add_argument('--save_tr_l_dir', default=cfg.save_tr_l_dir, type=str)
parser.add_argument('--save_tr_a_dir', default=cfg.save_tr_a_dir, type=str)
parser.add_argument('--save_tst_l_dir', default=cfg.save_tst_l_dir, type=str)
parser.add_argument('--save_tst_a_dir', default=cfg.save_tst_a_dir, type=str)
parser.add_argument('--save_log_dir', default=cfg.save_log_dir, type=str)
return parser
# Define training method
def train_dis_epoch(epoch, model, train_loader, criterion, optimizer):
train_losses, train_accs = [], []
total_loss, total_acc = 0, 0
model.train()
for i, (feature, target) in enumerate(train_loader):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_acc += 100 * torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
if (i + 1) % cfg.check_interval == 0:
train_losses.append(total_loss / (i + 1))
train_accs.append(total_acc / (i + 1))
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
return train_losses, train_accs
# Define testing method
def test(model, test_loader, criterion):
total_loss, total_acc = 0, 0
model.eval()
with torch.no_grad():
for feature, target in tqdm(test_loader, desc='Test'):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += 100 * \
torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
return total_loss / len(test_loader), total_acc / len(test_loader)
# Main
if __name__ == '__main__':
# Hyper parameters and configs
parser = argparse.ArgumentParser()
parser = program_config(parser)
opt = parser.parse_args()
cfg.init_param(opt)
# Get word2vec dict with embedding model
cfg.logger.info('Loading embedding model.')
wv_dict = Word2VecDict(Word2Vec.load(cfg.emb_model_dir))
# Load lyrics data, then delete any lyric whose length's greater than max_seq_len
cfg.logger.info('Loading lyrics data.')
with open(cfg.lyrics_dir, 'r') as f:
lyrics_dict = f.read()
lyrics_dict = json.loads(lyrics_dict)
data = []
for key, val in tqdm(lyrics_dict.items()): # val is a batch
cur_seq_len = len(val)
if cur_seq_len <= cfg.max_seq_len:
data.append(val)
# Uncomment this part to train the partial dataset
# data = data[:100]
| data_train = data[:num_train]
data_test = data[num_train:]
# Torch dataset and dataloader
train_dataset = LanguageDataset(data_train, wv_dict, padding, cfg.max_seq_len)
train_loader = DataLoader(dataset=train_dataset, batch_size=cfg.batch_size, shuffle=False)
if cfg.test_ratio > 0:
test_dataset = LanguageDataset(data_test, wv_dict, padding, cfg.max_seq_len)
test_loader = DataLoader(dataset=test_dataset, batch_size=cfg.batch_size, shuffle=False)
vocab_size = len(wv_dict.emb_model.wv.vocab) + 1
# Uncomment this part to check the validity of the dataloader
# for minibatch in train_loader:
# features, targets = minibatch
# print(features.size(), targets.size())
# for i, (f, t) in enumerate(zip(features, targets)): # minibatch (one lyric)
# for (wv_f, idx_t) in zip(f, t): # word vector of feature, index of target
# print(wv_dict.index2word(wv_dict.vector2index(wv_f.numpy())), wv_dict.index2word(int(idx_t.item())))
# Print basic info
cfg.logger.debug('Number of lyrics (Valid / Total): {} / {}'.format(len(data), len(lyrics_dict)))
cfg.logger.debug('Training / testing size: {} / {}'.format(len(data_train), len(data_test)))
cfg.logger.debug('Testing set ratio: {}'.format(cfg.test_ratio))
cfg.logger.debug('Total vocabulary size including paddings: {}'.format(vocab_size))
cfg.logger.debug('Max sequence length: {}'.format(cfg.max_seq_len))
cfg.logger.debug('Hidden dimension: {}'.format(cfg.hidden_dim))
cfg.logger.debug('Batch size: {}'.format(cfg.batch_size))
cfg.logger.debug('Total epochs: {}'.format(cfg.num_epochs))
cfg.logger.debug('Intervals to check: {}'.format(cfg.check_interval))
cfg.logger.debug('Learning rate: {}'.format(cfg.lr))
cfg.logger.debug('Schedular factor: {}'.format(cfg.sch_factor))
cfg.logger.debug('Schedular patience: {}'.format(cfg.sch_patience))
cfg.logger.debug('Schedular verbose: {}'.format(cfg.sch_verbose))
cfg.logger.debug('Device: {}'.format(cfg.device))
cfg.logger.debug('Embedding model directory: {}'.format(cfg.emb_model_dir))
cfg.logger.debug('Lyrics data directory: {}'.format(cfg.lyrics_dir))
if cfg.pretrained_lm_dir:
cfg.logger.debug('Pre-trained language model: {}'.format(cfg.pretrained_lm_dir))
else:
cfg.logger.debug('Pre-trained language model: initial training')
# Training
language_model = LanguageModel(wv_dict, cfg.hidden_dim).to(cfg.device)
criterion = nn.NLLLoss()
optimizer = optim.Adam(language_model.parameters(), lr=cfg.lr)
schedular = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=cfg.sch_factor, patience=cfg.sch_patience, verbose=cfg.sch_verbose)
if cfg.pretrained_lm_dir:
lm_loading_res = language_model.load_state_dict(torch.load(cfg.pretrained_lm_dir))
cfg.logger.debug('Loading language model: {}'.format(lm_loading_res))
train_losses, train_accs = [], [] # losses & accuracies to save
if cfg.test_ratio > 0:
test_losses, test_accs = [], []
cfg.logger.info('Training.')
for epoch in range(1, cfg.num_epochs + 1):
train_losses_, train_accs_ = train_dis_epoch(epoch, language_model, train_loader, criterion, optimizer)
train_losses += train_losses_
train_accs += train_accs_
| # Split data into training and testing sets
num_train = int(len(data) * (1 - cfg.test_ratio)) | random_line_split |
train.py | parser
def program_config(parser):
# ------ Add new params here ------>
parser.add_argument('--max_seq_len', default=cfg.max_seq_len, type=int)
parser.add_argument('--test_ratio', default=cfg.test_ratio, type=float)
parser.add_argument('--hidden_dim', default=cfg.hidden_dim, type=int)
parser.add_argument('--batch_size', default=cfg.batch_size, type=int)
parser.add_argument('--num_epochs', default=cfg.num_epochs, type=int)
parser.add_argument('--check_interval', default=cfg.check_interval, type=int)
parser.add_argument('--lr', default=cfg.lr, type=float)
parser.add_argument('--sch_factor', default=cfg.sch_factor, type=float)
parser.add_argument('--sch_patience', default=cfg.sch_patience, type=int)
parser.add_argument('--sch_verbose', default=cfg.sch_verbose, type=bool)
parser.add_argument('--device', default=cfg.device, type=str)
parser.add_argument('--emb_model_dir', default=cfg.emb_model_dir, type=str)
parser.add_argument('--lyrics_dir', default=cfg.lyrics_dir, type=str)
parser.add_argument('--pretrained_lm_dir', default=cfg.pretrained_lm_dir, type=str)
parser.add_argument('--save_lm_dir', default=cfg.save_lm_dir, type=str)
parser.add_argument('--save_tr_l_dir', default=cfg.save_tr_l_dir, type=str)
parser.add_argument('--save_tr_a_dir', default=cfg.save_tr_a_dir, type=str)
parser.add_argument('--save_tst_l_dir', default=cfg.save_tst_l_dir, type=str)
parser.add_argument('--save_tst_a_dir', default=cfg.save_tst_a_dir, type=str)
parser.add_argument('--save_log_dir', default=cfg.save_log_dir, type=str)
return parser
# Define training method
def train_dis_epoch(epoch, model, train_loader, criterion, optimizer):
train_losses, train_accs = [], []
total_loss, total_acc = 0, 0
model.train()
for i, (feature, target) in enumerate(train_loader):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_acc += 100 * torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
if (i + 1) % cfg.check_interval == 0:
train_losses.append(total_loss / (i + 1))
train_accs.append(total_acc / (i + 1))
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
return train_losses, train_accs
# Define testing method
def test(model, test_loader, criterion):
|
# Main
if __name__ == '__main__':
# Hyper parameters and configs
parser = argparse.ArgumentParser()
parser = program_config(parser)
opt = parser.parse_args()
cfg.init_param(opt)
# Get word2vec dict with embedding model
cfg.logger.info('Loading embedding model.')
wv_dict = Word2VecDict(Word2Vec.load(cfg.emb_model_dir))
# Load lyrics data, then delete any lyric whose length's greater than max_seq_len
cfg.logger.info('Loading lyrics data.')
with open(cfg.lyrics_dir, 'r') as f:
lyrics_dict = f.read()
lyrics_dict = json.loads(lyrics_dict)
data = []
for key, val in tqdm(lyrics_dict.items()): # val is a batch
cur_seq_len = len(val)
if cur_seq_len <= cfg.max_seq_len:
data.append(val)
# Uncomment this part to train the partial dataset
# data = data[:100]
# Split data into training and testing sets
num_train = int(len(data) * (1 - cfg.test_ratio))
data_train = data[:num_train]
data_test = data[num_train:]
# Torch dataset and dataloader
train_dataset = LanguageDataset(data_train, wv_dict, padding, cfg.max_seq_len)
train_loader = DataLoader(dataset=train_dataset, batch_size=cfg.batch_size, shuffle=False)
if cfg.test_ratio > 0:
test_dataset = LanguageDataset(data_test, wv_dict, padding, cfg.max_seq_len)
test_loader = DataLoader(dataset=test_dataset, batch_size=cfg.batch_size, shuffle=False)
vocab_size = len(wv_dict.emb_model.wv.vocab) + 1
# Uncomment this part to check the validity of the dataloader
# for minibatch in train_loader:
# features, targets = minibatch
# print(features.size(), targets.size())
# for i, (f, t) in enumerate(zip(features, targets)): # minibatch (one lyric)
# for (wv_f, idx_t) in zip(f, t): # word vector of feature, index of target
# print(wv_dict.index2word(wv_dict.vector2index(wv_f.numpy())), wv_dict.index2word(int(idx_t.item())))
# Print basic info
cfg.logger.debug('Number of lyrics (Valid / Total): {} / {}'.format(len(data), len(lyrics_dict)))
cfg.logger.debug('Training / testing size: {} / {}'.format(len(data_train), len(data_test)))
cfg.logger.debug('Testing set ratio: {}'.format(cfg.test_ratio))
cfg.logger.debug('Total vocabulary size including paddings: {}'.format(vocab_size))
cfg.logger.debug('Max sequence length: {}'.format(cfg.max_seq_len))
cfg.logger.debug('Hidden dimension: {}'.format(cfg.hidden_dim))
cfg.logger.debug('Batch size: {}'.format(cfg.batch_size))
cfg.logger.debug('Total epochs: {}'.format(cfg.num_epochs))
cfg.logger.debug('Intervals to check: {}'.format(cfg.check_interval))
cfg.logger.debug('Learning rate: {}'.format(cfg.lr))
cfg.logger.debug('Schedular factor: {}'.format(cfg.sch_factor))
cfg.logger.debug('Schedular patience: {}'.format(cfg.sch_patience))
cfg.logger.debug('Schedular verbose: {}'.format(cfg.sch_verbose))
cfg.logger.debug('Device: {}'.format(cfg.device))
cfg.logger.debug('Embedding model directory: {}'.format(cfg.emb_model_dir))
cfg.logger.debug('Lyrics data directory: {}'.format(cfg.lyrics_dir))
if cfg.pretrained_lm_dir:
cfg.logger.debug('Pre-trained language model: {}'.format(cfg.pretrained_lm_dir))
else:
cfg.logger.debug('Pre-trained language model: initial training')
# Training
language_model = LanguageModel(wv_dict, cfg.hidden_dim).to(cfg.device)
criterion = nn.NLLLoss()
optimizer = optim.Adam(language_model.parameters(), lr=cfg.lr)
schedular = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=cfg.sch_factor, patience=cfg.sch_patience, verbose=cfg.sch_verbose)
if cfg.pretrained_lm_dir:
lm_loading_res = language_model.load_state_dict(torch.load(cfg.pretrained_lm_dir))
cfg.logger.debug('Loading language model: {}'.format(lm_loading_res))
train_losses, train_accs = [], [] # losses & accuracies to save
if cfg.test_ratio > 0:
test_losses, test_accs = [], []
cfg.logger.info('Training.')
for epoch in range(1, cfg.num_epochs + 1):
train_losses_, train_accs_ = train_dis_epoch(epoch, language_model, train_loader, criterion, optimizer)
train_losses += train_losses_
train_accs += train_accs_
| total_loss, total_acc = 0, 0
model.eval()
with torch.no_grad():
for feature, target in tqdm(test_loader, desc='Test'):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += 100 * \
torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
return total_loss / len(test_loader), total_acc / len(test_loader) | identifier_body |
train.py | parser
def program_config(parser):
# ------ Add new params here ------>
parser.add_argument('--max_seq_len', default=cfg.max_seq_len, type=int)
parser.add_argument('--test_ratio', default=cfg.test_ratio, type=float)
parser.add_argument('--hidden_dim', default=cfg.hidden_dim, type=int)
parser.add_argument('--batch_size', default=cfg.batch_size, type=int)
parser.add_argument('--num_epochs', default=cfg.num_epochs, type=int)
parser.add_argument('--check_interval', default=cfg.check_interval, type=int)
parser.add_argument('--lr', default=cfg.lr, type=float)
parser.add_argument('--sch_factor', default=cfg.sch_factor, type=float)
parser.add_argument('--sch_patience', default=cfg.sch_patience, type=int)
parser.add_argument('--sch_verbose', default=cfg.sch_verbose, type=bool)
parser.add_argument('--device', default=cfg.device, type=str)
parser.add_argument('--emb_model_dir', default=cfg.emb_model_dir, type=str)
parser.add_argument('--lyrics_dir', default=cfg.lyrics_dir, type=str)
parser.add_argument('--pretrained_lm_dir', default=cfg.pretrained_lm_dir, type=str)
parser.add_argument('--save_lm_dir', default=cfg.save_lm_dir, type=str)
parser.add_argument('--save_tr_l_dir', default=cfg.save_tr_l_dir, type=str)
parser.add_argument('--save_tr_a_dir', default=cfg.save_tr_a_dir, type=str)
parser.add_argument('--save_tst_l_dir', default=cfg.save_tst_l_dir, type=str)
parser.add_argument('--save_tst_a_dir', default=cfg.save_tst_a_dir, type=str)
parser.add_argument('--save_log_dir', default=cfg.save_log_dir, type=str)
return parser
# Define training method
def train_dis_epoch(epoch, model, train_loader, criterion, optimizer):
train_losses, train_accs = [], []
total_loss, total_acc = 0, 0
model.train()
for i, (feature, target) in enumerate(train_loader):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_acc += 100 * torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
if (i + 1) % cfg.check_interval == 0:
train_losses.append(total_loss / (i + 1))
train_accs.append(total_acc / (i + 1))
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
return train_losses, train_accs
# Define testing method
def test(model, test_loader, criterion):
total_loss, total_acc = 0, 0
model.eval()
with torch.no_grad():
for feature, target in tqdm(test_loader, desc='Test'):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += 100 * \
torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
return total_loss / len(test_loader), total_acc / len(test_loader)
# Main
if __name__ == '__main__':
# Hyper parameters and configs
parser = argparse.ArgumentParser()
parser = program_config(parser)
opt = parser.parse_args()
cfg.init_param(opt)
# Get word2vec dict with embedding model
cfg.logger.info('Loading embedding model.')
wv_dict = Word2VecDict(Word2Vec.load(cfg.emb_model_dir))
# Load lyrics data, then delete any lyric whose length's greater than max_seq_len
cfg.logger.info('Loading lyrics data.')
with open(cfg.lyrics_dir, 'r') as f:
lyrics_dict = f.read()
lyrics_dict = json.loads(lyrics_dict)
data = []
for key, val in tqdm(lyrics_dict.items()): # val is a batch
cur_seq_len = len(val)
if cur_seq_len <= cfg.max_seq_len:
data.append(val)
# Uncomment this part to train the partial dataset
# data = data[:100]
# Split data into training and testing sets
num_train = int(len(data) * (1 - cfg.test_ratio))
data_train = data[:num_train]
data_test = data[num_train:]
# Torch dataset and dataloader
train_dataset = LanguageDataset(data_train, wv_dict, padding, cfg.max_seq_len)
train_loader = DataLoader(dataset=train_dataset, batch_size=cfg.batch_size, shuffle=False)
if cfg.test_ratio > 0:
test_dataset = LanguageDataset(data_test, wv_dict, padding, cfg.max_seq_len)
test_loader = DataLoader(dataset=test_dataset, batch_size=cfg.batch_size, shuffle=False)
vocab_size = len(wv_dict.emb_model.wv.vocab) + 1
# Uncomment this part to check the validity of the dataloader
# for minibatch in train_loader:
# features, targets = minibatch
# print(features.size(), targets.size())
# for i, (f, t) in enumerate(zip(features, targets)): # minibatch (one lyric)
# for (wv_f, idx_t) in zip(f, t): # word vector of feature, index of target
# print(wv_dict.index2word(wv_dict.vector2index(wv_f.numpy())), wv_dict.index2word(int(idx_t.item())))
# Print basic info
cfg.logger.debug('Number of lyrics (Valid / Total): {} / {}'.format(len(data), len(lyrics_dict)))
cfg.logger.debug('Training / testing size: {} / {}'.format(len(data_train), len(data_test)))
cfg.logger.debug('Testing set ratio: {}'.format(cfg.test_ratio))
cfg.logger.debug('Total vocabulary size including paddings: {}'.format(vocab_size))
cfg.logger.debug('Max sequence length: {}'.format(cfg.max_seq_len))
cfg.logger.debug('Hidden dimension: {}'.format(cfg.hidden_dim))
cfg.logger.debug('Batch size: {}'.format(cfg.batch_size))
cfg.logger.debug('Total epochs: {}'.format(cfg.num_epochs))
cfg.logger.debug('Intervals to check: {}'.format(cfg.check_interval))
cfg.logger.debug('Learning rate: {}'.format(cfg.lr))
cfg.logger.debug('Schedular factor: {}'.format(cfg.sch_factor))
cfg.logger.debug('Schedular patience: {}'.format(cfg.sch_patience))
cfg.logger.debug('Schedular verbose: {}'.format(cfg.sch_verbose))
cfg.logger.debug('Device: {}'.format(cfg.device))
cfg.logger.debug('Embedding model directory: {}'.format(cfg.emb_model_dir))
cfg.logger.debug('Lyrics data directory: {}'.format(cfg.lyrics_dir))
if cfg.pretrained_lm_dir:
cfg.logger.debug('Pre-trained language model: {}'.format(cfg.pretrained_lm_dir))
else:
cfg.logger.debug('Pre-trained language model: initial training')
# Training
language_model = LanguageModel(wv_dict, cfg.hidden_dim).to(cfg.device)
criterion = nn.NLLLoss()
optimizer = optim.Adam(language_model.parameters(), lr=cfg.lr)
schedular = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=cfg.sch_factor, patience=cfg.sch_patience, verbose=cfg.sch_verbose)
if cfg.pretrained_lm_dir:
lm_loading_res = language_model.load_state_dict(torch.load(cfg.pretrained_lm_dir))
cfg.logger.debug('Loading language model: {}'.format(lm_loading_res))
train_losses, train_accs = [], [] # losses & accuracies to save
if cfg.test_ratio > 0:
|
cfg.logger.info('Training.')
for epoch in range(1, cfg.num_epochs + 1):
train_losses_, train_accs_ = train_dis_epoch(epoch, language_model, train_loader, criterion, optimizer)
train_losses += train_losses_
train_accs += train_accs_
| test_losses, test_accs = [], [] | conditional_block |
train.py | parser
def | (parser):
# ------ Add new params here ------>
parser.add_argument('--max_seq_len', default=cfg.max_seq_len, type=int)
parser.add_argument('--test_ratio', default=cfg.test_ratio, type=float)
parser.add_argument('--hidden_dim', default=cfg.hidden_dim, type=int)
parser.add_argument('--batch_size', default=cfg.batch_size, type=int)
parser.add_argument('--num_epochs', default=cfg.num_epochs, type=int)
parser.add_argument('--check_interval', default=cfg.check_interval, type=int)
parser.add_argument('--lr', default=cfg.lr, type=float)
parser.add_argument('--sch_factor', default=cfg.sch_factor, type=float)
parser.add_argument('--sch_patience', default=cfg.sch_patience, type=int)
parser.add_argument('--sch_verbose', default=cfg.sch_verbose, type=bool)
parser.add_argument('--device', default=cfg.device, type=str)
parser.add_argument('--emb_model_dir', default=cfg.emb_model_dir, type=str)
parser.add_argument('--lyrics_dir', default=cfg.lyrics_dir, type=str)
parser.add_argument('--pretrained_lm_dir', default=cfg.pretrained_lm_dir, type=str)
parser.add_argument('--save_lm_dir', default=cfg.save_lm_dir, type=str)
parser.add_argument('--save_tr_l_dir', default=cfg.save_tr_l_dir, type=str)
parser.add_argument('--save_tr_a_dir', default=cfg.save_tr_a_dir, type=str)
parser.add_argument('--save_tst_l_dir', default=cfg.save_tst_l_dir, type=str)
parser.add_argument('--save_tst_a_dir', default=cfg.save_tst_a_dir, type=str)
parser.add_argument('--save_log_dir', default=cfg.save_log_dir, type=str)
return parser
# Define training method
def train_dis_epoch(epoch, model, train_loader, criterion, optimizer):
train_losses, train_accs = [], []
total_loss, total_acc = 0, 0
model.train()
for i, (feature, target) in enumerate(train_loader):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
total_acc += 100 * torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
if (i + 1) % cfg.check_interval == 0:
train_losses.append(total_loss / (i + 1))
train_accs.append(total_acc / (i + 1))
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
cfg.logger.debug(
"[Epoch %d/%d] [Batch %d/%d] [Train Loss: %f] [Train Acc: %f]"
% (epoch, cfg.num_epochs, i + 1, len(train_loader), train_losses[-1], train_accs[-1])
)
return train_losses, train_accs
# Define testing method
def test(model, test_loader, criterion):
total_loss, total_acc = 0, 0
model.eval()
with torch.no_grad():
for feature, target in tqdm(test_loader, desc='Test'):
feature, target = feature.to(cfg.device), target.long().to(cfg.device)
hidden = init_hidden(feature.size(0), cfg.hidden_dim, cfg.device)
pred = model(feature, hidden)
pred = pred.view(-1, pred.size(2), pred.size(1))
# pred: batch_size * vocab_size * seq_len
# target: batch_size * seq_len
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += 100 * \
torch.sum((pred.argmax(dim=1) == target)).item() / (target.size(0) * target.size(1))
return total_loss / len(test_loader), total_acc / len(test_loader)
# Main
if __name__ == '__main__':
# Hyper parameters and configs
parser = argparse.ArgumentParser()
parser = program_config(parser)
opt = parser.parse_args()
cfg.init_param(opt)
# Get word2vec dict with embedding model
cfg.logger.info('Loading embedding model.')
wv_dict = Word2VecDict(Word2Vec.load(cfg.emb_model_dir))
# Load lyrics data, then delete any lyric whose length's greater than max_seq_len
cfg.logger.info('Loading lyrics data.')
with open(cfg.lyrics_dir, 'r') as f:
lyrics_dict = f.read()
lyrics_dict = json.loads(lyrics_dict)
data = []
for key, val in tqdm(lyrics_dict.items()): # val is a batch
cur_seq_len = len(val)
if cur_seq_len <= cfg.max_seq_len:
data.append(val)
# Uncomment this part to train the partial dataset
# data = data[:100]
# Split data into training and testing sets
num_train = int(len(data) * (1 - cfg.test_ratio))
data_train = data[:num_train]
data_test = data[num_train:]
# Torch dataset and dataloader
train_dataset = LanguageDataset(data_train, wv_dict, padding, cfg.max_seq_len)
train_loader = DataLoader(dataset=train_dataset, batch_size=cfg.batch_size, shuffle=False)
if cfg.test_ratio > 0:
test_dataset = LanguageDataset(data_test, wv_dict, padding, cfg.max_seq_len)
test_loader = DataLoader(dataset=test_dataset, batch_size=cfg.batch_size, shuffle=False)
vocab_size = len(wv_dict.emb_model.wv.vocab) + 1
# Uncomment this part to check the validity of the dataloader
# for minibatch in train_loader:
# features, targets = minibatch
# print(features.size(), targets.size())
# for i, (f, t) in enumerate(zip(features, targets)): # minibatch (one lyric)
# for (wv_f, idx_t) in zip(f, t): # word vector of feature, index of target
# print(wv_dict.index2word(wv_dict.vector2index(wv_f.numpy())), wv_dict.index2word(int(idx_t.item())))
# Print basic info
cfg.logger.debug('Number of lyrics (Valid / Total): {} / {}'.format(len(data), len(lyrics_dict)))
cfg.logger.debug('Training / testing size: {} / {}'.format(len(data_train), len(data_test)))
cfg.logger.debug('Testing set ratio: {}'.format(cfg.test_ratio))
cfg.logger.debug('Total vocabulary size including paddings: {}'.format(vocab_size))
cfg.logger.debug('Max sequence length: {}'.format(cfg.max_seq_len))
cfg.logger.debug('Hidden dimension: {}'.format(cfg.hidden_dim))
cfg.logger.debug('Batch size: {}'.format(cfg.batch_size))
cfg.logger.debug('Total epochs: {}'.format(cfg.num_epochs))
cfg.logger.debug('Intervals to check: {}'.format(cfg.check_interval))
cfg.logger.debug('Learning rate: {}'.format(cfg.lr))
cfg.logger.debug('Schedular factor: {}'.format(cfg.sch_factor))
cfg.logger.debug('Schedular patience: {}'.format(cfg.sch_patience))
cfg.logger.debug('Schedular verbose: {}'.format(cfg.sch_verbose))
cfg.logger.debug('Device: {}'.format(cfg.device))
cfg.logger.debug('Embedding model directory: {}'.format(cfg.emb_model_dir))
cfg.logger.debug('Lyrics data directory: {}'.format(cfg.lyrics_dir))
if cfg.pretrained_lm_dir:
cfg.logger.debug('Pre-trained language model: {}'.format(cfg.pretrained_lm_dir))
else:
cfg.logger.debug('Pre-trained language model: initial training')
# Training
language_model = LanguageModel(wv_dict, cfg.hidden_dim).to(cfg.device)
criterion = nn.NLLLoss()
optimizer = optim.Adam(language_model.parameters(), lr=cfg.lr)
schedular = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=cfg.sch_factor, patience=cfg.sch_patience, verbose=cfg.sch_verbose)
if cfg.pretrained_lm_dir:
lm_loading_res = language_model.load_state_dict(torch.load(cfg.pretrained_lm_dir))
cfg.logger.debug('Loading language model: {}'.format(lm_loading_res))
train_losses, train_accs = [], [] # losses & accuracies to save
if cfg.test_ratio > 0:
test_losses, test_accs = [], []
cfg.logger.info('Training.')
for epoch in range(1, cfg.num_epochs + 1):
train_losses_, train_accs_ = train_dis_epoch(epoch, language_model, train_loader, criterion, optimizer)
train_losses += train_losses_
train_accs += train_accs_
| program_config | identifier_name |
xdp.go | descriptor to the same UMEM area in the RX queue, signifying
// that userspace may read the packet.
// - Trasmit: Userspace adds a descriptor to TX queue. The kernel
// sends the packet (stored in UMEM) pointed to by the descriptor.
// Upon completion, the kernel places a desciptor in the completion
// queue to notify userspace that the packet is sent and the UMEM
// area can be reused.
//
// So in short: RX packets move from the fill to RX queue, and TX
// packets move from the TX to completion queue.
//
// Note that the shared UMEM for RX and TX means that packet forwarding
// can be done without copying; only the queues need to be updated to point to
// the packet in UMEM.
package xdp
import (
"fmt"
"math/bits"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/memutil"
)
// A ControlBlock contains all the control structures necessary to use an
// AF_XDP socket.
//
// The ControlBlock and the structures it contains are meant to be used with a
// single RX goroutine and a single TX goroutine.
type ControlBlock struct {
UMEM UMEM
Fill FillQueue
RX RXQueue
TX TXQueue
Completion CompletionQueue
}
// ReadOnlySocketOpts configure a read-only AF_XDP socket.
type ReadOnlySocketOpts struct {
NFrames uint32
FrameSize uint32
NDescriptors uint32
}
// DefaultReadOnlyOpts provides recommended default options for initializing a
// readonly AF_XDP socket. AF_XDP setup is extremely finnicky and can fail if
// incorrect values are used.
func DefaultReadOnlyOpts() ReadOnlySocketOpts {
return ReadOnlySocketOpts{
NFrames: 4096,
// Frames must be 2048 or 4096 bytes, although not all drivers support
// both.
FrameSize: 4096,
NDescriptors: 2048,
}
}
// ReadOnlySocket returns an initialized read-only AF_XDP socket bound to a
// particular interface and queue.
func ReadOnlySocket(ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) {
sockfd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
if err != nil {
return nil, fmt.Errorf("failed to create AF_XDP socket: %v", err)
}
return ReadOnlyFromSocket(sockfd, ifaceIdx, queueID, opts)
}
// ReadOnlyFromSocket takes an AF_XDP socket, initializes it, and binds it to a
// particular interface and queue.
func ReadOnlyFromSocket(sockfd int, ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) | 0,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap umem: %v", err)
}
cleanup := cleanup.Make(func() {
memutil.UnmapSlice(umemMemory)
})
if sliceBackingPointer(umemMemory)%uintptr(unix.Getpagesize()) != 0 {
return nil, fmt.Errorf("UMEM is not page aligned (address 0x%x)", sliceBackingPointer(umemMemory))
}
cb.UMEM = UMEM{
mem: umemMemory,
sockfd: uint32(sockfd),
frameAddresses: make([]uint64, opts.NFrames),
nFreeFrames: opts.NFrames,
frameMask: ^(uint64(opts.FrameSize) - 1),
}
// Fill in each frame address.
for i := range cb.UMEM.frameAddresses {
cb.UMEM.frameAddresses[i] = uint64(i) * uint64(opts.FrameSize)
}
// Check whether we're likely to fail due to RLIMIT_MEMLOCK.
var rlimit unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_MEMLOCK, &rlimit); err != nil {
return nil, fmt.Errorf("failed to get rlimit for memlock: %v", err)
}
if rlimit.Cur < uint64(len(cb.UMEM.mem)) {
log.Infof("UMEM size (%d) may exceed RLIMIT_MEMLOCK (%+v) and cause registration to fail", len(cb.UMEM.mem), rlimit)
}
reg := unix.XDPUmemReg{
Addr: uint64(sliceBackingPointer(umemMemory)),
Len: uint64(len(umemMemory)),
Size: opts.FrameSize,
// Not useful in the RX path.
Headroom: 0,
// TODO(b/240191988): Investigate use of SHARED flag.
Flags: 0,
}
if err := registerUMEM(sockfd, reg); err != nil {
return nil, fmt.Errorf("failed to register UMEM: %v", err)
}
// Set the number of descriptors in the fill queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_FILL_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register fill ring: %v", err)
}
// Set the number of descriptors in the completion queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_COMPLETION_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register completion ring: %v", err)
}
// Set the number of descriptors in the RX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_RX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register RX queue: %v", err)
}
// Set the number of descriptors in the TX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_TX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register TX queue: %v", err)
}
// Get offset information for the queues. Offsets indicate where, once
// we mmap space for each queue, values in the queue are. They give
// offsets for the shared pointers, a shared flags value, and the
// beginning of the ring of descriptors.
off, err := getOffsets(sockfd)
if err != nil {
return nil, fmt.Errorf("failed to get offsets: %v", err)
}
// Allocate space for the fill queue.
fillQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Fr.Desc+uint64(opts.NDescriptors)*sizeOfFillQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_FILL_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap fill queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(fillQueueMem)
})
// Setup the fillQueue with offsets into allocated memory.
cb.Fill = FillQueue{
mem: fillQueueMem,
mask: opts.NDescriptors - 1,
cachedConsumer: opts.NDescriptors,
}
cb.Fill.init(off, opts)
// Allocate space for the completion queue.
completionQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Cr.Desc+uint64(opts.NDescriptors)*sizeOfCompletionQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_COMPLETION_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap completion queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(completionQueueMem)
})
// Setup the completionQueue with offsets into allocated memory.
cb.Completion = CompletionQueue{
mem: completionQueueMem,
mask: opts.NDescriptors - 1,
}
cb.Completion.init(off, opts)
// Allocate space for | {
if opts.FrameSize != 2048 && opts.FrameSize != 4096 {
return nil, fmt.Errorf("invalid frame size %d: must be either 2048 or 4096", opts.FrameSize)
}
if bits.OnesCount32(opts.NDescriptors) != 1 {
return nil, fmt.Errorf("invalid number of descriptors %d: must be a power of 2", opts.NDescriptors)
}
var cb ControlBlock
// Create the UMEM area. Use mmap instead of make([[]byte) to ensure
// that the UMEM is page-aligned. Aligning the UMEM keeps individual
// packets from spilling over between pages.
var zerofd uintptr
umemMemory, err := memutil.MapSlice(
0,
uintptr(opts.NFrames*opts.FrameSize),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
zerofd-1, | identifier_body |
xdp.go | descriptor to the same UMEM area in the RX queue, signifying
// that userspace may read the packet.
// - Trasmit: Userspace adds a descriptor to TX queue. The kernel
// sends the packet (stored in UMEM) pointed to by the descriptor.
// Upon completion, the kernel places a desciptor in the completion
// queue to notify userspace that the packet is sent and the UMEM
// area can be reused.
//
// So in short: RX packets move from the fill to RX queue, and TX
// packets move from the TX to completion queue.
//
// Note that the shared UMEM for RX and TX means that packet forwarding
// can be done without copying; only the queues need to be updated to point to
// the packet in UMEM.
package xdp
import (
"fmt"
"math/bits"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/memutil"
)
// A ControlBlock contains all the control structures necessary to use an
// AF_XDP socket.
//
// The ControlBlock and the structures it contains are meant to be used with a
// single RX goroutine and a single TX goroutine.
type ControlBlock struct {
UMEM UMEM
Fill FillQueue
RX RXQueue
TX TXQueue
Completion CompletionQueue
}
// ReadOnlySocketOpts configure a read-only AF_XDP socket.
type ReadOnlySocketOpts struct {
NFrames uint32
FrameSize uint32
NDescriptors uint32
}
// DefaultReadOnlyOpts provides recommended default options for initializing a
// readonly AF_XDP socket. AF_XDP setup is extremely finnicky and can fail if
// incorrect values are used.
func DefaultReadOnlyOpts() ReadOnlySocketOpts {
return ReadOnlySocketOpts{
NFrames: 4096,
// Frames must be 2048 or 4096 bytes, although not all drivers support
// both.
FrameSize: 4096,
NDescriptors: 2048,
}
}
// ReadOnlySocket returns an initialized read-only AF_XDP socket bound to a
// particular interface and queue.
func ReadOnlySocket(ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) {
sockfd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
if err != nil {
return nil, fmt.Errorf("failed to create AF_XDP socket: %v", err)
}
return ReadOnlyFromSocket(sockfd, ifaceIdx, queueID, opts)
}
// ReadOnlyFromSocket takes an AF_XDP socket, initializes it, and binds it to a
// particular interface and queue.
func ReadOnlyFromSocket(sockfd int, ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) {
if opts.FrameSize != 2048 && opts.FrameSize != 4096 |
if bits.OnesCount32(opts.NDescriptors) != 1 {
return nil, fmt.Errorf("invalid number of descriptors %d: must be a power of 2", opts.NDescriptors)
}
var cb ControlBlock
// Create the UMEM area. Use mmap instead of make([[]byte) to ensure
// that the UMEM is page-aligned. Aligning the UMEM keeps individual
// packets from spilling over between pages.
var zerofd uintptr
umemMemory, err := memutil.MapSlice(
0,
uintptr(opts.NFrames*opts.FrameSize),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
zerofd-1,
0,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap umem: %v", err)
}
cleanup := cleanup.Make(func() {
memutil.UnmapSlice(umemMemory)
})
if sliceBackingPointer(umemMemory)%uintptr(unix.Getpagesize()) != 0 {
return nil, fmt.Errorf("UMEM is not page aligned (address 0x%x)", sliceBackingPointer(umemMemory))
}
cb.UMEM = UMEM{
mem: umemMemory,
sockfd: uint32(sockfd),
frameAddresses: make([]uint64, opts.NFrames),
nFreeFrames: opts.NFrames,
frameMask: ^(uint64(opts.FrameSize) - 1),
}
// Fill in each frame address.
for i := range cb.UMEM.frameAddresses {
cb.UMEM.frameAddresses[i] = uint64(i) * uint64(opts.FrameSize)
}
// Check whether we're likely to fail due to RLIMIT_MEMLOCK.
var rlimit unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_MEMLOCK, &rlimit); err != nil {
return nil, fmt.Errorf("failed to get rlimit for memlock: %v", err)
}
if rlimit.Cur < uint64(len(cb.UMEM.mem)) {
log.Infof("UMEM size (%d) may exceed RLIMIT_MEMLOCK (%+v) and cause registration to fail", len(cb.UMEM.mem), rlimit)
}
reg := unix.XDPUmemReg{
Addr: uint64(sliceBackingPointer(umemMemory)),
Len: uint64(len(umemMemory)),
Size: opts.FrameSize,
// Not useful in the RX path.
Headroom: 0,
// TODO(b/240191988): Investigate use of SHARED flag.
Flags: 0,
}
if err := registerUMEM(sockfd, reg); err != nil {
return nil, fmt.Errorf("failed to register UMEM: %v", err)
}
// Set the number of descriptors in the fill queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_FILL_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register fill ring: %v", err)
}
// Set the number of descriptors in the completion queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_COMPLETION_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register completion ring: %v", err)
}
// Set the number of descriptors in the RX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_RX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register RX queue: %v", err)
}
// Set the number of descriptors in the TX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_TX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register TX queue: %v", err)
}
// Get offset information for the queues. Offsets indicate where, once
// we mmap space for each queue, values in the queue are. They give
// offsets for the shared pointers, a shared flags value, and the
// beginning of the ring of descriptors.
off, err := getOffsets(sockfd)
if err != nil {
return nil, fmt.Errorf("failed to get offsets: %v", err)
}
// Allocate space for the fill queue.
fillQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Fr.Desc+uint64(opts.NDescriptors)*sizeOfFillQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_FILL_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap fill queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(fillQueueMem)
})
// Setup the fillQueue with offsets into allocated memory.
cb.Fill = FillQueue{
mem: fillQueueMem,
mask: opts.NDescriptors - 1,
cachedConsumer: opts.NDescriptors,
}
cb.Fill.init(off, opts)
// Allocate space for the completion queue.
completionQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Cr.Desc+uint64(opts.NDescriptors)*sizeOfCompletionQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_COMPLETION_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap completion queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(completionQueueMem)
})
// Setup the completionQueue with offsets into allocated memory.
cb.Completion = CompletionQueue{
mem: completionQueueMem,
mask: opts.NDescriptors - 1,
}
cb.Completion.init(off, opts)
// Allocate space | {
return nil, fmt.Errorf("invalid frame size %d: must be either 2048 or 4096", opts.FrameSize)
} | conditional_block |
xdp.go | //
// The ControlBlock and the structures it contains are meant to be used with a
// single RX goroutine and a single TX goroutine.
type ControlBlock struct {
UMEM UMEM
Fill FillQueue
RX RXQueue
TX TXQueue
Completion CompletionQueue
}
// ReadOnlySocketOpts configure a read-only AF_XDP socket.
type ReadOnlySocketOpts struct {
NFrames uint32
FrameSize uint32
NDescriptors uint32
}
// DefaultReadOnlyOpts provides recommended default options for initializing a
// readonly AF_XDP socket. AF_XDP setup is extremely finnicky and can fail if
// incorrect values are used.
func DefaultReadOnlyOpts() ReadOnlySocketOpts {
return ReadOnlySocketOpts{
NFrames: 4096,
// Frames must be 2048 or 4096 bytes, although not all drivers support
// both.
FrameSize: 4096,
NDescriptors: 2048,
}
}
// ReadOnlySocket returns an initialized read-only AF_XDP socket bound to a
// particular interface and queue.
func ReadOnlySocket(ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) {
sockfd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
if err != nil {
return nil, fmt.Errorf("failed to create AF_XDP socket: %v", err)
}
return ReadOnlyFromSocket(sockfd, ifaceIdx, queueID, opts)
}
// ReadOnlyFromSocket takes an AF_XDP socket, initializes it, and binds it to a
// particular interface and queue.
func ReadOnlyFromSocket(sockfd int, ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) {
if opts.FrameSize != 2048 && opts.FrameSize != 4096 {
return nil, fmt.Errorf("invalid frame size %d: must be either 2048 or 4096", opts.FrameSize)
}
if bits.OnesCount32(opts.NDescriptors) != 1 {
return nil, fmt.Errorf("invalid number of descriptors %d: must be a power of 2", opts.NDescriptors)
}
var cb ControlBlock
// Create the UMEM area. Use mmap instead of make([[]byte) to ensure
// that the UMEM is page-aligned. Aligning the UMEM keeps individual
// packets from spilling over between pages.
var zerofd uintptr
umemMemory, err := memutil.MapSlice(
0,
uintptr(opts.NFrames*opts.FrameSize),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
zerofd-1,
0,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap umem: %v", err)
}
cleanup := cleanup.Make(func() {
memutil.UnmapSlice(umemMemory)
})
if sliceBackingPointer(umemMemory)%uintptr(unix.Getpagesize()) != 0 {
return nil, fmt.Errorf("UMEM is not page aligned (address 0x%x)", sliceBackingPointer(umemMemory))
}
cb.UMEM = UMEM{
mem: umemMemory,
sockfd: uint32(sockfd),
frameAddresses: make([]uint64, opts.NFrames),
nFreeFrames: opts.NFrames,
frameMask: ^(uint64(opts.FrameSize) - 1),
}
// Fill in each frame address.
for i := range cb.UMEM.frameAddresses {
cb.UMEM.frameAddresses[i] = uint64(i) * uint64(opts.FrameSize)
}
// Check whether we're likely to fail due to RLIMIT_MEMLOCK.
var rlimit unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_MEMLOCK, &rlimit); err != nil {
return nil, fmt.Errorf("failed to get rlimit for memlock: %v", err)
}
if rlimit.Cur < uint64(len(cb.UMEM.mem)) {
log.Infof("UMEM size (%d) may exceed RLIMIT_MEMLOCK (%+v) and cause registration to fail", len(cb.UMEM.mem), rlimit)
}
reg := unix.XDPUmemReg{
Addr: uint64(sliceBackingPointer(umemMemory)),
Len: uint64(len(umemMemory)),
Size: opts.FrameSize,
// Not useful in the RX path.
Headroom: 0,
// TODO(b/240191988): Investigate use of SHARED flag.
Flags: 0,
}
if err := registerUMEM(sockfd, reg); err != nil {
return nil, fmt.Errorf("failed to register UMEM: %v", err)
}
// Set the number of descriptors in the fill queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_FILL_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register fill ring: %v", err)
}
// Set the number of descriptors in the completion queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_COMPLETION_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register completion ring: %v", err)
}
// Set the number of descriptors in the RX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_RX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register RX queue: %v", err)
}
// Set the number of descriptors in the TX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_TX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register TX queue: %v", err)
}
// Get offset information for the queues. Offsets indicate where, once
// we mmap space for each queue, values in the queue are. They give
// offsets for the shared pointers, a shared flags value, and the
// beginning of the ring of descriptors.
off, err := getOffsets(sockfd)
if err != nil {
return nil, fmt.Errorf("failed to get offsets: %v", err)
}
// Allocate space for the fill queue.
fillQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Fr.Desc+uint64(opts.NDescriptors)*sizeOfFillQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_FILL_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap fill queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(fillQueueMem)
})
// Setup the fillQueue with offsets into allocated memory.
cb.Fill = FillQueue{
mem: fillQueueMem,
mask: opts.NDescriptors - 1,
cachedConsumer: opts.NDescriptors,
}
cb.Fill.init(off, opts)
// Allocate space for the completion queue.
completionQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Cr.Desc+uint64(opts.NDescriptors)*sizeOfCompletionQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_COMPLETION_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap completion queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(completionQueueMem)
})
// Setup the completionQueue with offsets into allocated memory.
cb.Completion = CompletionQueue{
mem: completionQueueMem,
mask: opts.NDescriptors - 1,
}
cb.Completion.init(off, opts)
// Allocate space for the RX queue.
rxQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Rx.Desc+uint64(opts.NDescriptors)*sizeOfRXQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_PGOFF_RX_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap RX queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(rxQueueMem)
})
// Setup the rxQueue with offsets into allocated memory.
cb.RX = RXQueue{
mem: rxQueueMem,
mask: opts.NDescriptors - 1,
}
cb.RX.init(off, opts)
// Allocate space for the TX queue.
txQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Tx.Desc+uint64(opts.NDescriptors)*sizeOfTXQueueDesc()), | unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd), | random_line_split | |
xdp.go | descriptor to the same UMEM area in the RX queue, signifying
// that userspace may read the packet.
// - Trasmit: Userspace adds a descriptor to TX queue. The kernel
// sends the packet (stored in UMEM) pointed to by the descriptor.
// Upon completion, the kernel places a desciptor in the completion
// queue to notify userspace that the packet is sent and the UMEM
// area can be reused.
//
// So in short: RX packets move from the fill to RX queue, and TX
// packets move from the TX to completion queue.
//
// Note that the shared UMEM for RX and TX means that packet forwarding
// can be done without copying; only the queues need to be updated to point to
// the packet in UMEM.
package xdp
import (
"fmt"
"math/bits"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/memutil"
)
// A ControlBlock contains all the control structures necessary to use an
// AF_XDP socket.
//
// The ControlBlock and the structures it contains are meant to be used with a
// single RX goroutine and a single TX goroutine.
type ControlBlock struct {
UMEM UMEM
Fill FillQueue
RX RXQueue
TX TXQueue
Completion CompletionQueue
}
// ReadOnlySocketOpts configure a read-only AF_XDP socket.
type ReadOnlySocketOpts struct {
NFrames uint32
FrameSize uint32
NDescriptors uint32
}
// DefaultReadOnlyOpts provides recommended default options for initializing a
// readonly AF_XDP socket. AF_XDP setup is extremely finnicky and can fail if
// incorrect values are used.
func DefaultReadOnlyOpts() ReadOnlySocketOpts {
return ReadOnlySocketOpts{
NFrames: 4096,
// Frames must be 2048 or 4096 bytes, although not all drivers support
// both.
FrameSize: 4096,
NDescriptors: 2048,
}
}
// ReadOnlySocket returns an initialized read-only AF_XDP socket bound to a
// particular interface and queue.
func | (ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) {
sockfd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
if err != nil {
return nil, fmt.Errorf("failed to create AF_XDP socket: %v", err)
}
return ReadOnlyFromSocket(sockfd, ifaceIdx, queueID, opts)
}
// ReadOnlyFromSocket takes an AF_XDP socket, initializes it, and binds it to a
// particular interface and queue.
func ReadOnlyFromSocket(sockfd int, ifaceIdx, queueID uint32, opts ReadOnlySocketOpts) (*ControlBlock, error) {
if opts.FrameSize != 2048 && opts.FrameSize != 4096 {
return nil, fmt.Errorf("invalid frame size %d: must be either 2048 or 4096", opts.FrameSize)
}
if bits.OnesCount32(opts.NDescriptors) != 1 {
return nil, fmt.Errorf("invalid number of descriptors %d: must be a power of 2", opts.NDescriptors)
}
var cb ControlBlock
// Create the UMEM area. Use mmap instead of make([[]byte) to ensure
// that the UMEM is page-aligned. Aligning the UMEM keeps individual
// packets from spilling over between pages.
var zerofd uintptr
umemMemory, err := memutil.MapSlice(
0,
uintptr(opts.NFrames*opts.FrameSize),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_PRIVATE|unix.MAP_ANONYMOUS,
zerofd-1,
0,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap umem: %v", err)
}
cleanup := cleanup.Make(func() {
memutil.UnmapSlice(umemMemory)
})
if sliceBackingPointer(umemMemory)%uintptr(unix.Getpagesize()) != 0 {
return nil, fmt.Errorf("UMEM is not page aligned (address 0x%x)", sliceBackingPointer(umemMemory))
}
cb.UMEM = UMEM{
mem: umemMemory,
sockfd: uint32(sockfd),
frameAddresses: make([]uint64, opts.NFrames),
nFreeFrames: opts.NFrames,
frameMask: ^(uint64(opts.FrameSize) - 1),
}
// Fill in each frame address.
for i := range cb.UMEM.frameAddresses {
cb.UMEM.frameAddresses[i] = uint64(i) * uint64(opts.FrameSize)
}
// Check whether we're likely to fail due to RLIMIT_MEMLOCK.
var rlimit unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_MEMLOCK, &rlimit); err != nil {
return nil, fmt.Errorf("failed to get rlimit for memlock: %v", err)
}
if rlimit.Cur < uint64(len(cb.UMEM.mem)) {
log.Infof("UMEM size (%d) may exceed RLIMIT_MEMLOCK (%+v) and cause registration to fail", len(cb.UMEM.mem), rlimit)
}
reg := unix.XDPUmemReg{
Addr: uint64(sliceBackingPointer(umemMemory)),
Len: uint64(len(umemMemory)),
Size: opts.FrameSize,
// Not useful in the RX path.
Headroom: 0,
// TODO(b/240191988): Investigate use of SHARED flag.
Flags: 0,
}
if err := registerUMEM(sockfd, reg); err != nil {
return nil, fmt.Errorf("failed to register UMEM: %v", err)
}
// Set the number of descriptors in the fill queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_FILL_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register fill ring: %v", err)
}
// Set the number of descriptors in the completion queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_UMEM_COMPLETION_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register completion ring: %v", err)
}
// Set the number of descriptors in the RX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_RX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register RX queue: %v", err)
}
// Set the number of descriptors in the TX queue.
if err := unix.SetsockoptInt(sockfd, unix.SOL_XDP, unix.XDP_TX_RING, int(opts.NDescriptors)); err != nil {
return nil, fmt.Errorf("failed to register TX queue: %v", err)
}
// Get offset information for the queues. Offsets indicate where, once
// we mmap space for each queue, values in the queue are. They give
// offsets for the shared pointers, a shared flags value, and the
// beginning of the ring of descriptors.
off, err := getOffsets(sockfd)
if err != nil {
return nil, fmt.Errorf("failed to get offsets: %v", err)
}
// Allocate space for the fill queue.
fillQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Fr.Desc+uint64(opts.NDescriptors)*sizeOfFillQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_FILL_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap fill queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(fillQueueMem)
})
// Setup the fillQueue with offsets into allocated memory.
cb.Fill = FillQueue{
mem: fillQueueMem,
mask: opts.NDescriptors - 1,
cachedConsumer: opts.NDescriptors,
}
cb.Fill.init(off, opts)
// Allocate space for the completion queue.
completionQueueMem, err := memutil.MapSlice(
0,
uintptr(off.Cr.Desc+uint64(opts.NDescriptors)*sizeOfCompletionQueueDesc()),
unix.PROT_READ|unix.PROT_WRITE,
unix.MAP_SHARED|unix.MAP_POPULATE,
uintptr(sockfd),
unix.XDP_UMEM_PGOFF_COMPLETION_RING,
)
if err != nil {
return nil, fmt.Errorf("failed to mmap completion queue: %v", err)
}
cleanup.Add(func() {
memutil.UnmapSlice(completionQueueMem)
})
// Setup the completionQueue with offsets into allocated memory.
cb.Completion = CompletionQueue{
mem: completionQueueMem,
mask: opts.NDescriptors - 1,
}
cb.Completion.init(off, opts)
// Allocate space for | ReadOnlySocket | identifier_name |
feature.go | /v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/component"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature"
"github.com/DataDog/datadog-operator/controllers/datadogagent/merger"
"github.com/DataDog/datadog-operator/controllers/datadogagent/object/volume"
cilium "github.com/DataDog/datadog-operator/pkg/cilium/v1"
)
func init() {
err := feature.Register(feature.APMIDType, buildAPMFeature)
if err != nil {
panic(err)
}
}
func buildAPMFeature(options *feature.Options) feature.Feature {
apmFeat := &apmFeature{}
return apmFeat
}
type apmFeature struct { | hostPortHostPort int32
useHostNetwork bool
udsEnabled bool
udsHostFilepath string
owner metav1.Object
forceEnableLocalService bool
localServiceName string
createKubernetesNetworkPolicy bool
createCiliumNetworkPolicy bool
createSCC bool
}
// ID returns the ID of the Feature
func (f *apmFeature) ID() feature.IDType {
return feature.APMIDType
}
// Configure is used to configure the feature from a v2alpha1.DatadogAgent instance.
func (f *apmFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) {
f.owner = dda
apm := dda.Spec.Features.APM
if apm != nil && apiutils.BoolValue(apm.Enabled) {
f.useHostNetwork = v2alpha1.IsHostNetworkEnabled(dda, v2alpha1.NodeAgentComponentName)
// hostPort defaults to 'false' in the defaulting code
f.hostPortEnabled = apiutils.BoolValue(apm.HostPortConfig.Enabled)
f.hostPortHostPort = *apm.HostPortConfig.Port
if f.hostPortEnabled {
if enabled, flavor := v2alpha1.IsNetworkPolicyEnabled(dda); enabled {
if flavor == v2alpha1.NetworkPolicyFlavorCilium {
f.createCiliumNetworkPolicy = true
} else {
f.createKubernetesNetworkPolicy = true
}
}
}
// UDS defaults to 'true' in the defaulting code
f.udsEnabled = apiutils.BoolValue(apm.UnixDomainSocketConfig.Enabled)
f.udsHostFilepath = *apm.UnixDomainSocketConfig.Path
if dda.Spec.Global.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Global.LocalService.ForceEnableLocalService)
}
f.localServiceName = v2alpha1.GetLocalAgentServiceName(dda)
f.createSCC = v2alpha1.ShouldCreateSCC(dda, v2alpha1.NodeAgentComponentName)
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ConfigureV1 use to configure the feature from a v1alpha1.DatadogAgent instance.
func (f *apmFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) (reqComp feature.RequiredComponents) {
f.owner = dda
apm := dda.Spec.Agent.Apm
if apiutils.BoolValue(apm.Enabled) {
f.hostPortEnabled = true
f.hostPortHostPort = *apm.HostPort
f.useHostNetwork = v1alpha1.IsHostNetworkEnabled(dda)
if apiutils.BoolValue(apm.UnixDomainSocket.Enabled) {
f.udsEnabled = true
if apm.UnixDomainSocket.HostFilepath != nil {
f.udsHostFilepath = *apm.UnixDomainSocket.HostFilepath
}
}
if dda.Spec.Agent.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Agent.LocalService.ForceLocalServiceEnable)
}
f.localServiceName = v1alpha1.GetLocalAgentServiceName(dda)
if enabled, flavor := v1alpha1.IsAgentNetworkPolicyEnabled(dda); enabled {
if flavor == v1alpha1.NetworkPolicyFlavorCilium {
f.createCiliumNetworkPolicy = true
} else {
f.createKubernetesNetworkPolicy = true
}
}
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ManageDependencies allows a feature to manage its dependencies.
// Feature's dependencies should be added in the store.
func (f *apmFeature) ManageDependencies(managers feature.ResourceManagers, components feature.RequiredComponents) error {
// agent local service
if component.ShouldCreateAgentLocalService(managers.Store().GetVersionInfo(), f.forceEnableLocalService) {
apmPort := &corev1.ServicePort{
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(int(apicommon.DefaultApmPort)),
Port: apicommon.DefaultApmPort,
Name: apicommon.DefaultApmPortName,
}
if f.hostPortEnabled {
apmPort.Port = f.hostPortHostPort
apmPort.Name = apicommon.APMHostPortName
if f.useHostNetwork {
apmPort.TargetPort = intstr.FromInt(int(f.hostPortHostPort))
}
}
serviceInternalTrafficPolicy := corev1.ServiceInternalTrafficPolicyLocal
if err := managers.ServiceManager().AddService(f.localServiceName, f.owner.GetNamespace(), nil, []corev1.ServicePort{*apmPort}, &serviceInternalTrafficPolicy); err != nil {
return err
}
}
// network policies
if f.hostPortEnabled {
policyName, podSelector := component.GetNetworkPolicyMetadata(f.owner, v2alpha1.NodeAgentComponentName)
if f.createKubernetesNetworkPolicy {
protocolTCP := corev1.ProtocolTCP
ingressRules := []netv1.NetworkPolicyIngressRule{
{
Ports: []netv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{
Type: intstr.Int,
IntVal: f.hostPortHostPort,
},
Protocol: &protocolTCP,
},
},
},
}
return managers.NetworkPolicyManager().AddKubernetesNetworkPolicy(
policyName,
f.owner.GetNamespace(),
podSelector,
nil,
ingressRules,
nil,
)
} else if f.createCiliumNetworkPolicy {
policySpecs := []cilium.NetworkPolicySpec{
{
Description: "Ingress for APM trace",
EndpointSelector: podSelector,
Ingress: []cilium.IngressRule{
{
FromEndpoints: []metav1.LabelSelector{
{},
},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: strconv.Itoa(int(f.hostPortHostPort)),
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
},
}
return managers.CiliumPolicyManager().AddCiliumPolicy(policyName, f.owner.GetNamespace(), policySpecs)
}
}
// scc
if f.createSCC {
sccName := component.GetAgentSCCName(f.owner)
scc := securityv1.SecurityContextConstraints{}
if f.hostPortEnabled {
scc.AllowHostPorts = true
}
return managers.PodSecurityManager().AddSecurityContextConstraints(sccName, f.owner.GetNamespace(), &scc)
}
return nil
}
// ManageClusterAgent allows a feature to configure the ClusterAgent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageClusterAgent(managers feature.PodTemplateManagers) error {
return nil
}
// ManageNodeAgent allows a feature to configure the Node Agent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) error {
managers.EnvVar().AddEnvVarToContainer(apicommonv1.TraceAgentContainerName, &corev1.EnvVar{
Name: apicommon.DDAPMEnabled,
Value: "true",
})
// udp
apmPort := &corev1.ContainerPort{
Name: apicommon.DefaultApmPortName,
ContainerPort: apicommon.DefaultApmPort,
Protocol: corev1.ProtocolTCP | hostPortEnabled bool | random_line_split |
feature.go | /v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/component"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature"
"github.com/DataDog/datadog-operator/controllers/datadogagent/merger"
"github.com/DataDog/datadog-operator/controllers/datadogagent/object/volume"
cilium "github.com/DataDog/datadog-operator/pkg/cilium/v1"
)
func init() {
err := feature.Register(feature.APMIDType, buildAPMFeature)
if err != nil {
panic(err)
}
}
func buildAPMFeature(options *feature.Options) feature.Feature {
apmFeat := &apmFeature{}
return apmFeat
}
type apmFeature struct {
hostPortEnabled bool
hostPortHostPort int32
useHostNetwork bool
udsEnabled bool
udsHostFilepath string
owner metav1.Object
forceEnableLocalService bool
localServiceName string
createKubernetesNetworkPolicy bool
createCiliumNetworkPolicy bool
createSCC bool
}
// ID returns the ID of the Feature
func (f *apmFeature) ID() feature.IDType {
return feature.APMIDType
}
// Configure is used to configure the feature from a v2alpha1.DatadogAgent instance.
func (f *apmFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) |
if dda.Spec.Global.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Global.LocalService.ForceEnableLocalService)
}
f.localServiceName = v2alpha1.GetLocalAgentServiceName(dda)
f.createSCC = v2alpha1.ShouldCreateSCC(dda, v2alpha1.NodeAgentComponentName)
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ConfigureV1 use to configure the feature from a v1alpha1.DatadogAgent instance.
func (f *apmFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) (reqComp feature.RequiredComponents) {
f.owner = dda
apm := dda.Spec.Agent.Apm
if apiutils.BoolValue(apm.Enabled) {
f.hostPortEnabled = true
f.hostPortHostPort = *apm.HostPort
f.useHostNetwork = v1alpha1.IsHostNetworkEnabled(dda)
if apiutils.BoolValue(apm.UnixDomainSocket.Enabled) {
f.udsEnabled = true
if apm.UnixDomainSocket.HostFilepath != nil {
f.udsHostFilepath = *apm.UnixDomainSocket.HostFilepath
}
}
if dda.Spec.Agent.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Agent.LocalService.ForceLocalServiceEnable)
}
f.localServiceName = v1alpha1.GetLocalAgentServiceName(dda)
if enabled, flavor := v1alpha1.IsAgentNetworkPolicyEnabled(dda); enabled {
if flavor == v1alpha1.NetworkPolicyFlavorCilium {
f.createCiliumNetworkPolicy = true
} else {
f.createKubernetesNetworkPolicy = true
}
}
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ManageDependencies allows a feature to manage its dependencies.
// Feature's dependencies should be added in the store.
func (f *apmFeature) ManageDependencies(managers feature.ResourceManagers, components feature.RequiredComponents) error {
// agent local service
if component.ShouldCreateAgentLocalService(managers.Store().GetVersionInfo(), f.forceEnableLocalService) {
apmPort := &corev1.ServicePort{
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(int(apicommon.DefaultApmPort)),
Port: apicommon.DefaultApmPort,
Name: apicommon.DefaultApmPortName,
}
if f.hostPortEnabled {
apmPort.Port = f.hostPortHostPort
apmPort.Name = apicommon.APMHostPortName
if f.useHostNetwork {
apmPort.TargetPort = intstr.FromInt(int(f.hostPortHostPort))
}
}
serviceInternalTrafficPolicy := corev1.ServiceInternalTrafficPolicyLocal
if err := managers.ServiceManager().AddService(f.localServiceName, f.owner.GetNamespace(), nil, []corev1.ServicePort{*apmPort}, &serviceInternalTrafficPolicy); err != nil {
return err
}
}
// network policies
if f.hostPortEnabled {
policyName, podSelector := component.GetNetworkPolicyMetadata(f.owner, v2alpha1.NodeAgentComponentName)
if f.createKubernetesNetworkPolicy {
protocolTCP := corev1.ProtocolTCP
ingressRules := []netv1.NetworkPolicyIngressRule{
{
Ports: []netv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{
Type: intstr.Int,
IntVal: f.hostPortHostPort,
},
Protocol: &protocolTCP,
},
},
},
}
return managers.NetworkPolicyManager().AddKubernetesNetworkPolicy(
policyName,
f.owner.GetNamespace(),
podSelector,
nil,
ingressRules,
nil,
)
} else if f.createCiliumNetworkPolicy {
policySpecs := []cilium.NetworkPolicySpec{
{
Description: "Ingress for APM trace",
EndpointSelector: podSelector,
Ingress: []cilium.IngressRule{
{
FromEndpoints: []metav1.LabelSelector{
{},
},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: strconv.Itoa(int(f.hostPortHostPort)),
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
},
}
return managers.CiliumPolicyManager().AddCiliumPolicy(policyName, f.owner.GetNamespace(), policySpecs)
}
}
// scc
if f.createSCC {
sccName := component.GetAgentSCCName(f.owner)
scc := securityv1.SecurityContextConstraints{}
if f.hostPortEnabled {
scc.AllowHostPorts = true
}
return managers.PodSecurityManager().AddSecurityContextConstraints(sccName, f.owner.GetNamespace(), &scc)
}
return nil
}
// ManageClusterAgent allows a feature to configure the ClusterAgent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageClusterAgent(managers feature.PodTemplateManagers) error {
return nil
}
// ManageNodeAgent allows a feature to configure the Node Agent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) error {
managers.EnvVar().AddEnvVarToContainer(apicommonv1.TraceAgentContainerName, &corev1.EnvVar{
Name: apicommon.DDAPMEnabled,
Value: "true",
})
// udp
apmPort := &corev1.ContainerPort{
Name: apicommon.DefaultApmPortName,
ContainerPort: apicommon.DefaultApmPort,
Protocol: corev1 | {
f.owner = dda
apm := dda.Spec.Features.APM
if apm != nil && apiutils.BoolValue(apm.Enabled) {
f.useHostNetwork = v2alpha1.IsHostNetworkEnabled(dda, v2alpha1.NodeAgentComponentName)
// hostPort defaults to 'false' in the defaulting code
f.hostPortEnabled = apiutils.BoolValue(apm.HostPortConfig.Enabled)
f.hostPortHostPort = *apm.HostPortConfig.Port
if f.hostPortEnabled {
if enabled, flavor := v2alpha1.IsNetworkPolicyEnabled(dda); enabled {
if flavor == v2alpha1.NetworkPolicyFlavorCilium {
f.createCiliumNetworkPolicy = true
} else {
f.createKubernetesNetworkPolicy = true
}
}
}
// UDS defaults to 'true' in the defaulting code
f.udsEnabled = apiutils.BoolValue(apm.UnixDomainSocketConfig.Enabled)
f.udsHostFilepath = *apm.UnixDomainSocketConfig.Path | identifier_body |
feature.go | /v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/component"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature"
"github.com/DataDog/datadog-operator/controllers/datadogagent/merger"
"github.com/DataDog/datadog-operator/controllers/datadogagent/object/volume"
cilium "github.com/DataDog/datadog-operator/pkg/cilium/v1"
)
func init() {
err := feature.Register(feature.APMIDType, buildAPMFeature)
if err != nil {
panic(err)
}
}
func buildAPMFeature(options *feature.Options) feature.Feature {
apmFeat := &apmFeature{}
return apmFeat
}
type apmFeature struct {
hostPortEnabled bool
hostPortHostPort int32
useHostNetwork bool
udsEnabled bool
udsHostFilepath string
owner metav1.Object
forceEnableLocalService bool
localServiceName string
createKubernetesNetworkPolicy bool
createCiliumNetworkPolicy bool
createSCC bool
}
// ID returns the ID of the Feature
func (f *apmFeature) ID() feature.IDType {
return feature.APMIDType
}
// Configure is used to configure the feature from a v2alpha1.DatadogAgent instance.
func (f *apmFeature) Configure(dda *v2alpha1.DatadogAgent) (reqComp feature.RequiredComponents) {
f.owner = dda
apm := dda.Spec.Features.APM
if apm != nil && apiutils.BoolValue(apm.Enabled) {
f.useHostNetwork = v2alpha1.IsHostNetworkEnabled(dda, v2alpha1.NodeAgentComponentName)
// hostPort defaults to 'false' in the defaulting code
f.hostPortEnabled = apiutils.BoolValue(apm.HostPortConfig.Enabled)
f.hostPortHostPort = *apm.HostPortConfig.Port
if f.hostPortEnabled {
if enabled, flavor := v2alpha1.IsNetworkPolicyEnabled(dda); enabled {
if flavor == v2alpha1.NetworkPolicyFlavorCilium {
f.createCiliumNetworkPolicy = true
} else {
f.createKubernetesNetworkPolicy = true
}
}
}
// UDS defaults to 'true' in the defaulting code
f.udsEnabled = apiutils.BoolValue(apm.UnixDomainSocketConfig.Enabled)
f.udsHostFilepath = *apm.UnixDomainSocketConfig.Path
if dda.Spec.Global.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Global.LocalService.ForceEnableLocalService)
}
f.localServiceName = v2alpha1.GetLocalAgentServiceName(dda)
f.createSCC = v2alpha1.ShouldCreateSCC(dda, v2alpha1.NodeAgentComponentName)
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ConfigureV1 use to configure the feature from a v1alpha1.DatadogAgent instance.
func (f *apmFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) (reqComp feature.RequiredComponents) {
f.owner = dda
apm := dda.Spec.Agent.Apm
if apiutils.BoolValue(apm.Enabled) {
f.hostPortEnabled = true
f.hostPortHostPort = *apm.HostPort
f.useHostNetwork = v1alpha1.IsHostNetworkEnabled(dda)
if apiutils.BoolValue(apm.UnixDomainSocket.Enabled) {
f.udsEnabled = true
if apm.UnixDomainSocket.HostFilepath != nil {
f.udsHostFilepath = *apm.UnixDomainSocket.HostFilepath
}
}
if dda.Spec.Agent.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Agent.LocalService.ForceLocalServiceEnable)
}
f.localServiceName = v1alpha1.GetLocalAgentServiceName(dda)
if enabled, flavor := v1alpha1.IsAgentNetworkPolicyEnabled(dda); enabled {
if flavor == v1alpha1.NetworkPolicyFlavorCilium {
f.createCiliumNetworkPolicy = true
} else {
f.createKubernetesNetworkPolicy = true
}
}
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ManageDependencies allows a feature to manage its dependencies.
// Feature's dependencies should be added in the store.
func (f *apmFeature) ManageDependencies(managers feature.ResourceManagers, components feature.RequiredComponents) error {
// agent local service
if component.ShouldCreateAgentLocalService(managers.Store().GetVersionInfo(), f.forceEnableLocalService) {
apmPort := &corev1.ServicePort{
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(int(apicommon.DefaultApmPort)),
Port: apicommon.DefaultApmPort,
Name: apicommon.DefaultApmPortName,
}
if f.hostPortEnabled {
apmPort.Port = f.hostPortHostPort
apmPort.Name = apicommon.APMHostPortName
if f.useHostNetwork {
apmPort.TargetPort = intstr.FromInt(int(f.hostPortHostPort))
}
}
serviceInternalTrafficPolicy := corev1.ServiceInternalTrafficPolicyLocal
if err := managers.ServiceManager().AddService(f.localServiceName, f.owner.GetNamespace(), nil, []corev1.ServicePort{*apmPort}, &serviceInternalTrafficPolicy); err != nil {
return err
}
}
// network policies
if f.hostPortEnabled {
policyName, podSelector := component.GetNetworkPolicyMetadata(f.owner, v2alpha1.NodeAgentComponentName)
if f.createKubernetesNetworkPolicy | ingressRules,
nil,
)
}
else if f.createCiliumNetworkPolicy {
policySpecs := []cilium.NetworkPolicySpec{
{
Description: "Ingress for APM trace",
EndpointSelector: podSelector,
Ingress: []cilium.IngressRule{
{
FromEndpoints: []metav1.LabelSelector{
{},
},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: strconv.Itoa(int(f.hostPortHostPort)),
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
},
}
return managers.CiliumPolicyManager().AddCiliumPolicy(policyName, f.owner.GetNamespace(), policySpecs)
}
}
// scc
if f.createSCC {
sccName := component.GetAgentSCCName(f.owner)
scc := securityv1.SecurityContextConstraints{}
if f.hostPortEnabled {
scc.AllowHostPorts = true
}
return managers.PodSecurityManager().AddSecurityContextConstraints(sccName, f.owner.GetNamespace(), &scc)
}
return nil
}
// ManageClusterAgent allows a feature to configure the ClusterAgent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageClusterAgent(managers feature.PodTemplateManagers) error {
return nil
}
// ManageNodeAgent allows a feature to configure the Node Agent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) error {
managers.EnvVar().AddEnvVarToContainer(apicommonv1.TraceAgentContainerName, &corev1.EnvVar{
Name: apicommon.DDAPMEnabled,
Value: "true",
})
// udp
apmPort := &corev1.ContainerPort{
Name: apicommon.DefaultApmPortName,
ContainerPort: apicommon.DefaultApmPort,
Protocol: corev1 | {
protocolTCP := corev1.ProtocolTCP
ingressRules := []netv1.NetworkPolicyIngressRule{
{
Ports: []netv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{
Type: intstr.Int,
IntVal: f.hostPortHostPort,
},
Protocol: &protocolTCP,
},
},
},
}
return managers.NetworkPolicyManager().AddKubernetesNetworkPolicy(
policyName,
f.owner.GetNamespace(),
podSelector,
nil, | conditional_block |
feature.go | f.createKubernetesNetworkPolicy = true
}
}
}
// UDS defaults to 'true' in the defaulting code
f.udsEnabled = apiutils.BoolValue(apm.UnixDomainSocketConfig.Enabled)
f.udsHostFilepath = *apm.UnixDomainSocketConfig.Path
if dda.Spec.Global.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Global.LocalService.ForceEnableLocalService)
}
f.localServiceName = v2alpha1.GetLocalAgentServiceName(dda)
f.createSCC = v2alpha1.ShouldCreateSCC(dda, v2alpha1.NodeAgentComponentName)
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ConfigureV1 use to configure the feature from a v1alpha1.DatadogAgent instance.
func (f *apmFeature) ConfigureV1(dda *v1alpha1.DatadogAgent) (reqComp feature.RequiredComponents) {
f.owner = dda
apm := dda.Spec.Agent.Apm
if apiutils.BoolValue(apm.Enabled) {
f.hostPortEnabled = true
f.hostPortHostPort = *apm.HostPort
f.useHostNetwork = v1alpha1.IsHostNetworkEnabled(dda)
if apiutils.BoolValue(apm.UnixDomainSocket.Enabled) {
f.udsEnabled = true
if apm.UnixDomainSocket.HostFilepath != nil {
f.udsHostFilepath = *apm.UnixDomainSocket.HostFilepath
}
}
if dda.Spec.Agent.LocalService != nil {
f.forceEnableLocalService = apiutils.BoolValue(dda.Spec.Agent.LocalService.ForceLocalServiceEnable)
}
f.localServiceName = v1alpha1.GetLocalAgentServiceName(dda)
if enabled, flavor := v1alpha1.IsAgentNetworkPolicyEnabled(dda); enabled {
if flavor == v1alpha1.NetworkPolicyFlavorCilium {
f.createCiliumNetworkPolicy = true
} else {
f.createKubernetesNetworkPolicy = true
}
}
reqComp = feature.RequiredComponents{
Agent: feature.RequiredComponent{
IsRequired: apiutils.NewBoolPointer(true),
Containers: []apicommonv1.AgentContainerName{
apicommonv1.CoreAgentContainerName,
apicommonv1.TraceAgentContainerName,
},
},
}
}
return reqComp
}
// ManageDependencies allows a feature to manage its dependencies.
// Feature's dependencies should be added in the store.
func (f *apmFeature) ManageDependencies(managers feature.ResourceManagers, components feature.RequiredComponents) error {
// agent local service
if component.ShouldCreateAgentLocalService(managers.Store().GetVersionInfo(), f.forceEnableLocalService) {
apmPort := &corev1.ServicePort{
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(int(apicommon.DefaultApmPort)),
Port: apicommon.DefaultApmPort,
Name: apicommon.DefaultApmPortName,
}
if f.hostPortEnabled {
apmPort.Port = f.hostPortHostPort
apmPort.Name = apicommon.APMHostPortName
if f.useHostNetwork {
apmPort.TargetPort = intstr.FromInt(int(f.hostPortHostPort))
}
}
serviceInternalTrafficPolicy := corev1.ServiceInternalTrafficPolicyLocal
if err := managers.ServiceManager().AddService(f.localServiceName, f.owner.GetNamespace(), nil, []corev1.ServicePort{*apmPort}, &serviceInternalTrafficPolicy); err != nil {
return err
}
}
// network policies
if f.hostPortEnabled {
policyName, podSelector := component.GetNetworkPolicyMetadata(f.owner, v2alpha1.NodeAgentComponentName)
if f.createKubernetesNetworkPolicy {
protocolTCP := corev1.ProtocolTCP
ingressRules := []netv1.NetworkPolicyIngressRule{
{
Ports: []netv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{
Type: intstr.Int,
IntVal: f.hostPortHostPort,
},
Protocol: &protocolTCP,
},
},
},
}
return managers.NetworkPolicyManager().AddKubernetesNetworkPolicy(
policyName,
f.owner.GetNamespace(),
podSelector,
nil,
ingressRules,
nil,
)
} else if f.createCiliumNetworkPolicy {
policySpecs := []cilium.NetworkPolicySpec{
{
Description: "Ingress for APM trace",
EndpointSelector: podSelector,
Ingress: []cilium.IngressRule{
{
FromEndpoints: []metav1.LabelSelector{
{},
},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: strconv.Itoa(int(f.hostPortHostPort)),
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
},
}
return managers.CiliumPolicyManager().AddCiliumPolicy(policyName, f.owner.GetNamespace(), policySpecs)
}
}
// scc
if f.createSCC {
sccName := component.GetAgentSCCName(f.owner)
scc := securityv1.SecurityContextConstraints{}
if f.hostPortEnabled {
scc.AllowHostPorts = true
}
return managers.PodSecurityManager().AddSecurityContextConstraints(sccName, f.owner.GetNamespace(), &scc)
}
return nil
}
// ManageClusterAgent allows a feature to configure the ClusterAgent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageClusterAgent(managers feature.PodTemplateManagers) error {
return nil
}
// ManageNodeAgent allows a feature to configure the Node Agent's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) ManageNodeAgent(managers feature.PodTemplateManagers) error {
managers.EnvVar().AddEnvVarToContainer(apicommonv1.TraceAgentContainerName, &corev1.EnvVar{
Name: apicommon.DDAPMEnabled,
Value: "true",
})
// udp
apmPort := &corev1.ContainerPort{
Name: apicommon.DefaultApmPortName,
ContainerPort: apicommon.DefaultApmPort,
Protocol: corev1.ProtocolTCP,
}
if f.hostPortEnabled {
apmPort.HostPort = f.hostPortHostPort
// if using host network, host port should be set and needs to match container port
if f.useHostNetwork {
apmPort.ContainerPort = f.hostPortHostPort
}
managers.EnvVar().AddEnvVarToContainer(apicommonv1.TraceAgentContainerName, &corev1.EnvVar{
Name: apicommon.DDAPMReceiverPort,
Value: strconv.FormatInt(int64(f.hostPortHostPort), 10),
})
managers.EnvVar().AddEnvVarToContainer(apicommonv1.TraceAgentContainerName, &corev1.EnvVar{
Name: apicommon.DDAPMNonLocalTraffic,
Value: "true",
})
}
managers.Port().AddPortToContainer(apicommonv1.TraceAgentContainerName, apmPort)
// uds
if f.udsEnabled {
udsHostFolder := filepath.Dir(f.udsHostFilepath)
sockName := filepath.Base(f.udsHostFilepath)
managers.EnvVar().AddEnvVarToContainer(apicommonv1.TraceAgentContainerName, &corev1.EnvVar{
Name: apicommon.DDAPMReceiverSocket,
Value: filepath.Join(apicommon.APMSocketVolumeLocalPath, sockName),
})
socketVol, socketVolMount := volume.GetVolumes(apicommon.APMSocketVolumeName, udsHostFolder, apicommon.APMSocketVolumeLocalPath, false)
volType := corev1.HostPathDirectoryOrCreate // We need to create the directory on the host if it does not exist.
socketVol.VolumeSource.HostPath.Type = &volType
managers.VolumeMount().AddVolumeMountToContainerWithMergeFunc(&socketVolMount, apicommonv1.TraceAgentContainerName, merger.OverrideCurrentVolumeMountMergeFunction)
managers.Volume().AddVolume(&socketVol)
}
return nil
}
// ManageClusterChecksRunner allows a feature to configure the ClusterChecksRunner's corev1.PodTemplateSpec
// It should do nothing if the feature doesn't need to configure it.
func (f *apmFeature) | ManageClusterChecksRunner | identifier_name | |
demo.js | (xml, container) {
// Parallel arrays for the chart data, these are populated as the XML/JSON file
// is loaded
this.symbols = [];
this.symbolNames = [];
this.precipitations = [];
this.windDirections = [];
this.windDirectionNames = [];
this.windSpeeds = [];
this.windSpeedNames = [];
this.temperatures = [];
this.pressures = [];
// Initialize
this.xml = xml;
this.container = container;
// Run
this.parseYrData();
}
/**
* Return weather symbol sprites as laid out at http://om.yr.no/forklaring/symbol/
*/
Meteogram.prototype.getSymbolSprites = function (symbolSize) {
return {
'01d': {
x: 0,
y: 0
},
'01n': {
x: symbolSize,
y: 0
},
'16': {
x: 2 * symbolSize,
y: 0
},
'02d': {
x: 0,
y: symbolSize
},
'02n': {
x: symbolSize,
y: symbolSize
},
'03d': {
x: 0,
y: 2 * symbolSize
},
'03n': {
x: symbolSize,
y: 2 * symbolSize
},
'17': {
x: 2 * symbolSize,
y: 2 * symbolSize
},
'04': {
x: 0,
y: 3 * symbolSize
},
'05d': {
x: 0,
y: 4 * symbolSize
},
'05n': {
x: symbolSize,
y: 4 * symbolSize
},
'18': {
x: 2 * symbolSize,
y: 4 * symbolSize
},
'06d': {
x: 0,
y: 5 * symbolSize
},
'06n': {
x: symbolSize,
y: 5 * symbolSize
},
'07d': {
x: 0,
y: 6 * symbolSize
},
'07n': {
x: symbolSize,
y: 6 * symbolSize
},
'08d': {
x: 0,
y: 7 * symbolSize
},
'08n': {
x: symbolSize,
y: 7 * symbolSize
},
'19': {
x: 2 * symbolSize,
y: 7 * symbolSize
},
'09': {
x: 0,
y: 8 * symbolSize
},
'10': {
x: 0,
y: 9 * symbolSize
},
'11': {
x: 0,
y: 10 * symbolSize
},
'12': {
x: 0,
y: 11 * symbolSize
},
'13': {
x: 0,
y: 12 * symbolSize
},
'14': {
x: 0,
y: 13 * symbolSize
},
'15': {
x: 0,
y: 14 * symbolSize
},
'20d': {
x: 0,
y: 15 * symbolSize
},
'20n': {
x: symbolSize,
y: 15 * symbolSize
},
'20m': {
x: 2 * symbolSize,
y: 15 * symbolSize
},
'21d': {
x: 0,
y: 16 * symbolSize
},
'21n': {
x: symbolSize,
y: 16 * symbolSize
},
'21m': {
x: 2 * symbolSize,
y: 16 * symbolSize
},
'22': {
x: 0,
y: 17 * symbolSize
},
'23': {
x: 0,
y: 18 * symbolSize
}
};
};
/**
* Function to smooth the temperature line. The original data provides only whole degrees,
* which makes the line graph look jagged. So we apply a running mean on it, but preserve
* the unaltered value in the tooltip.
*/
Meteogram.prototype.smoothLine = function (data) {
var i = data.length,
sum,
value;
while (i--) {
data[i].value = value = data[i].y; // preserve value for tooltip
// Set the smoothed value to the average of the closest points, but don't allow
// it to differ more than 0.5 degrees from the given value
sum = (data[i - 1] || data[i]).y + value + (data[i + 1] || data[i]).y;
data[i].y = Math.max(value - 0.5, Math.min(sum / 3, value + 0.5));
}
};
/**
* Callback function that is called from Highcharts on hovering each point and returns
* HTML for the tooltip.
*/
Meteogram.prototype.tooltipFormatter = function (tooltip) {
// Create the header with reference to the time interval
var index = tooltip.points[0].point.index,
ret = '<small>' + Highcharts.dateFormat('%A, %b %e, %H:%M', tooltip.x) + '-' +
Highcharts.dateFormat('%H:%M', tooltip.points[0].point.to) + '</small><br>';
// Symbol text
ret += '<b>' + this.symbolNames[index] + '</b>';
ret += '<table>';
// Add all series
Highcharts.each(tooltip.points, function (point) {
var series = point.series;
ret += '<tr><td><span style="color:' + series.color + '">\u25CF</span> ' + series.name +
': </td><td style="white-space:nowrap">' + Highcharts.pick(point.point.value, point.y) +
series.options.tooltip.valueSuffix + '</td></tr>';
});
// Add wind
ret += '<tr><td style="vertical-align: top">\u25CF Wind</td><td style="white-space:nowrap">' + this.windDirectionNames[index] +
'<br>' + this.windSpeedNames[index] + ' (' +
Highcharts.numberFormat(this.windSpeeds[index], 1) + ' m/s)</td></tr>';
// Close
ret += '</table>';
return ret;
};
/**
* Draw the weather symbols on top of the temperature series. The symbols are sprites of a single
* file, defined in the getSymbolSprites function above.
*/
Meteogram.prototype.drawWeatherSymbols = function (chart) {
var meteogram = this,
symbolSprites = this.getSymbolSprites(30);
$.each(chart.series[0].data, function (i, point) {
var sprite,
group;
if (meteogram.resolution > 36e5 || i % 2 === 0) {
sprite = symbolSprites[meteogram.symbols[i]];
if (sprite) {
// Create a group element that is positioned and clipped at 30 pixels width and height
group = chart.renderer.g()
.attr({
translateX: point.plotX + chart.plotLeft - 15,
translateY: point.plotY + chart.plotTop - 30,
zIndex: 5
})
.clip(chart.renderer.clipRect(0, 0, 30, 30))
.add();
// Position the image inside it at the sprite position
chart.renderer.image(
'http://www.highcharts.com/samples/graphics/meteogram-symbols-30px.png',
-sprite.x,
-sprite.y,
90,
570
)
.add(group);
}
}
});
};
/**
* Create wind speed symbols for the Beaufort wind scale. The symbols are rotated
* around the zero centerpoint.
*/
Meteogram.prototype.windArrow = function (name) {
var level,
path;
// The stem and the arrow head
path = [
'M', 0, 7, // base of arrow
'L', -1.5, 7,
0, 10,
1.5, 7,
0, 7,
0, -10 // top
];
level = $.inArray(name, ['Calm', 'Light air', 'Light breeze', 'Gentle breeze', 'Moderate breeze',
'Fresh breeze', 'Strong breeze', 'Near gale', 'Gale', 'Strong gale', 'Storm',
'Violent storm', 'Hurricane']);
if (level === 0) {
path = [];
}
if (level === 2) {
path.push('M', 0, -8, 'L', 4, | Meteogram | identifier_name | |
demo.js | Calm', 'Light air', 'Light breeze', 'Gentle breeze', 'Moderate breeze',
'Fresh breeze', 'Strong breeze', 'Near gale', 'Gale', 'Strong gale', 'Storm',
'Violent storm', 'Hurricane']);
if (level === 0) {
path = [];
}
if (level === 2) {
path.push('M', 0, -8, 'L', 4, -8); // short line
} else if (level >= 3) {
path.push(0, -10, 7, -10); // long line
}
if (level === 4) {
path.push('M', 0, -7, 'L', 4, -7);
} else if (level >= 5) {
path.push('M', 0, -7, 'L', 7, -7);
}
if (level === 5) {
path.push('M', 0, -4, 'L', 4, -4);
} else if (level >= 6) {
path.push('M', 0, -4, 'L', 7, -4);
}
if (level === 7) {
path.push('M', 0, -1, 'L', 4, -1);
} else if (level >= 8) {
path.push('M', 0, -1, 'L', 7, -1);
}
return path;
};
/**
* Draw the wind arrows. Each arrow path is generated by the windArrow function above.
*/
Meteogram.prototype.drawWindArrows = function (chart) {
var meteogram = this;
$.each(chart.series[0].data, function (i, point) {
var sprite, arrow, x, y;
if (meteogram.resolution > 36e5 || i % 2 === 0) {
// Draw the wind arrows
x = point.plotX + chart.plotLeft + 7;
y = 255;
if (meteogram.windSpeedNames[i] === 'Calm') {
arrow = chart.renderer.circle(x, y, 10).attr({
fill: 'none'
});
} else {
arrow = chart.renderer.path(
meteogram.windArrow(meteogram.windSpeedNames[i])
).attr({
rotation: parseInt(meteogram.windDirections[i], 10),
translateX: x, // rotation center
translateY: y // rotation center
});
}
arrow.attr({
stroke: (Highcharts.theme && Highcharts.theme.contrastTextColor) || 'black',
'stroke-width': 1.5,
zIndex: 5
})
.add();
}
});
};
/**
* Draw blocks around wind arrows, below the plot area
*/
Meteogram.prototype.drawBlocksForWindArrows = function (chart) {
var xAxis = chart.xAxis[0],
x,
pos,
max,
isLong,
isLast,
i;
for (pos = xAxis.min, max = xAxis.max, i = 0; pos <= max + 36e5; pos += 36e5, i += 1) {
// Get the X position
isLast = pos === max + 36e5;
x = Math.round(xAxis.toPixels(pos)) + (isLast ? 0.5 : -0.5);
// Draw the vertical dividers and ticks
if (this.resolution > 36e5) {
isLong = pos % this.resolution === 0;
} else {
isLong = i % 2 === 0;
}
chart.renderer.path(['M', x, chart.plotTop + chart.plotHeight + (isLong ? 0 : 28),
'L', x, chart.plotTop + chart.plotHeight + 32, 'Z'])
.attr({
'stroke': chart.options.chart.plotBorderColor,
'stroke-width': 1
})
.add();
}
};
/**
* Get the title based on the XML data
*/
Meteogram.prototype.getTitle = function () {
return 'Meteogram for ' + this.xml.location.name + ', ' + this.xml.location.country;
};
/**
* Build and return the Highcharts options structure
*/
Meteogram.prototype.getChartOptions = function () {
var meteogram = this;
return {
chart: {
renderTo: this.container,
marginBottom: 70,
marginRight: 40,
marginTop: 50,
plotBorderWidth: 1,
width: 800,
height: 310
},
title: {
text: this.getTitle(),
align: 'left'
},
credits: {
text: 'Forecast from <a href="http://yr.no">yr.no</a>',
href: this.xml.credit.link['@attributes'].url,
position: {
x: -40
}
},
tooltip: {
shared: true,
useHTML: true,
formatter: function () {
return meteogram.tooltipFormatter(this);
}
},
xAxis: [{ // Bottom X axis
type: 'datetime',
tickInterval: 2 * 36e5, // two hours
minorTickInterval: 36e5, // one hour
tickLength: 0,
gridLineWidth: 1,
gridLineColor: (Highcharts.theme && Highcharts.theme.background2) || '#F0F0F0',
startOnTick: false,
endOnTick: false,
minPadding: 0,
maxPadding: 0,
offset: 30,
showLastLabel: true,
labels: {
format: '{value:%H}'
}
}, { // Top X axis
linkedTo: 0,
type: 'datetime',
tickInterval: 24 * 3600 * 1000,
labels: {
format: '{value:<span style="font-size: 12px; font-weight: bold">%a</span> %b %e}',
align: 'left',
x: 3,
y: -5
},
opposite: true,
tickLength: 20,
gridLineWidth: 1
}],
yAxis: [{ // temperature axis
title: {
text: null
},
labels: {
format: '{value}°',
style: {
fontSize: '10px'
},
x: -3
},
plotLines: [{ // zero plane
value: 0,
color: '#BBBBBB',
width: 1,
zIndex: 2
}],
// Custom positioner to provide even temperature ticks from top down
tickPositioner: function () {
var max = Math.ceil(this.max) + 1,
pos = max - 12, // start
ret;
if (pos < this.min) {
ret = [];
while (pos <= max) {
ret.push(pos += 1);
}
} // else return undefined and go auto
return ret;
},
maxPadding: 0.3,
tickInterval: 1,
gridLineColor: (Highcharts.theme && Highcharts.theme.background2) || '#F0F0F0'
}, { // precipitation axis
title: {
text: null
},
labels: {
enabled: false
},
gridLineWidth: 0,
tickLength: 0
}, { // Air pressure
allowDecimals: false,
title: { // Title on top of axis
text: 'hPa',
offset: 0,
align: 'high',
rotation: 0,
style: {
fontSize: '10px',
color: Highcharts.getOptions().colors[2]
},
textAlign: 'left',
x: 3
},
labels: {
style: {
fontSize: '8px',
color: Highcharts.getOptions().colors[2]
},
y: 2,
x: 3
},
gridLineWidth: 0,
opposite: true,
showLastLabel: false
}],
legend: {
enabled: false
},
plotOptions: {
series: {
pointPlacement: 'between'
}
},
series: [{
name: 'Temperature',
data: this.temperatures,
type: 'spline',
marker: {
enabled: false,
states: {
hover: {
enabled: true
}
}
},
tooltip: {
valueSuffix: '°C'
},
zIndex: 1,
color: '#FF3333',
negativeColor: '#48AFE8'
}, {
name: 'Precipitation',
data: this.precipitations,
type: 'column',
color: '#68CFE8',
yAxis: 1,
groupPadding: 0,
pointPadding: 0,
borderWidth: 0,
shadow: false,
dataLabels: {
enabled: true,
formatter: function () {
if (this.y > 0) {
| return this.y;
}
| conditional_block | |
demo.js |
/**
* Return weather symbol sprites as laid out at http://om.yr.no/forklaring/symbol/
*/
Meteogram.prototype.getSymbolSprites = function (symbolSize) {
return {
'01d': {
x: 0,
y: 0
},
'01n': {
x: symbolSize,
y: 0
},
'16': {
x: 2 * symbolSize,
y: 0
},
'02d': {
x: 0,
y: symbolSize
},
'02n': {
x: symbolSize,
y: symbolSize
},
'03d': {
x: 0,
y: 2 * symbolSize
},
'03n': {
x: symbolSize,
y: 2 * symbolSize
},
'17': {
x: 2 * symbolSize,
y: 2 * symbolSize
},
'04': {
x: 0,
y: 3 * symbolSize
},
'05d': {
x: 0,
y: 4 * symbolSize
},
'05n': {
x: symbolSize,
y: 4 * symbolSize
},
'18': {
x: 2 * symbolSize,
y: 4 * symbolSize
},
'06d': {
x: 0,
y: 5 * symbolSize
},
'06n': {
x: symbolSize,
y: 5 * symbolSize
},
'07d': {
x: 0,
y: 6 * symbolSize
},
'07n': {
x: symbolSize,
y: 6 * symbolSize
},
'08d': {
x: 0,
y: 7 * symbolSize
},
'08n': {
x: symbolSize,
y: 7 * symbolSize
},
'19': {
x: 2 * symbolSize,
y: 7 * symbolSize
},
'09': {
x: 0,
y: 8 * symbolSize
},
'10': {
x: 0,
y: 9 * symbolSize
},
'11': {
x: 0,
y: 10 * symbolSize
},
'12': {
x: 0,
y: 11 * symbolSize
},
'13': {
x: 0,
y: 12 * symbolSize
},
'14': {
x: 0,
y: 13 * symbolSize
},
'15': {
x: 0,
y: 14 * symbolSize
},
'20d': {
x: 0,
y: 15 * symbolSize
},
'20n': {
x: symbolSize,
y: 15 * symbolSize
},
'20m': {
x: 2 * symbolSize,
y: 15 * symbolSize
},
'21d': {
x: 0,
y: 16 * symbolSize
},
'21n': {
x: symbolSize,
y: 16 * symbolSize
},
'21m': {
x: 2 * symbolSize,
y: 16 * symbolSize
},
'22': {
x: 0,
y: 17 * symbolSize
},
'23': {
x: 0,
y: 18 * symbolSize
}
};
};
/**
* Function to smooth the temperature line. The original data provides only whole degrees,
* which makes the line graph look jagged. So we apply a running mean on it, but preserve
* the unaltered value in the tooltip.
*/
Meteogram.prototype.smoothLine = function (data) {
var i = data.length,
sum,
value;
while (i--) {
data[i].value = value = data[i].y; // preserve value for tooltip
// Set the smoothed value to the average of the closest points, but don't allow
// it to differ more than 0.5 degrees from the given value
sum = (data[i - 1] || data[i]).y + value + (data[i + 1] || data[i]).y;
data[i].y = Math.max(value - 0.5, Math.min(sum / 3, value + 0.5));
}
};
/**
* Callback function that is called from Highcharts on hovering each point and returns
* HTML for the tooltip.
*/
Meteogram.prototype.tooltipFormatter = function (tooltip) {
// Create the header with reference to the time interval
var index = tooltip.points[0].point.index,
ret = '<small>' + Highcharts.dateFormat('%A, %b %e, %H:%M', tooltip.x) + '-' +
Highcharts.dateFormat('%H:%M', tooltip.points[0].point.to) + '</small><br>';
// Symbol text
ret += '<b>' + this.symbolNames[index] + '</b>';
ret += '<table>';
// Add all series
Highcharts.each(tooltip.points, function (point) {
var series = point.series;
ret += '<tr><td><span style="color:' + series.color + '">\u25CF</span> ' + series.name +
': </td><td style="white-space:nowrap">' + Highcharts.pick(point.point.value, point.y) +
series.options.tooltip.valueSuffix + '</td></tr>';
});
// Add wind
ret += '<tr><td style="vertical-align: top">\u25CF Wind</td><td style="white-space:nowrap">' + this.windDirectionNames[index] +
'<br>' + this.windSpeedNames[index] + ' (' +
Highcharts.numberFormat(this.windSpeeds[index], 1) + ' m/s)</td></tr>';
// Close
ret += '</table>';
return ret;
};
/**
* Draw the weather symbols on top of the temperature series. The symbols are sprites of a single
* file, defined in the getSymbolSprites function above.
*/
Meteogram.prototype.drawWeatherSymbols = function (chart) {
var meteogram = this,
symbolSprites = this.getSymbolSprites(30);
$.each(chart.series[0].data, function (i, point) {
var sprite,
group;
if (meteogram.resolution > 36e5 || i % 2 === 0) {
sprite = symbolSprites[meteogram.symbols[i]];
if (sprite) {
// Create a group element that is positioned and clipped at 30 pixels width and height
group = chart.renderer.g()
.attr({
translateX: point.plotX + chart.plotLeft - 15,
translateY: point.plotY + chart.plotTop - 30,
zIndex: 5
})
.clip(chart.renderer.clipRect(0, 0, 30, 30))
.add();
// Position the image inside it at the sprite position
chart.renderer.image(
'http://www.highcharts.com/samples/graphics/meteogram-symbols-30px.png',
-sprite.x,
-sprite.y,
90,
570
)
.add(group);
}
}
});
};
/**
* Create wind speed symbols for the Beaufort wind scale. The symbols are rotated
* around the zero centerpoint.
*/
Meteogram.prototype.windArrow = function (name) {
var level,
path;
// The stem and the arrow head
path = [
'M', 0, 7, // base of arrow
'L', -1.5, 7,
0, 10,
1.5, 7,
0, 7,
0, -10 // top
];
level = $.inArray(name, ['Calm', 'Light air', 'Light breeze', 'Gentle breeze', 'Moderate breeze',
'Fresh breeze', 'Strong breeze', 'Near gale', 'Gale', 'Strong gale', 'Storm',
'Violent storm', 'Hurricane']);
if (level === 0) {
path = [];
}
if (level === 2) {
path.push('M', 0, -8, 'L', 4, -8); // short line | {
// Parallel arrays for the chart data, these are populated as the XML/JSON file
// is loaded
this.symbols = [];
this.symbolNames = [];
this.precipitations = [];
this.windDirections = [];
this.windDirectionNames = [];
this.windSpeeds = [];
this.windSpeedNames = [];
this.temperatures = [];
this.pressures = [];
// Initialize
this.xml = xml;
this.container = container;
// Run
this.parseYrData();
} | identifier_body | |
demo.js | x: 0,
y: 9 * symbolSize
},
'11': {
x: 0,
y: 10 * symbolSize
},
'12': {
x: 0,
y: 11 * symbolSize
},
'13': {
x: 0,
y: 12 * symbolSize
},
'14': {
x: 0,
y: 13 * symbolSize
},
'15': {
x: 0,
y: 14 * symbolSize
},
'20d': {
x: 0,
y: 15 * symbolSize
},
'20n': {
x: symbolSize,
y: 15 * symbolSize
},
'20m': {
x: 2 * symbolSize,
y: 15 * symbolSize
},
'21d': {
x: 0,
y: 16 * symbolSize
},
'21n': {
x: symbolSize,
y: 16 * symbolSize
},
'21m': {
x: 2 * symbolSize,
y: 16 * symbolSize
},
'22': {
x: 0,
y: 17 * symbolSize
},
'23': {
x: 0,
y: 18 * symbolSize
}
};
};
/**
* Function to smooth the temperature line. The original data provides only whole degrees,
* which makes the line graph look jagged. So we apply a running mean on it, but preserve
* the unaltered value in the tooltip.
*/
Meteogram.prototype.smoothLine = function (data) {
var i = data.length,
sum,
value;
while (i--) {
data[i].value = value = data[i].y; // preserve value for tooltip
// Set the smoothed value to the average of the closest points, but don't allow
// it to differ more than 0.5 degrees from the given value
sum = (data[i - 1] || data[i]).y + value + (data[i + 1] || data[i]).y;
data[i].y = Math.max(value - 0.5, Math.min(sum / 3, value + 0.5));
}
};
/**
* Callback function that is called from Highcharts on hovering each point and returns
* HTML for the tooltip.
*/
Meteogram.prototype.tooltipFormatter = function (tooltip) {
// Create the header with reference to the time interval
var index = tooltip.points[0].point.index,
ret = '<small>' + Highcharts.dateFormat('%A, %b %e, %H:%M', tooltip.x) + '-' +
Highcharts.dateFormat('%H:%M', tooltip.points[0].point.to) + '</small><br>';
// Symbol text
ret += '<b>' + this.symbolNames[index] + '</b>';
ret += '<table>';
// Add all series
Highcharts.each(tooltip.points, function (point) {
var series = point.series;
ret += '<tr><td><span style="color:' + series.color + '">\u25CF</span> ' + series.name +
': </td><td style="white-space:nowrap">' + Highcharts.pick(point.point.value, point.y) +
series.options.tooltip.valueSuffix + '</td></tr>';
});
// Add wind
ret += '<tr><td style="vertical-align: top">\u25CF Wind</td><td style="white-space:nowrap">' + this.windDirectionNames[index] +
'<br>' + this.windSpeedNames[index] + ' (' +
Highcharts.numberFormat(this.windSpeeds[index], 1) + ' m/s)</td></tr>';
// Close
ret += '</table>';
return ret;
};
/**
* Draw the weather symbols on top of the temperature series. The symbols are sprites of a single
* file, defined in the getSymbolSprites function above.
*/
Meteogram.prototype.drawWeatherSymbols = function (chart) {
var meteogram = this,
symbolSprites = this.getSymbolSprites(30); | $.each(chart.series[0].data, function (i, point) {
var sprite,
group;
if (meteogram.resolution > 36e5 || i % 2 === 0) {
sprite = symbolSprites[meteogram.symbols[i]];
if (sprite) {
// Create a group element that is positioned and clipped at 30 pixels width and height
group = chart.renderer.g()
.attr({
translateX: point.plotX + chart.plotLeft - 15,
translateY: point.plotY + chart.plotTop - 30,
zIndex: 5
})
.clip(chart.renderer.clipRect(0, 0, 30, 30))
.add();
// Position the image inside it at the sprite position
chart.renderer.image(
'http://www.highcharts.com/samples/graphics/meteogram-symbols-30px.png',
-sprite.x,
-sprite.y,
90,
570
)
.add(group);
}
}
});
};
/**
* Create wind speed symbols for the Beaufort wind scale. The symbols are rotated
* around the zero centerpoint.
*/
Meteogram.prototype.windArrow = function (name) {
var level,
path;
// The stem and the arrow head
path = [
'M', 0, 7, // base of arrow
'L', -1.5, 7,
0, 10,
1.5, 7,
0, 7,
0, -10 // top
];
level = $.inArray(name, ['Calm', 'Light air', 'Light breeze', 'Gentle breeze', 'Moderate breeze',
'Fresh breeze', 'Strong breeze', 'Near gale', 'Gale', 'Strong gale', 'Storm',
'Violent storm', 'Hurricane']);
if (level === 0) {
path = [];
}
if (level === 2) {
path.push('M', 0, -8, 'L', 4, -8); // short line
} else if (level >= 3) {
path.push(0, -10, 7, -10); // long line
}
if (level === 4) {
path.push('M', 0, -7, 'L', 4, -7);
} else if (level >= 5) {
path.push('M', 0, -7, 'L', 7, -7);
}
if (level === 5) {
path.push('M', 0, -4, 'L', 4, -4);
} else if (level >= 6) {
path.push('M', 0, -4, 'L', 7, -4);
}
if (level === 7) {
path.push('M', 0, -1, 'L', 4, -1);
} else if (level >= 8) {
path.push('M', 0, -1, 'L', 7, -1);
}
return path;
};
/**
* Draw the wind arrows. Each arrow path is generated by the windArrow function above.
*/
Meteogram.prototype.drawWindArrows = function (chart) {
var meteogram = this;
$.each(chart.series[0].data, function (i, point) {
var sprite, arrow, x, y;
if (meteogram.resolution > 36e5 || i % 2 === 0) {
// Draw the wind arrows
x = point.plotX + chart.plotLeft + 7;
y = 255;
if (meteogram.windSpeedNames[i] === 'Calm') {
arrow = chart.renderer.circle(x, y, 10).attr({
fill: 'none'
});
} else {
arrow = chart.renderer.path(
meteogram.windArrow(meteogram.windSpeedNames[i])
).attr({
rotation: parseInt(meteogram.windDirections[i], 10),
translateX: x, // rotation center
translateY: y // rotation center
});
}
arrow.attr({
stroke: (Highcharts.theme && Highcharts.theme.contrastTextColor) || 'black',
'stroke-width': 1.5,
zIndex: 5
})
.add();
}
});
};
/**
* Draw blocks around wind arrows, below the plot area
*/
Meteogram.prototype.drawBlocksForWindArrows = function (chart) {
var xAxis = chart.xAxis[0],
x,
pos,
max,
isLong,
isLast,
i;
for (pos = xAxis.min, max = xAxis.max, i = 0; pos <= max + 36e5; pos += 36e5, i += 1) {
// Get the X position
isLast = pos === max + 36e | random_line_split | |
cli.rs | to be used to inline a function of small size. Can be empty or `{:inline}`.
pub func_inline: String,
/// A bound to apply to the length of serialization results.
pub serialize_bound: usize,
/// How many times to call the prover backend for the verification problem. This is used for
/// benchmarking.
pub bench_repeat: usize,
}
impl Default for BackendOptions {
fn default() -> Self {
let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new());
Self {
bench_repeat: 1,
boogie_exe: get_env("BOOGIE_EXE"),
z3_exe: get_env("Z3_EXE"),
use_cvc4: false,
cvc4_exe: get_env("CVC4_EXE"),
boogie_flags: vec![],
use_array_theory: false,
generate_smt: false,
native_equality: false,
type_requires: "free requires".to_owned(),
stratification_depth: 4,
aggressive_func_inline: "".to_owned(),
func_inline: "{:inline}".to_owned(),
serialize_bound: 4,
}
}
}
impl Options {
/// Creates options from toml configuration source.
pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> {
Ok(toml::from_str(toml_source)?)
}
/// Creates options from toml configuration file.
pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> {
Self::create_from_toml(&std::fs::read_to_string(toml_file)?)
}
// Creates options from command line arguments. This parses the arguments and terminates
// the program on errors, printing usage information. The first argument is expected to be
// the program name.
pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> {
// Clap definition of the command line interface.
let is_number = |s: String| {
s.parse::<usize>()
.map(|_| ())
.map_err(|_| "expected number".to_string())
};
let cli = App::new("mvp")
.version("0.1.0")
.about("The Move Prover")
.author("The Libra Core Contributors")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.takes_value(true)
.value_name("TOML_FILE")
.env("MOVE_PROVER_CONFIG")
.help("path to a configuration file. \
Values in this file will be overridden by command line flags"),
)
.arg(
Arg::with_name("config-str")
.conflicts_with("config")
.short("C")
.long("config-str")
.takes_value(true)
.multiple(true)
.number_of_values(1)
.value_name("TOML_STRING")
.help("inline configuration string in toml syntax. Can be repeated. \
Use as in `-C=prover.opt=value -C=backend.opt=value`"),
)
.arg(
Arg::with_name("print-config")
.long("print-config")
.help("prints the effective toml configuration, then exits")
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.takes_value(true)
.value_name("BOOGIE_FILE")
.help("path to the boogie output which represents the verification problem"),
)
.arg(
Arg::with_name("verbosity")
.short("v")
.long("verbose")
.takes_value(true)
.possible_values(&["error", "warn", "info", "debug"])
.help("verbosity level."),
)
.arg(
Arg::with_name("generate-only")
.short("g")
.long("generate-only")
.help("only generate boogie file but do not call boogie"),
)
.arg(
Arg::with_name("trace")
.long("trace")
.short("t")
.help("enables automatic tracing of expressions in prover errors")
)
.arg(
Arg::with_name("docgen")
.long("docgen")
.help("run the documentation generator instead of the prover. \
Generated docs will be written into the directory `./doc` unless configured otherwise via toml"),
)
.arg(
Arg::with_name("verify")
.long("verify")
.takes_value(true)
.possible_values(&["public", "all", "none"])
.value_name("SCOPE")
.help("default scope of verification \
(can be overridden by `pragma verify=true|false`)"),
)
.arg(
Arg::with_name("bench-repeat")
.long("bench-repeat")
.takes_value(true)
.value_name("COUNT")
.validator(is_number)
.help(
"for benchmarking: how many times to call the backend on the verification problem",
),
)
.arg(
Arg::with_name("dependencies")
.long("dependency")
.short("d")
.multiple(true)
.number_of_values(1)
.takes_value(true)
.value_name("PATH_TO_DEPENDENCY")
.help("path to a Move file, or a directory which will be searched for \
Move files, containing dependencies which will not be verified")
)
.arg(
Arg::with_name("sources")
.multiple(true)
.value_name("PATH_TO_SOURCE_FILE")
.min_values(1)
.help("the source files to verify"),
)
.after_help("More options available via `--config file` or `--config-str str`. \
Use `--print-config` to see format and current values. \
See `move-prover/src/cli.rs::Option` for documentation.");
// Parse the arguments. This will abort the program on parsing errors and print help.
// It will also accept options like --help.
let matches = cli.get_matches_from(args);
// Initialize options.
let get_vec = |s: &str| -> Vec<String> {
match matches.values_of(s) {
Some(vs) => vs.map(|v| v.to_string()).collect(),
_ => vec![],
}
};
let mut options = if matches.is_present("config") {
Self::create_from_toml_file(matches.value_of("config").unwrap())?
} else if matches.is_present("config-str") {
let config_lines = get_vec("config-str").join("\n");
Self::create_from_toml(&config_lines)?
} else {
Options::default()
};
// Analyze arguments.
if matches.is_present("output") {
options.output_path = matches.value_of("output").unwrap().to_string();
}
if matches.is_present("verbosity") {
options.verbosity_level = match matches.value_of("verbosity").unwrap() {
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"info" => LevelFilter::Info,
"debug" => LevelFilter::Debug,
_ => unreachable!("should not happen"),
}
}
if matches.occurrences_of("sources") > 0 {
options.move_sources = get_vec("sources");
}
if matches.occurrences_of("dependencies") > 0 {
options.move_deps = get_vec("dependencies");
}
if matches.is_present("verify") {
options.prover.verify_scope = match matches.value_of("verify").unwrap() {
"public" => VerificationScope::Public,
"all" => VerificationScope::All,
"none" => VerificationScope::None,
_ => unreachable!("should not happen"),
}
}
if matches.is_present("bench-repeat") {
options.backend.bench_repeat =
matches.value_of("bench-repeat").unwrap().parse::<usize>()?;
}
if matches.is_present("docgen") {
options.run_docgen = true;
}
if matches.is_present("trace") {
options.prover.debug_trace = true;
}
if matches.is_present("print-config") {
println!("{}", toml::to_string(&options).unwrap());
Err(anyhow!("exiting"))
} else {
Ok(options)
}
}
/// Sets up logging based on provided options. This should be called as early as possible
/// and before any use of info!, warn! etc.
pub fn setup_logging(&self) {
CombinedLogger::init(vec![TermLogger::new(
self.verbosity_level,
ConfigBuilder::new()
.set_time_level(LevelFilter::Debug)
.set_level_padding(LevelPadding::Off)
.build(),
TerminalMode::Mixed,
)])
.expect("Unexpected CombinedLogger init failure");
}
pub fn setup_logging_for_test(&self) {
// Loggers are global static, so we have to protect against reinitializing.
if LOGGER_CONFIGURED.compare_and_swap(false, true, Ordering::Relaxed) {
return;
}
TEST_MODE.store(true, Ordering::Relaxed);
SimpleLogger::init(self.verbosity_level, Config::default())
.expect("UnexpectedSimpleLogger failure");
}
/// Returns command line to call boogie.
pub fn | get_boogie_command | identifier_name | |
cli.rs | verbosity_level: LevelFilter,
/// Whether to run the documentation generator instead of the prover.
pub run_docgen: bool,
/// An account address to use if none is specified in the source.
pub account_address: String,
/// The paths to the Move sources.
pub move_sources: Vec<String>,
/// The paths to any dependencies for the Move sources. Those will not be verified but
/// can be used by `move_sources`.
pub move_deps: Vec<String>,
/// Options for the prover.
pub prover: ProverOptions,
/// Options for the prover backend.
pub backend: BackendOptions,
/// Options for the documentation generator.
pub docgen: DocgenOptions,
}
impl Default for Options {
fn default() -> Self {
Self {
prelude_path: INLINE_PRELUDE.to_string(),
output_path: "output.bpl".to_string(),
run_docgen: false,
account_address: "0x234567".to_string(),
verbosity_level: LevelFilter::Info,
move_sources: vec![],
move_deps: vec![],
docgen: DocgenOptions::default(),
prover: ProverOptions::default(),
backend: BackendOptions::default(),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProverOptions {
/// Whether to only generate backend code. | /// Whether to minimize execution traces in errors.
pub minimize_execution_trace: bool,
/// Whether to omit debug information in generated model.
pub omit_model_debug: bool,
/// Whether output for e.g. diagnosis shall be stable/redacted so it can be used in test
/// output.
pub stable_test_output: bool,
/// Scope of what functions to verify.
pub verify_scope: VerificationScope,
/// Whether to emit global axiom that resources are well-formed.
pub resource_wellformed_axiom: bool,
/// Whether to automatically debug trace values of specification expression leafs.
pub debug_trace: bool,
}
impl Default for ProverOptions {
fn default() -> Self {
Self {
generate_only: false,
native_stubs: false,
minimize_execution_trace: true,
omit_model_debug: false,
stable_test_output: false,
verify_scope: VerificationScope::Public,
resource_wellformed_axiom: true,
debug_trace: false,
}
}
}
/// Backend options.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct BackendOptions {
/// Path to the boogie executable.
pub boogie_exe: String,
/// Path to the z3 executable.
pub z3_exe: String,
/// Whether to use cvc4.
pub use_cvc4: bool,
/// Path to the cvc4 executable.
pub cvc4_exe: String,
/// List of flags to pass on to boogie.
pub boogie_flags: Vec<String>,
/// Whether to use native array theory.
pub use_array_theory: bool,
/// Whether to produce an SMT file for each verification problem.
pub generate_smt: bool,
/// Whether native instead of stratified equality should be used.
pub native_equality: bool,
/// A string determining the type of requires used for parameter type checks. Can be
/// `"requires"` or `"free requires`".
pub type_requires: String,
/// The depth until which stratified functions are expanded.
pub stratification_depth: usize,
/// A string to be used to inline a function of medium size. Can be empty or `{:inline}`.
pub aggressive_func_inline: String,
/// A string to be used to inline a function of small size. Can be empty or `{:inline}`.
pub func_inline: String,
/// A bound to apply to the length of serialization results.
pub serialize_bound: usize,
/// How many times to call the prover backend for the verification problem. This is used for
/// benchmarking.
pub bench_repeat: usize,
}
impl Default for BackendOptions {
fn default() -> Self {
let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new());
Self {
bench_repeat: 1,
boogie_exe: get_env("BOOGIE_EXE"),
z3_exe: get_env("Z3_EXE"),
use_cvc4: false,
cvc4_exe: get_env("CVC4_EXE"),
boogie_flags: vec![],
use_array_theory: false,
generate_smt: false,
native_equality: false,
type_requires: "free requires".to_owned(),
stratification_depth: 4,
aggressive_func_inline: "".to_owned(),
func_inline: "{:inline}".to_owned(),
serialize_bound: 4,
}
}
}
impl Options {
/// Creates options from toml configuration source.
pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> {
Ok(toml::from_str(toml_source)?)
}
/// Creates options from toml configuration file.
pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> {
Self::create_from_toml(&std::fs::read_to_string(toml_file)?)
}
// Creates options from command line arguments. This parses the arguments and terminates
// the program on errors, printing usage information. The first argument is expected to be
// the program name.
pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> {
// Clap definition of the command line interface.
let is_number = |s: String| {
s.parse::<usize>()
.map(|_| ())
.map_err(|_| "expected number".to_string())
};
let cli = App::new("mvp")
.version("0.1.0")
.about("The Move Prover")
.author("The Libra Core Contributors")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.takes_value(true)
.value_name("TOML_FILE")
.env("MOVE_PROVER_CONFIG")
.help("path to a configuration file. \
Values in this file will be overridden by command line flags"),
)
.arg(
Arg::with_name("config-str")
.conflicts_with("config")
.short("C")
.long("config-str")
.takes_value(true)
.multiple(true)
.number_of_values(1)
.value_name("TOML_STRING")
.help("inline configuration string in toml syntax. Can be repeated. \
Use as in `-C=prover.opt=value -C=backend.opt=value`"),
)
.arg(
Arg::with_name("print-config")
.long("print-config")
.help("prints the effective toml configuration, then exits")
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.takes_value(true)
.value_name("BOOGIE_FILE")
.help("path to the boogie output which represents the verification problem"),
)
.arg(
Arg::with_name("verbosity")
.short("v")
.long("verbose")
.takes_value(true)
.possible_values(&["error", "warn", "info", "debug"])
.help("verbosity level."),
)
.arg(
Arg::with_name("generate-only")
.short("g")
.long("generate-only")
.help("only generate boogie file but do not call boogie"),
)
.arg(
Arg::with_name("trace")
.long("trace")
.short("t")
.help("enables automatic tracing of expressions in prover errors")
)
.arg(
Arg::with_name("docgen")
.long("docgen")
.help("run the documentation generator instead of the prover. \
Generated docs will be written into the directory `./doc` unless configured otherwise via toml"),
)
.arg(
Arg::with_name("verify")
.long("verify")
.takes_value(true)
.possible_values(&["public", "all", "none"])
.value_name("SCOPE")
.help("default scope of verification \
(can be overridden by `pragma verify=true|false`)"),
)
.arg(
Arg::with_name("bench-repeat")
.long("bench-repeat")
.takes_value(true)
.value_name("COUNT")
.validator(is_number)
.help(
"for benchmarking: how many times to call the backend on the verification problem",
),
)
.arg(
Arg::with_name("dependencies")
.long("dependency")
.short("d")
.multiple(true)
.number_of_values(1)
.takes_value(true)
.value_name("PATH_TO_DEPENDENCY")
.help("path to a Move file, or a directory which will be searched for \
Move files, containing dependencies which will not be verified")
)
.arg(
Arg::with_name("sources")
.multiple(true)
| pub generate_only: bool,
/// Whether to generate stubs for native functions.
pub native_stubs: bool, | random_line_split |
cli.rs | z3_exe: String,
/// Whether to use cvc4.
pub use_cvc4: bool,
/// Path to the cvc4 executable.
pub cvc4_exe: String,
/// List of flags to pass on to boogie.
pub boogie_flags: Vec<String>,
/// Whether to use native array theory.
pub use_array_theory: bool,
/// Whether to produce an SMT file for each verification problem.
pub generate_smt: bool,
/// Whether native instead of stratified equality should be used.
pub native_equality: bool,
/// A string determining the type of requires used for parameter type checks. Can be
/// `"requires"` or `"free requires`".
pub type_requires: String,
/// The depth until which stratified functions are expanded.
pub stratification_depth: usize,
/// A string to be used to inline a function of medium size. Can be empty or `{:inline}`.
pub aggressive_func_inline: String,
/// A string to be used to inline a function of small size. Can be empty or `{:inline}`.
pub func_inline: String,
/// A bound to apply to the length of serialization results.
pub serialize_bound: usize,
/// How many times to call the prover backend for the verification problem. This is used for
/// benchmarking.
pub bench_repeat: usize,
}
impl Default for BackendOptions {
fn default() -> Self {
let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new());
Self {
bench_repeat: 1,
boogie_exe: get_env("BOOGIE_EXE"),
z3_exe: get_env("Z3_EXE"),
use_cvc4: false,
cvc4_exe: get_env("CVC4_EXE"),
boogie_flags: vec![],
use_array_theory: false,
generate_smt: false,
native_equality: false,
type_requires: "free requires".to_owned(),
stratification_depth: 4,
aggressive_func_inline: "".to_owned(),
func_inline: "{:inline}".to_owned(),
serialize_bound: 4,
}
}
}
impl Options {
/// Creates options from toml configuration source.
pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> {
Ok(toml::from_str(toml_source)?)
}
/// Creates options from toml configuration file.
pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> {
Self::create_from_toml(&std::fs::read_to_string(toml_file)?)
}
// Creates options from command line arguments. This parses the arguments and terminates
// the program on errors, printing usage information. The first argument is expected to be
// the program name.
pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> {
// Clap definition of the command line interface.
let is_number = |s: String| {
s.parse::<usize>()
.map(|_| ())
.map_err(|_| "expected number".to_string())
};
let cli = App::new("mvp")
.version("0.1.0")
.about("The Move Prover")
.author("The Libra Core Contributors")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.takes_value(true)
.value_name("TOML_FILE")
.env("MOVE_PROVER_CONFIG")
.help("path to a configuration file. \
Values in this file will be overridden by command line flags"),
)
.arg(
Arg::with_name("config-str")
.conflicts_with("config")
.short("C")
.long("config-str")
.takes_value(true)
.multiple(true)
.number_of_values(1)
.value_name("TOML_STRING")
.help("inline configuration string in toml syntax. Can be repeated. \
Use as in `-C=prover.opt=value -C=backend.opt=value`"),
)
.arg(
Arg::with_name("print-config")
.long("print-config")
.help("prints the effective toml configuration, then exits")
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.takes_value(true)
.value_name("BOOGIE_FILE")
.help("path to the boogie output which represents the verification problem"),
)
.arg(
Arg::with_name("verbosity")
.short("v")
.long("verbose")
.takes_value(true)
.possible_values(&["error", "warn", "info", "debug"])
.help("verbosity level."),
)
.arg(
Arg::with_name("generate-only")
.short("g")
.long("generate-only")
.help("only generate boogie file but do not call boogie"),
)
.arg(
Arg::with_name("trace")
.long("trace")
.short("t")
.help("enables automatic tracing of expressions in prover errors")
)
.arg(
Arg::with_name("docgen")
.long("docgen")
.help("run the documentation generator instead of the prover. \
Generated docs will be written into the directory `./doc` unless configured otherwise via toml"),
)
.arg(
Arg::with_name("verify")
.long("verify")
.takes_value(true)
.possible_values(&["public", "all", "none"])
.value_name("SCOPE")
.help("default scope of verification \
(can be overridden by `pragma verify=true|false`)"),
)
.arg(
Arg::with_name("bench-repeat")
.long("bench-repeat")
.takes_value(true)
.value_name("COUNT")
.validator(is_number)
.help(
"for benchmarking: how many times to call the backend on the verification problem",
),
)
.arg(
Arg::with_name("dependencies")
.long("dependency")
.short("d")
.multiple(true)
.number_of_values(1)
.takes_value(true)
.value_name("PATH_TO_DEPENDENCY")
.help("path to a Move file, or a directory which will be searched for \
Move files, containing dependencies which will not be verified")
)
.arg(
Arg::with_name("sources")
.multiple(true)
.value_name("PATH_TO_SOURCE_FILE")
.min_values(1)
.help("the source files to verify"),
)
.after_help("More options available via `--config file` or `--config-str str`. \
Use `--print-config` to see format and current values. \
See `move-prover/src/cli.rs::Option` for documentation.");
// Parse the arguments. This will abort the program on parsing errors and print help.
// It will also accept options like --help.
let matches = cli.get_matches_from(args);
// Initialize options.
let get_vec = |s: &str| -> Vec<String> {
match matches.values_of(s) {
Some(vs) => vs.map(|v| v.to_string()).collect(),
_ => vec![],
}
};
let mut options = if matches.is_present("config") {
Self::create_from_toml_file(matches.value_of("config").unwrap())?
} else if matches.is_present("config-str") {
let config_lines = get_vec("config-str").join("\n");
Self::create_from_toml(&config_lines)?
} else {
Options::default()
};
// Analyze arguments.
if matches.is_present("output") {
options.output_path = matches.value_of("output").unwrap().to_string();
}
if matches.is_present("verbosity") {
options.verbosity_level = match matches.value_of("verbosity").unwrap() {
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"info" => LevelFilter::Info,
"debug" => LevelFilter::Debug,
_ => unreachable!("should not happen"),
}
}
if matches.occurrences_of("sources") > 0 {
options.move_sources = get_vec("sources");
}
if matches.occurrences_of("dependencies") > 0 {
options.move_deps = get_vec("dependencies");
}
if matches.is_present("verify") {
options.prover.verify_scope = match matches.value_of("verify").unwrap() {
"public" => VerificationScope::Public,
"all" => VerificationScope::All,
"none" => VerificationScope::None,
_ => unreachable!("should not happen"),
}
}
if matches.is_present("bench-repeat") {
options.backend.bench_repeat =
matches.value_of("bench-repeat").unwrap().parse::<usize>()?;
}
if matches.is_present("docgen") {
options.run_docgen = true;
}
if matches.is_present("trace") {
options.prover.debug_trace = true;
}
if matches.is_present("print-config") {
println!("{}", toml::to_string(&options).unwrap());
Err(anyhow!("exiting"))
} else | {
Ok(options)
} | conditional_block | |
glideKeeper.py | %s'%self.glidekeeper_constraint)
# string, what our ads will be identified at the factories
self.classad_id=classad_id
ilog('Thread classad_id: %s'%classad_id)
# factory pools is a list of pairs, where
# [0] is factory node
# [1] is factory identity
self.factory_pools=factory_pools
# string or None
self.factory_constraint=factory_constraint
# string
self.collector_node = collector_node
self.proxy_fname=proxy_fname
self.reload_proxy() # provides proxy_data
ilog('Backend info:\n\tfactory_pools: %s\n\tfactory_constraint: %s\n\tcollector_node: %s\n\tproxy_fname: %s'%(dbgp(factory_pools), factory_constraint, collector_node, proxy_fname))
#############################
# keep it simple, start with 0, requests will come later
self.needed_glideins=0
self.need_cleanup = False # if never requested more than 0, then no need to do cleanup
self.running_glideins=0
self.errors=[]
##############################
self.shutdown=False
# if you request 0, all the currenty running ones will be killed
# in all other cases, it is just requesting for more, if appropriate
def request_glideins(self,needed_glideins):
ilog('Requesting %d glidens from thread.'%needed_glideins)
self.needed_glideins=needed_glideins
# use this for monitoring
def get_running_glideins(self):
return self.running_glideins
def soft_kill(self):
ilog('Requesting a soft kill from the thread.')
self.shutdown=True
# this is the main of the class
def run(self):
self.shutdown=False
first=True
while (not self.shutdown) or self.need_cleanup:
if first:
first=False
else:
# do not sleep the first round
time.sleep(20)
self.reload_proxy()
if (self.needed_glideins>0) and (not self.shutdown): # on shutdown clean up, don't ask for more
self.go_request_glideins()
self.need_cleanup = True
else:
if self.need_cleanup:
self.cleanup_glideins()
##############
# INTERNAL
def reload_proxy(self):
ilog('Reloading proxy from fname: %s'%str(self.proxy_fname))
if self.proxy_fname==None:
self.proxy_data=None
return
proxy_fd=open(self.proxy_fname,'r')
try:
self.proxy_data=proxy_fd.read()
(self.public_cert, self.private_cert) = self._parse_proxy_certs(self.proxy_data)
finally:
proxy_fd.close()
return
def _parse_proxy_certs(self, data):
split_data = data.split('\n-')
certs = [x.split('-\n')[1] for x in split_data if not 'END' in x and 'CERTIFICATE' in x]
return certs
def cleanup_glideins(self):
ilog('Thread is cleaning up glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe |
# Deadvertize my add, so the factory knows we are gone
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
ilog('Deadvertising for node %s'%dbgp(factory_pool_node))
try:
glideinFrontendInterface.deadvertizeAllWork(factory_pool_node,self.client_name)
except RuntimeError, e:
self.errors.append((time.time(),"Deadvertizing failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"Deadvertizing failed: %s"%string.join(tb,'')))
# Stop all the glideins I can see
ilog('Getting glidein pool status data.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load(self.glidekeeper_constraint,[('GLIDEIN_COLLECTOR_NAME','s'),('GLIDEIN_MASTER_NAME','s')])
pool_data=pool_status.fetchStored()
except:
self.errors.append((time.time(),"condor_status failed"))
for k in pool_data.keys():
el=pool_data[k]
ilog('Now killing pool with data: (%s -> %s)'%(dbgp(k), dbgp(el)))
try:
condorExe.exe_cmd("../sbin/condor_off","-master -pool %s %s"%(el['GLIDEIN_COLLECTOR_NAME'],el['GLIDEIN_MASTER_NAME']))
except RuntimeError, e:
self.errors.append((time.time(),"condor_off failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"condor_off failed: %s"%string.join(tb,'')))
self.need_cleanup = False
ilog('Finished cleanup.')
def go_request_glideins(self):
ilog('Entered go_request_glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe
from glideinwms.frontend.glideinFrontendPlugins import proxy_plugins, createCredentialList
# query job collector
ilog('Checking the condor pool.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load()#'(IS_MONITOR_VM=!=True)&&(%s)'%self.glidekeeper_constraint,[('State','s')])
running_glideins=len(pool_status.fetchStored())
del pool_status
self.running_glideins=running_glideins
ilog('Found %d glideins in the pool.'%running_glideins)
except:
self.errors.append((time.time(),"condor_status failed"))
return
# query WMS collector
ilog('Checking factory glideins.')
glidein_dict={}
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
factory_identity=factory_pool[1]
try:
if self.proxy_data != None:
full_constraint = self.factory_constraint +' && (PubKeyType=?="RSA") && (GlideinAllowx509_Proxy=!=False)'
else:
full_constraint = self.factory_constraint + ' && (GlideinRequirex509_Proxy=!=True)'
ilog('Running findGlideins with these params: \n\tpool: %s\n\tident: %s\n\tsigtype: %s\n\tconstraints: %s'%(
str(factory_pool_node),
str(None),
str(self.signature_type),
str(full_constraint)
#str(self.proxy_data!=None),
#str(True)
))
factory_glidein_dict=glideinFrontendInterface.findGlideins(
factory_pool_node,
None, #factory_identity, #TODO: How do we authenticate with the factory?
self.signature_type,
full_constraint
#self.proxy_data!=None,
#get_only_matching=True
)
except RuntimeError, e:
factory_glidein_dict={} # in case of error, treat as there is nothing there
ilog('Error from findGlideins: %s'%str(e))
ilog('Found %d possible in factory_pool %s'%(len(factory_glidein_dict.keys()), dbgp(factory_pool)))
for glidename in factory_glidein_dict.keys():
ilog('Now testing glidein with name %s'%glidename)
glidein_el=factory_glidein_dict[glidename]
ilog('Glidein stats: \n\n %s \n\n'%dbgp(glidein_el))
if not glidein_el['attrs'].has_key('PubKeyType'): # no pub key at all, skip
ilog('%s has no PubKeyType -- skipping.'% glidename)
continue
elif glidein_el['attrs']['PubKeyType']=='RSA': # only trust RSA for now
try:
# augment
glidein_el['attrs']['PubKeyObj']=glideinFrontendInterface.pubCrypto.PubRSAKey(str(re.sub(r"\\+n", r"\n", glidein_el['attrs']['PubKeyValue'])))
# and add
glidein_dict[(factory_pool_node,glidename)]=glidein_el
ilog('Adding %s to glidein_dict'%glidename)
except:
ilog('Hit error when adding %s to glidein_dict'%glidename)
continue # skip
else: # invalid key type, skip
ilog('%s has invalid PubKeyType -- skipping.'% glidename)
continue
nr_entries=len(glidein_dict.keys())
if running_glideins>=self.needed_glideins:
additional_glideins=0
else:
# ask for 2/3 since it takes a few cycles to stabilize
additional_glideins=(self.needed_glideins-running_glideins)*2/3+1
if additional | random_line_split | |
glideKeeper.py | %s'%self.glidekeeper_constraint)
# string, what our ads will be identified at the factories
self.classad_id=classad_id
ilog('Thread classad_id: %s'%classad_id)
# factory pools is a list of pairs, where
# [0] is factory node
# [1] is factory identity
self.factory_pools=factory_pools
# string or None
self.factory_constraint=factory_constraint
# string
self.collector_node = collector_node
self.proxy_fname=proxy_fname
self.reload_proxy() # provides proxy_data
ilog('Backend info:\n\tfactory_pools: %s\n\tfactory_constraint: %s\n\tcollector_node: %s\n\tproxy_fname: %s'%(dbgp(factory_pools), factory_constraint, collector_node, proxy_fname))
#############################
# keep it simple, start with 0, requests will come later
self.needed_glideins=0
self.need_cleanup = False # if never requested more than 0, then no need to do cleanup
self.running_glideins=0
self.errors=[]
##############################
self.shutdown=False
# if you request 0, all the currenty running ones will be killed
# in all other cases, it is just requesting for more, if appropriate
def request_glideins(self,needed_glideins):
ilog('Requesting %d glidens from thread.'%needed_glideins)
self.needed_glideins=needed_glideins
# use this for monitoring
def get_running_glideins(self):
return self.running_glideins
def soft_kill(self):
ilog('Requesting a soft kill from the thread.')
self.shutdown=True
# this is the main of the class
def run(self):
|
##############
# INTERNAL
def reload_proxy(self):
ilog('Reloading proxy from fname: %s'%str(self.proxy_fname))
if self.proxy_fname==None:
self.proxy_data=None
return
proxy_fd=open(self.proxy_fname,'r')
try:
self.proxy_data=proxy_fd.read()
(self.public_cert, self.private_cert) = self._parse_proxy_certs(self.proxy_data)
finally:
proxy_fd.close()
return
def _parse_proxy_certs(self, data):
split_data = data.split('\n-')
certs = [x.split('-\n')[1] for x in split_data if not 'END' in x and 'CERTIFICATE' in x]
return certs
def cleanup_glideins(self):
ilog('Thread is cleaning up glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe
# Deadvertize my add, so the factory knows we are gone
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
ilog('Deadvertising for node %s'%dbgp(factory_pool_node))
try:
glideinFrontendInterface.deadvertizeAllWork(factory_pool_node,self.client_name)
except RuntimeError, e:
self.errors.append((time.time(),"Deadvertizing failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"Deadvertizing failed: %s"%string.join(tb,'')))
# Stop all the glideins I can see
ilog('Getting glidein pool status data.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load(self.glidekeeper_constraint,[('GLIDEIN_COLLECTOR_NAME','s'),('GLIDEIN_MASTER_NAME','s')])
pool_data=pool_status.fetchStored()
except:
self.errors.append((time.time(),"condor_status failed"))
for k in pool_data.keys():
el=pool_data[k]
ilog('Now killing pool with data: (%s -> %s)'%(dbgp(k), dbgp(el)))
try:
condorExe.exe_cmd("../sbin/condor_off","-master -pool %s %s"%(el['GLIDEIN_COLLECTOR_NAME'],el['GLIDEIN_MASTER_NAME']))
except RuntimeError, e:
self.errors.append((time.time(),"condor_off failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"condor_off failed: %s"%string.join(tb,'')))
self.need_cleanup = False
ilog('Finished cleanup.')
def go_request_glideins(self):
ilog('Entered go_request_glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe
from glideinwms.frontend.glideinFrontendPlugins import proxy_plugins, createCredentialList
# query job collector
ilog('Checking the condor pool.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load()#'(IS_MONITOR_VM=!=True)&&(%s)'%self.glidekeeper_constraint,[('State','s')])
running_glideins=len(pool_status.fetchStored())
del pool_status
self.running_glideins=running_glideins
ilog('Found %d glideins in the pool.'%running_glideins)
except:
self.errors.append((time.time(),"condor_status failed"))
return
# query WMS collector
ilog('Checking factory glideins.')
glidein_dict={}
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
factory_identity=factory_pool[1]
try:
if self.proxy_data != None:
full_constraint = self.factory_constraint +' && (PubKeyType=?="RSA") && (GlideinAllowx509_Proxy=!=False)'
else:
full_constraint = self.factory_constraint + ' && (GlideinRequirex509_Proxy=!=True)'
ilog('Running findGlideins with these params: \n\tpool: %s\n\tident: %s\n\tsigtype: %s\n\tconstraints: %s'%(
str(factory_pool_node),
str(None),
str(self.signature_type),
str(full_constraint)
#str(self.proxy_data!=None),
#str(True)
))
factory_glidein_dict=glideinFrontendInterface.findGlideins(
factory_pool_node,
None, #factory_identity, #TODO: How do we authenticate with the factory?
self.signature_type,
full_constraint
#self.proxy_data!=None,
#get_only_matching=True
)
except RuntimeError, e:
factory_glidein_dict={} # in case of error, treat as there is nothing there
ilog('Error from findGlideins: %s'%str(e))
ilog('Found %d possible in factory_pool %s'%(len(factory_glidein_dict.keys()), dbgp(factory_pool)))
for glidename in factory_glidein_dict.keys():
ilog('Now testing glidein with name %s'%glidename)
glidein_el=factory_glidein_dict[glidename]
ilog('Glidein stats: \n\n %s \n\n'%dbgp(glidein_el))
if not glidein_el['attrs'].has_key('PubKeyType'): # no pub key at all, skip
ilog('%s has no PubKeyType -- skipping.'% glidename)
continue
elif glidein_el['attrs']['PubKeyType']=='RSA': # only trust RSA for now
try:
# augment
glidein_el['attrs']['PubKeyObj']=glideinFrontendInterface.pubCrypto.PubRSAKey(str(re.sub(r"\\+n", r"\n", glidein_el['attrs']['PubKeyValue'])))
# and add
glidein_dict[(factory_pool_node,glidename)]=glidein_el
ilog('Adding %s to glidein_dict'%glidename)
except:
ilog('Hit error when adding %s to glidein_dict'%glidename)
continue # skip
else: # invalid key type, skip
ilog('%s has invalid PubKeyType -- skipping.'% glidename)
continue
nr_entries=len(glidein_dict.keys())
if running_glideins>=self.needed_glideins:
additional_glideins=0
else:
# ask for 2/3 since it takes a few cycles to stabilize
additional_glideins=(self.needed_glideins-running_glideins)*2/3+1
if | self.shutdown=False
first=True
while (not self.shutdown) or self.need_cleanup:
if first:
first=False
else:
# do not sleep the first round
time.sleep(20)
self.reload_proxy()
if (self.needed_glideins>0) and (not self.shutdown): # on shutdown clean up, don't ask for more
self.go_request_glideins()
self.need_cleanup = True
else:
if self.need_cleanup:
self.cleanup_glideins() | identifier_body |
glideKeeper.py | : %s'%self.glidekeeper_constraint)
# string, what our ads will be identified at the factories
self.classad_id=classad_id
ilog('Thread classad_id: %s'%classad_id)
# factory pools is a list of pairs, where
# [0] is factory node
# [1] is factory identity
self.factory_pools=factory_pools
# string or None
self.factory_constraint=factory_constraint
# string
self.collector_node = collector_node
self.proxy_fname=proxy_fname
self.reload_proxy() # provides proxy_data
ilog('Backend info:\n\tfactory_pools: %s\n\tfactory_constraint: %s\n\tcollector_node: %s\n\tproxy_fname: %s'%(dbgp(factory_pools), factory_constraint, collector_node, proxy_fname))
#############################
# keep it simple, start with 0, requests will come later
self.needed_glideins=0
self.need_cleanup = False # if never requested more than 0, then no need to do cleanup
self.running_glideins=0
self.errors=[]
##############################
self.shutdown=False
# if you request 0, all the currenty running ones will be killed
# in all other cases, it is just requesting for more, if appropriate
def request_glideins(self,needed_glideins):
ilog('Requesting %d glidens from thread.'%needed_glideins)
self.needed_glideins=needed_glideins
# use this for monitoring
def get_running_glideins(self):
return self.running_glideins
def soft_kill(self):
ilog('Requesting a soft kill from the thread.')
self.shutdown=True
# this is the main of the class
def run(self):
self.shutdown=False
first=True
while (not self.shutdown) or self.need_cleanup:
if first:
first=False
else:
# do not sleep the first round
time.sleep(20)
self.reload_proxy()
if (self.needed_glideins>0) and (not self.shutdown): # on shutdown clean up, don't ask for more
self.go_request_glideins()
self.need_cleanup = True
else:
if self.need_cleanup:
self.cleanup_glideins()
##############
# INTERNAL
def reload_proxy(self):
ilog('Reloading proxy from fname: %s'%str(self.proxy_fname))
if self.proxy_fname==None:
self.proxy_data=None
return
proxy_fd=open(self.proxy_fname,'r')
try:
self.proxy_data=proxy_fd.read()
(self.public_cert, self.private_cert) = self._parse_proxy_certs(self.proxy_data)
finally:
proxy_fd.close()
return
def _parse_proxy_certs(self, data):
split_data = data.split('\n-')
certs = [x.split('-\n')[1] for x in split_data if not 'END' in x and 'CERTIFICATE' in x]
return certs
def cleanup_glideins(self):
ilog('Thread is cleaning up glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe
# Deadvertize my add, so the factory knows we are gone
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
ilog('Deadvertising for node %s'%dbgp(factory_pool_node))
try:
glideinFrontendInterface.deadvertizeAllWork(factory_pool_node,self.client_name)
except RuntimeError, e:
self.errors.append((time.time(),"Deadvertizing failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"Deadvertizing failed: %s"%string.join(tb,'')))
# Stop all the glideins I can see
ilog('Getting glidein pool status data.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load(self.glidekeeper_constraint,[('GLIDEIN_COLLECTOR_NAME','s'),('GLIDEIN_MASTER_NAME','s')])
pool_data=pool_status.fetchStored()
except:
self.errors.append((time.time(),"condor_status failed"))
for k in pool_data.keys():
el=pool_data[k]
ilog('Now killing pool with data: (%s -> %s)'%(dbgp(k), dbgp(el)))
try:
condorExe.exe_cmd("../sbin/condor_off","-master -pool %s %s"%(el['GLIDEIN_COLLECTOR_NAME'],el['GLIDEIN_MASTER_NAME']))
except RuntimeError, e:
self.errors.append((time.time(),"condor_off failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"condor_off failed: %s"%string.join(tb,'')))
self.need_cleanup = False
ilog('Finished cleanup.')
def go_request_glideins(self):
ilog('Entered go_request_glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe
from glideinwms.frontend.glideinFrontendPlugins import proxy_plugins, createCredentialList
# query job collector
ilog('Checking the condor pool.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load()#'(IS_MONITOR_VM=!=True)&&(%s)'%self.glidekeeper_constraint,[('State','s')])
running_glideins=len(pool_status.fetchStored())
del pool_status
self.running_glideins=running_glideins
ilog('Found %d glideins in the pool.'%running_glideins)
except:
self.errors.append((time.time(),"condor_status failed"))
return
# query WMS collector
ilog('Checking factory glideins.')
glidein_dict={}
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
factory_identity=factory_pool[1]
try:
if self.proxy_data != None:
full_constraint = self.factory_constraint +' && (PubKeyType=?="RSA") && (GlideinAllowx509_Proxy=!=False)'
else:
full_constraint = self.factory_constraint + ' && (GlideinRequirex509_Proxy=!=True)'
ilog('Running findGlideins with these params: \n\tpool: %s\n\tident: %s\n\tsigtype: %s\n\tconstraints: %s'%(
str(factory_pool_node),
str(None),
str(self.signature_type),
str(full_constraint)
#str(self.proxy_data!=None),
#str(True)
))
factory_glidein_dict=glideinFrontendInterface.findGlideins(
factory_pool_node,
None, #factory_identity, #TODO: How do we authenticate with the factory?
self.signature_type,
full_constraint
#self.proxy_data!=None,
#get_only_matching=True
)
except RuntimeError, e:
factory_glidein_dict={} # in case of error, treat as there is nothing there
ilog('Error from findGlideins: %s'%str(e))
ilog('Found %d possible in factory_pool %s'%(len(factory_glidein_dict.keys()), dbgp(factory_pool)))
for glidename in factory_glidein_dict.keys():
|
nr_entries=len(glidein_dict.keys())
if running_glideins>=self.needed_glideins:
additional_glideins=0
else:
# ask for 2/3 since it takes a few cycles to stabilize
additional_glideins=(self.needed_glideins-running_glideins)*2/3+1
if additional | ilog('Now testing glidein with name %s'%glidename)
glidein_el=factory_glidein_dict[glidename]
ilog('Glidein stats: \n\n %s \n\n'%dbgp(glidein_el))
if not glidein_el['attrs'].has_key('PubKeyType'): # no pub key at all, skip
ilog('%s has no PubKeyType -- skipping.'% glidename)
continue
elif glidein_el['attrs']['PubKeyType']=='RSA': # only trust RSA for now
try:
# augment
glidein_el['attrs']['PubKeyObj']=glideinFrontendInterface.pubCrypto.PubRSAKey(str(re.sub(r"\\+n", r"\n", glidein_el['attrs']['PubKeyValue'])))
# and add
glidein_dict[(factory_pool_node,glidename)]=glidein_el
ilog('Adding %s to glidein_dict'%glidename)
except:
ilog('Hit error when adding %s to glidein_dict'%glidename)
continue # skip
else: # invalid key type, skip
ilog('%s has invalid PubKeyType -- skipping.'% glidename)
continue | conditional_block |
glideKeeper.py | %s'%self.glidekeeper_constraint)
# string, what our ads will be identified at the factories
self.classad_id=classad_id
ilog('Thread classad_id: %s'%classad_id)
# factory pools is a list of pairs, where
# [0] is factory node
# [1] is factory identity
self.factory_pools=factory_pools
# string or None
self.factory_constraint=factory_constraint
# string
self.collector_node = collector_node
self.proxy_fname=proxy_fname
self.reload_proxy() # provides proxy_data
ilog('Backend info:\n\tfactory_pools: %s\n\tfactory_constraint: %s\n\tcollector_node: %s\n\tproxy_fname: %s'%(dbgp(factory_pools), factory_constraint, collector_node, proxy_fname))
#############################
# keep it simple, start with 0, requests will come later
self.needed_glideins=0
self.need_cleanup = False # if never requested more than 0, then no need to do cleanup
self.running_glideins=0
self.errors=[]
##############################
self.shutdown=False
# if you request 0, all the currenty running ones will be killed
# in all other cases, it is just requesting for more, if appropriate
def request_glideins(self,needed_glideins):
ilog('Requesting %d glidens from thread.'%needed_glideins)
self.needed_glideins=needed_glideins
# use this for monitoring
def get_running_glideins(self):
return self.running_glideins
def soft_kill(self):
ilog('Requesting a soft kill from the thread.')
self.shutdown=True
# this is the main of the class
def run(self):
self.shutdown=False
first=True
while (not self.shutdown) or self.need_cleanup:
if first:
first=False
else:
# do not sleep the first round
time.sleep(20)
self.reload_proxy()
if (self.needed_glideins>0) and (not self.shutdown): # on shutdown clean up, don't ask for more
self.go_request_glideins()
self.need_cleanup = True
else:
if self.need_cleanup:
self.cleanup_glideins()
##############
# INTERNAL
def reload_proxy(self):
ilog('Reloading proxy from fname: %s'%str(self.proxy_fname))
if self.proxy_fname==None:
self.proxy_data=None
return
proxy_fd=open(self.proxy_fname,'r')
try:
self.proxy_data=proxy_fd.read()
(self.public_cert, self.private_cert) = self._parse_proxy_certs(self.proxy_data)
finally:
proxy_fd.close()
return
def _parse_proxy_certs(self, data):
split_data = data.split('\n-')
certs = [x.split('-\n')[1] for x in split_data if not 'END' in x and 'CERTIFICATE' in x]
return certs
def | (self):
ilog('Thread is cleaning up glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe
# Deadvertize my add, so the factory knows we are gone
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
ilog('Deadvertising for node %s'%dbgp(factory_pool_node))
try:
glideinFrontendInterface.deadvertizeAllWork(factory_pool_node,self.client_name)
except RuntimeError, e:
self.errors.append((time.time(),"Deadvertizing failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"Deadvertizing failed: %s"%string.join(tb,'')))
# Stop all the glideins I can see
ilog('Getting glidein pool status data.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load(self.glidekeeper_constraint,[('GLIDEIN_COLLECTOR_NAME','s'),('GLIDEIN_MASTER_NAME','s')])
pool_data=pool_status.fetchStored()
except:
self.errors.append((time.time(),"condor_status failed"))
for k in pool_data.keys():
el=pool_data[k]
ilog('Now killing pool with data: (%s -> %s)'%(dbgp(k), dbgp(el)))
try:
condorExe.exe_cmd("../sbin/condor_off","-master -pool %s %s"%(el['GLIDEIN_COLLECTOR_NAME'],el['GLIDEIN_MASTER_NAME']))
except RuntimeError, e:
self.errors.append((time.time(),"condor_off failed: %s"%e))
except:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
self.errors.append((time.time(),"condor_off failed: %s"%string.join(tb,'')))
self.need_cleanup = False
ilog('Finished cleanup.')
def go_request_glideins(self):
ilog('Entered go_request_glideins.')
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.lib import condorMonitor, condorExe
from glideinwms.frontend.glideinFrontendPlugins import proxy_plugins, createCredentialList
# query job collector
ilog('Checking the condor pool.')
try:
pool_status=condorMonitor.CondorStatus()
pool_status.load()#'(IS_MONITOR_VM=!=True)&&(%s)'%self.glidekeeper_constraint,[('State','s')])
running_glideins=len(pool_status.fetchStored())
del pool_status
self.running_glideins=running_glideins
ilog('Found %d glideins in the pool.'%running_glideins)
except:
self.errors.append((time.time(),"condor_status failed"))
return
# query WMS collector
ilog('Checking factory glideins.')
glidein_dict={}
for factory_pool in self.factory_pools:
factory_pool_node=factory_pool[0]
factory_identity=factory_pool[1]
try:
if self.proxy_data != None:
full_constraint = self.factory_constraint +' && (PubKeyType=?="RSA") && (GlideinAllowx509_Proxy=!=False)'
else:
full_constraint = self.factory_constraint + ' && (GlideinRequirex509_Proxy=!=True)'
ilog('Running findGlideins with these params: \n\tpool: %s\n\tident: %s\n\tsigtype: %s\n\tconstraints: %s'%(
str(factory_pool_node),
str(None),
str(self.signature_type),
str(full_constraint)
#str(self.proxy_data!=None),
#str(True)
))
factory_glidein_dict=glideinFrontendInterface.findGlideins(
factory_pool_node,
None, #factory_identity, #TODO: How do we authenticate with the factory?
self.signature_type,
full_constraint
#self.proxy_data!=None,
#get_only_matching=True
)
except RuntimeError, e:
factory_glidein_dict={} # in case of error, treat as there is nothing there
ilog('Error from findGlideins: %s'%str(e))
ilog('Found %d possible in factory_pool %s'%(len(factory_glidein_dict.keys()), dbgp(factory_pool)))
for glidename in factory_glidein_dict.keys():
ilog('Now testing glidein with name %s'%glidename)
glidein_el=factory_glidein_dict[glidename]
ilog('Glidein stats: \n\n %s \n\n'%dbgp(glidein_el))
if not glidein_el['attrs'].has_key('PubKeyType'): # no pub key at all, skip
ilog('%s has no PubKeyType -- skipping.'% glidename)
continue
elif glidein_el['attrs']['PubKeyType']=='RSA': # only trust RSA for now
try:
# augment
glidein_el['attrs']['PubKeyObj']=glideinFrontendInterface.pubCrypto.PubRSAKey(str(re.sub(r"\\+n", r"\n", glidein_el['attrs']['PubKeyValue'])))
# and add
glidein_dict[(factory_pool_node,glidename)]=glidein_el
ilog('Adding %s to glidein_dict'%glidename)
except:
ilog('Hit error when adding %s to glidein_dict'%glidename)
continue # skip
else: # invalid key type, skip
ilog('%s has invalid PubKeyType -- skipping.'% glidename)
continue
nr_entries=len(glidein_dict.keys())
if running_glideins>=self.needed_glideins:
additional_glideins=0
else:
# ask for 2/3 since it takes a few cycles to stabilize
additional_glideins=(self.needed_glideins-running_glideins)*2/3+1
if | cleanup_glideins | identifier_name |
main.rs | "Number of panels painted at least once: {}",
painted_hull.len()
);
let registration_id_hull = paint_hull(
robot_program,
iter::once((Point::origin(), Color::White)).collect(),
Color::Black,
)?;
print_hull(®istration_id_hull, Color::Black);
Ok(())
}
fn print_hull(hull: &HashMap<Point, Color>, default_color: Color) {
let ((min_x, max_x), (min_y, max_y)) = (
hull.keys()
.map(|p| p.x)
.minmax()
.into_option()
.unwrap_or_default(),
hull.keys()
.map(|p| p.y)
.minmax()
.into_option()
.unwrap_or_default(),
);
for y in (min_y..=max_y).rev() {
for x in min_x..=max_x {
if hull.get(&Point::new(x, y)).unwrap_or(&default_color) == &Color::Black {
print!("█");
} else {
print!(" ");
}
}
println!()
}
}
fn paint_hull(
robot_program: Vec<isize>,
starting_hull: HashMap<Point, Color>,
default_color: Color,
) -> Result<HashMap<Point, Color>, anyhow::Error> {
use Color::*;
use Direction::*;
// Basically, we're using Mutex as a way of telling Rust that we know
// for sure we aren't gonna be accessing these values concurrently.
// The borrow checker is then satisfied.
let hull = Mutex::new(starting_hull);
let current_location = Mutex::new(Point::origin());
let mut is_paint_output = true;
let mut facing_direction = Up;
futures_executor::block_on(run_program(
robot_program,
tokio_stream::iter(iter::from_fn(|| {
let current_location = *(current_location.lock().unwrap());
Some(
hull.lock()
.unwrap()
.get(¤t_location)
.copied()
.unwrap_or(default_color),
)
}))
.map(|color| if color == Black { 0 } else { 1 }),
|output| {
let mut current_location = current_location.lock().unwrap();
if is_paint_output {
hull.lock()
.unwrap()
.entry(*current_location)
.insert(if output == 0 { Black } else { White });
} else {
let turn_direction = if output == 0 { Left } else { Right };
(*current_location, facing_direction) = match (turn_direction, facing_direction) {
(Left, Right) | (Right, Left) => {
(Point::new(current_location.x, current_location.y + 1), Up)
}
(Left, Left) | (Right, Right) => {
(Point::new(current_location.x, current_location.y - 1), Down)
}
(Left, Up) | (Right, Down) => {
(Point::new(current_location.x - 1, current_location.y), Left)
}
(Left, Down) | (Right, Up) => (
Point::new(current_location.x + 1, current_location.y),
Right,
),
_ => unsafe { std::hint::unreachable_unchecked() },
}
}
is_paint_output = !is_paint_output;
},
))?;
Ok(hull.into_inner().unwrap())
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum Color {
White,
Black,
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum Direction {
Up,
Down,
Left,
Right,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, From)]
struct Point {
x: isize,
y: isize,
}
impl fmt::Debug for Point {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("").field(&self.x).field(&self.y).finish()
}
}
impl Point {
fn origin() -> Self {
Self::new(0, 0)
}
fn new(x: isize, y: isize) -> Self {
Self::from((x, y))
}
}
async fn run_program(
mut program: Vec<isize>,
input: impl Stream<Item = isize>,
mut output_fn: impl FnMut(isize),
) -> Result<Vec<isize>, anyhow::Error> {
pin!(input);
let mut instruction_pointer = 0;
let mut relative_base = 0;
loop {
let opcode = usize::try_from(program[instruction_pointer])
.map_err(|_| anyhow!("Found a negative integer where an opcode was expected"))?;
let parameter_modes = get_parameter_modes(opcode)?;
let parameter_mode_of = |param: usize| {
parameter_modes
.get(param)
.unwrap_or(&ParameterModes::Position)
};
let mut get_param = |param: usize, need_write: bool| {
let param_value = program
.get(instruction_pointer + param + 1)
.copied()
.ok_or(anyhow!("Parameter not found"))?;
let param_mode = parameter_mode_of(param);
if need_write {
ensure!(
[ParameterModes::Position, ParameterModes::Relative].contains(param_mode),
"Invalid argument for opcode {}: {}",
opcode,
param_value
);
}
Ok(match param_mode {
ParameterModes::Position | ParameterModes::Relative => {
let raw_idx = if param_mode == &ParameterModes::Relative {
relative_base + param_value
} else {
param_value
};
let idx = usize::try_from(raw_idx).map_err(|_| {
anyhow!(
"The program is attempting to access a negative index: {}",
raw_idx
)
})?;
if idx >= program.len() {
program.resize_with(idx + 1, || 0);
}
if !need_write {
program[idx]
} else {
raw_idx
}
}
ParameterModes::Immediate => param_value,
})
};
// x % 100 gets the last 2 digits of a number,
// no matter how long it is.
match opcode % 100 {
1 | 2 | 7 | 8 => {
let (x, y, result_idx) = (
get_param(0, false)?,
get_param(1, false)?,
get_param(2, true)? as usize,
);
match opcode % 100 {
1 => program[result_idx] = x + y,
2 => program[result_idx] = x * y,
7 => program[result_idx] = (x < y) as isize,
8 => program[result_idx] = (x == y) as isize,
_ => unsafe { std::hint::unreachable_unchecked() },
}
instruction_pointer += 4;
}
5 | 6 => {
let (checked_value, jump_point) = (
get_param(0, false)?,
usize::try_from(get_param(1, false)?).map_err(|_| {
anyhow!("Found a negative integer where a jump point was expected")
})?,
);
let should_jump = match opcode % 100 {
5 => checked_value != 0,
6 => checked_value == 0,
_ => unsafe { std::hint::unreachable_unchecked() },
};
if should_jump {
instruction_pointer = jump_point;
} else {
instruction_pointer += 3;
}
}
3 | 4 | 9 => {
match opcode % 100 {
3 => {
let input = input
.next()
.await
.ok_or(anyhow!("Found an input opcode but no input was provided"))?;
let input_storage = get_param(0, true)? as usize;
program[input_storage] = input;
}
4 => output_fn(get_param(0, false)?),
9 => relative_base += get_param(0, false)?,
_ => unsafe { std::hint::unreachable_unchecked() },
}
instruction_pointer += 2;
}
99 => return Ok(program),
op => bail!("Encountered an unknown opcode: {}", op),
}
}
}
fn get_parameter_modes(opcode: usize) -> Result<Vec<ParameterModes>, anyhow::Error> {
opcode
.digits()
.rev()
.skip(2)
.map(ParameterModes::try_from)
.try_collect()
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum ParameterModes {
Position,
Immediate,
Relative,
}
impl TryFrom<u8> for ParameterModes {
type Error = anyhow::Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
Ok(match value {
0 => Self::Position,
1 => Self::Immediate,
2 => Self::Relative,
_ => bail!("Unknown parameter mode: {}", value),
})
}
}
fn parse_input(program_str: &str) -> Result<Vec<isize>, anyhow::Error> {
| program_str
.split(",")
.map(|num_str| {
num_str
.trim()
.parse()
.map_err(|_| anyhow!("Could not parse number in program as isize: '{}'", num_str))
})
.try_collect()
}
| identifier_body | |
main.rs | Mutex};
use tokio::pin;
use tokio_stream::{Stream, StreamExt};
fn main() -> Result<(), anyhow::Error> {
let matches = App::new("2019-11")
.arg(Arg::from_usage("[input] 'Problem input file'").default_value("input.txt"))
.get_matches();
let input_filename = matches.value_of("input").unwrap();
let program_str = fs::read_to_string(input_filename)?.replace("\r\n", "\n");
let robot_program = parse_input(&program_str)?;
let painted_hull = paint_hull(robot_program.clone(), HashMap::new(), Color::Black)?;
println!(
"Number of panels painted at least once: {}",
painted_hull.len()
);
let registration_id_hull = paint_hull(
robot_program,
iter::once((Point::origin(), Color::White)).collect(),
Color::Black,
)?;
print_hull(®istration_id_hull, Color::Black);
Ok(())
}
fn print_hull(hull: &HashMap<Point, Color>, default_color: Color) {
let ((min_x, max_x), (min_y, max_y)) = (
hull.keys()
.map(|p| p.x)
.minmax()
.into_option()
.unwrap_or_default(),
hull.keys()
.map(|p| p.y)
.minmax()
.into_option()
.unwrap_or_default(),
);
for y in (min_y..=max_y).rev() {
for x in min_x..=max_x {
if hull.get(&Point::new(x, y)).unwrap_or(&default_color) == &Color::Black {
print!("█");
} else {
print!(" ");
}
}
println!()
}
}
fn paint_hull(
robot_program: Vec<isize>,
starting_hull: HashMap<Point, Color>,
default_color: Color,
) -> Result<HashMap<Point, Color>, anyhow::Error> {
use Color::*;
use Direction::*;
// Basically, we're using Mutex as a way of telling Rust that we know
// for sure we aren't gonna be accessing these values concurrently.
// The borrow checker is then satisfied.
let hull = Mutex::new(starting_hull);
let current_location = Mutex::new(Point::origin());
let mut is_paint_output = true;
let mut facing_direction = Up;
futures_executor::block_on(run_program(
robot_program,
tokio_stream::iter(iter::from_fn(|| {
let current_location = *(current_location.lock().unwrap());
Some(
hull.lock()
.unwrap()
.get(¤t_location)
.copied()
.unwrap_or(default_color),
)
}))
.map(|color| if color == Black { 0 } else { 1 }),
|output| {
let mut current_location = current_location.lock().unwrap();
if is_paint_output {
hull.lock()
.unwrap()
.entry(*current_location)
.insert(if output == 0 { Black } else { White });
} else {
let turn_direction = if output == 0 { Left } else { Right };
(*current_location, facing_direction) = match (turn_direction, facing_direction) {
(Left, Right) | (Right, Left) => {
(Point::new(current_location.x, current_location.y + 1), Up)
}
(Left, Left) | (Right, Right) => {
(Point::new(current_location.x, current_location.y - 1), Down)
}
(Left, Up) | (Right, Down) => {
(Point::new(current_location.x - 1, current_location.y), Left)
}
(Left, Down) | (Right, Up) => (
Point::new(current_location.x + 1, current_location.y),
Right,
),
_ => unsafe { std::hint::unreachable_unchecked() },
}
}
is_paint_output = !is_paint_output;
},
))?;
Ok(hull.into_inner().unwrap())
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum Color {
White,
Black,
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum Direction {
Up,
Down,
Left,
Right,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, From)]
struct Point {
x: isize,
y: isize,
}
impl fmt::Debug for Point {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("").field(&self.x).field(&self.y).finish()
}
}
impl Point {
fn origin() -> Self {
Self::new(0, 0)
}
fn new(x: isize, y: isize) -> Self {
Self::from((x, y))
}
}
async fn run_program(
mut program: Vec<isize>,
input: impl Stream<Item = isize>,
mut output_fn: impl FnMut(isize),
) -> Result<Vec<isize>, anyhow::Error> {
pin!(input);
let mut instruction_pointer = 0;
let mut relative_base = 0;
loop {
let opcode = usize::try_from(program[instruction_pointer])
.map_err(|_| anyhow!("Found a negative integer where an opcode was expected"))?;
let parameter_modes = get_parameter_modes(opcode)?;
let parameter_mode_of = |param: usize| {
parameter_modes
.get(param)
.unwrap_or(&ParameterModes::Position)
};
let mut get_param = |param: usize, need_write: bool| {
let param_value = program
.get(instruction_pointer + param + 1)
.copied()
.ok_or(anyhow!("Parameter not found"))?;
let param_mode = parameter_mode_of(param);
if need_write {
ensure!(
[ParameterModes::Position, ParameterModes::Relative].contains(param_mode),
"Invalid argument for opcode {}: {}",
opcode,
param_value
);
}
Ok(match param_mode {
ParameterModes::Position | ParameterModes::Relative => {
let raw_idx = if param_mode == &ParameterModes::Relative {
relative_base + param_value
} else {
param_value
};
let idx = usize::try_from(raw_idx).map_err(|_| {
anyhow!(
"The program is attempting to access a negative index: {}",
raw_idx
)
})?;
if idx >= program.len() {
program.resize_with(idx + 1, || 0);
}
if !need_write {
program[idx]
} else {
raw_idx
}
}
ParameterModes::Immediate => param_value,
})
};
// x % 100 gets the last 2 digits of a number,
// no matter how long it is.
match opcode % 100 {
1 | 2 | 7 | 8 => {
let (x, y, result_idx) = (
get_param(0, false)?,
get_param(1, false)?,
get_param(2, true)? as usize,
);
match opcode % 100 {
1 => program[result_idx] = x + y,
2 => program[result_idx] = x * y,
7 => program[result_idx] = (x < y) as isize,
8 => program[result_idx] = (x == y) as isize,
_ => unsafe { std::hint::unreachable_unchecked() },
}
instruction_pointer += 4;
}
5 | 6 => {
let (checked_value, jump_point) = (
get_param(0, false)?,
usize::try_from(get_param(1, false)?).map_err(|_| {
anyhow!("Found a negative integer where a jump point was expected")
})?,
);
let should_jump = match opcode % 100 {
5 => checked_value != 0,
6 => checked_value == 0,
_ => unsafe { std::hint::unreachable_unchecked() },
};
if should_jump {
instruction_pointer = jump_point;
} else {
instruction_pointer += 3;
}
}
3 | 4 | 9 => {
match opcode % 100 {
3 => {
let input = input
.next()
.await
.ok_or(anyhow!("Found an input opcode but no input was provided"))?;
let input_storage = get_param(0, true)? as usize;
program[input_storage] = input;
}
4 => output_fn(get_param(0, false)?),
9 => relative_base += get_param(0, false)?,
_ => unsafe { std::hint::unreachable_unchecked() },
}
instruction_pointer += 2;
}
99 => return Ok(program),
op => bail!("Encountered an unknown opcode: {}", op),
}
}
}
fn ge | pcode: usize) -> Result<Vec<ParameterModes>, anyhow::Error> {
opcode
.digits()
.rev()
.skip(2)
.map(ParameterModes::try_from)
.try_collect()
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum ParameterModes {
Position,
Immediate,
Relative,
}
impl TryFrom<u8> for ParameterModes {
type Error = anyhow:: | t_parameter_modes(o | identifier_name |
main.rs | ::Mutex};
use tokio::pin;
use tokio_stream::{Stream, StreamExt};
fn main() -> Result<(), anyhow::Error> {
let matches = App::new("2019-11")
.arg(Arg::from_usage("[input] 'Problem input file'").default_value("input.txt"))
.get_matches();
let input_filename = matches.value_of("input").unwrap();
let program_str = fs::read_to_string(input_filename)?.replace("\r\n", "\n");
let robot_program = parse_input(&program_str)?;
let painted_hull = paint_hull(robot_program.clone(), HashMap::new(), Color::Black)?;
println!(
"Number of panels painted at least once: {}",
painted_hull.len()
);
let registration_id_hull = paint_hull(
robot_program,
iter::once((Point::origin(), Color::White)).collect(),
Color::Black,
)?;
print_hull(®istration_id_hull, Color::Black);
Ok(())
}
fn print_hull(hull: &HashMap<Point, Color>, default_color: Color) {
let ((min_x, max_x), (min_y, max_y)) = (
hull.keys()
.map(|p| p.x)
.minmax()
.into_option()
.unwrap_or_default(),
hull.keys()
.map(|p| p.y)
.minmax()
.into_option()
.unwrap_or_default(),
);
for y in (min_y..=max_y).rev() {
for x in min_x..=max_x {
if hull.get(&Point::new(x, y)).unwrap_or(&default_color) == &Color::Black {
print!("█");
} else {
print!(" ");
}
}
println!()
}
}
fn paint_hull(
robot_program: Vec<isize>,
starting_hull: HashMap<Point, Color>,
default_color: Color,
) -> Result<HashMap<Point, Color>, anyhow::Error> {
use Color::*;
use Direction::*;
// Basically, we're using Mutex as a way of telling Rust that we know
// for sure we aren't gonna be accessing these values concurrently.
// The borrow checker is then satisfied.
let hull = Mutex::new(starting_hull);
let current_location = Mutex::new(Point::origin());
let mut is_paint_output = true;
let mut facing_direction = Up;
futures_executor::block_on(run_program(
robot_program,
tokio_stream::iter(iter::from_fn(|| {
let current_location = *(current_location.lock().unwrap());
Some(
hull.lock()
.unwrap()
.get(¤t_location)
.copied()
.unwrap_or(default_color),
)
}))
.map(|color| if color == Black { 0 } else { 1 }),
|output| {
let mut current_location = current_location.lock().unwrap();
if is_paint_output {
hull.lock()
.unwrap()
.entry(*current_location)
.insert(if output == 0 { Black } else { White });
} else {
let turn_direction = if output == 0 { Left } else { Right };
(*current_location, facing_direction) = match (turn_direction, facing_direction) {
(Left, Right) | (Right, Left) => {
(Point::new(current_location.x, current_location.y + 1), Up)
}
(Left, Left) | (Right, Right) => {
(Point::new(current_location.x, current_location.y - 1), Down)
}
(Left, Up) | (Right, Down) => {
(Point::new(current_location.x - 1, current_location.y), Left)
}
(Left, Down) | (Right, Up) => (
Point::new(current_location.x + 1, current_location.y),
Right,
),
_ => unsafe { std::hint::unreachable_unchecked() },
}
}
is_paint_output = !is_paint_output;
},
))?;
Ok(hull.into_inner().unwrap())
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum Color {
White,
Black, | Up,
Down,
Left,
Right,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, From)]
struct Point {
x: isize,
y: isize,
}
impl fmt::Debug for Point {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("").field(&self.x).field(&self.y).finish()
}
}
impl Point {
fn origin() -> Self {
Self::new(0, 0)
}
fn new(x: isize, y: isize) -> Self {
Self::from((x, y))
}
}
async fn run_program(
mut program: Vec<isize>,
input: impl Stream<Item = isize>,
mut output_fn: impl FnMut(isize),
) -> Result<Vec<isize>, anyhow::Error> {
pin!(input);
let mut instruction_pointer = 0;
let mut relative_base = 0;
loop {
let opcode = usize::try_from(program[instruction_pointer])
.map_err(|_| anyhow!("Found a negative integer where an opcode was expected"))?;
let parameter_modes = get_parameter_modes(opcode)?;
let parameter_mode_of = |param: usize| {
parameter_modes
.get(param)
.unwrap_or(&ParameterModes::Position)
};
let mut get_param = |param: usize, need_write: bool| {
let param_value = program
.get(instruction_pointer + param + 1)
.copied()
.ok_or(anyhow!("Parameter not found"))?;
let param_mode = parameter_mode_of(param);
if need_write {
ensure!(
[ParameterModes::Position, ParameterModes::Relative].contains(param_mode),
"Invalid argument for opcode {}: {}",
opcode,
param_value
);
}
Ok(match param_mode {
ParameterModes::Position | ParameterModes::Relative => {
let raw_idx = if param_mode == &ParameterModes::Relative {
relative_base + param_value
} else {
param_value
};
let idx = usize::try_from(raw_idx).map_err(|_| {
anyhow!(
"The program is attempting to access a negative index: {}",
raw_idx
)
})?;
if idx >= program.len() {
program.resize_with(idx + 1, || 0);
}
if !need_write {
program[idx]
} else {
raw_idx
}
}
ParameterModes::Immediate => param_value,
})
};
// x % 100 gets the last 2 digits of a number,
// no matter how long it is.
match opcode % 100 {
1 | 2 | 7 | 8 => {
let (x, y, result_idx) = (
get_param(0, false)?,
get_param(1, false)?,
get_param(2, true)? as usize,
);
match opcode % 100 {
1 => program[result_idx] = x + y,
2 => program[result_idx] = x * y,
7 => program[result_idx] = (x < y) as isize,
8 => program[result_idx] = (x == y) as isize,
_ => unsafe { std::hint::unreachable_unchecked() },
}
instruction_pointer += 4;
}
5 | 6 => {
let (checked_value, jump_point) = (
get_param(0, false)?,
usize::try_from(get_param(1, false)?).map_err(|_| {
anyhow!("Found a negative integer where a jump point was expected")
})?,
);
let should_jump = match opcode % 100 {
5 => checked_value != 0,
6 => checked_value == 0,
_ => unsafe { std::hint::unreachable_unchecked() },
};
if should_jump {
instruction_pointer = jump_point;
} else {
instruction_pointer += 3;
}
}
3 | 4 | 9 => {
match opcode % 100 {
3 => {
let input = input
.next()
.await
.ok_or(anyhow!("Found an input opcode but no input was provided"))?;
let input_storage = get_param(0, true)? as usize;
program[input_storage] = input;
}
4 => output_fn(get_param(0, false)?),
9 => relative_base += get_param(0, false)?,
_ => unsafe { std::hint::unreachable_unchecked() },
}
instruction_pointer += 2;
}
99 => return Ok(program),
op => bail!("Encountered an unknown opcode: {}", op),
}
}
}
fn get_parameter_modes(opcode: usize) -> Result<Vec<ParameterModes>, anyhow::Error> {
opcode
.digits()
.rev()
.skip(2)
.map(ParameterModes::try_from)
.try_collect()
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum ParameterModes {
Position,
Immediate,
Relative,
}
impl TryFrom<u8> for ParameterModes {
type Error = anyhow::Error;
| }
#[derive(Clone, Copy, PartialEq, Eq)]
enum Direction { | random_line_split |
env.rs | {
Single(KObj),
Range(Vec<KObj>),
None,
}
/// Keep track of our repl environment
pub struct Env {
pub config: Config,
pub click_config: ClickConfig,
click_config_path: PathBuf,
pub quit: bool,
pub need_new_editor: bool,
pub kluster: Option<Kluster>,
pub namespace: Option<String>,
current_selection: ObjectSelection,
last_objs: LastList,
pub ctrlcbool: Arc<AtomicBool>,
port_forwards: Vec<PortForward>,
pub prompt: String,
range_str: Option<String>,
pub tempdir: std::io::Result<TempDir>,
}
lazy_static! {
static ref CTC_BOOL: Arc<AtomicBool> = {
let b = Arc::new(AtomicBool::new(false));
let r = b.clone();
ctrlc::set_handler(move || {
r.store(true, Ordering::SeqCst);
})
.expect("Error setting Ctrl-C handler");
b
};
}
impl Env {
pub fn new(config: Config, click_config: ClickConfig, click_config_path: PathBuf) -> Env {
let namespace = click_config.namespace.clone();
let context = click_config.context.clone();
let mut env = Env {
config,
click_config,
click_config_path,
quit: false,
need_new_editor: false,
kluster: None,
namespace,
current_selection: ObjectSelection::None,
last_objs: LastList::None,
ctrlcbool: CTC_BOOL.clone(),
port_forwards: Vec::new(),
prompt: format!(
"[{}] [{}] [{}] > ",
Red.paint("none"),
Green.paint("none"),
Yellow.paint("none")
),
range_str: None,
tempdir: TempDir::new("click"),
};
env.set_context(context.as_deref());
env
}
pub fn current_selection(&self) -> &ObjectSelection {
&self.current_selection
}
pub fn save_click_config(&mut self) {
self.click_config.namespace = self.namespace.clone();
self.click_config.context = self.kluster.as_ref().map(|k| k.name.clone());
self.click_config
.save_to_file(self.click_config_path.as_path().to_str().unwrap())
.unwrap();
}
// sets the prompt string based on current settings
fn set_prompt(&mut self) {
self.prompt = format!(
"[{}] [{}] [{}] > ",
if let Some(ref k) = self.kluster {
Red.bold().paint(k.name.as_str())
} else {
Red.paint("none")
},
if let Some(ref n) = self.namespace {
Green.bold().paint(n.as_str())
} else {
Green.paint("none")
},
match self.current_selection {
ObjectSelection::Single(ref obj) => obj.prompt_str(),
ObjectSelection::Range(_) => Blue.paint(self.range_str.as_ref().unwrap()),
ObjectSelection::None => Yellow.paint("none"),
}
);
}
pub fn get_rustyline_conf(&self) -> rustyconfig::Config {
self.click_config.get_rustyline_conf()
}
pub fn get_contexts(&self) -> &BTreeMap<String, ::config::ContextConf> {
&self.config.contexts
}
pub fn set_context(&mut self, ctx: Option<&str>) {
if let Some(cname) = ctx {
self.kluster = match self.config.cluster_for_context(cname, &self.click_config) {
Ok(k) => Some(k),
Err(e) => {
println!(
"[WARN] Couldn't find/load context {}, now no current context. \
Error: {}",
cname, e
);
None
}
};
self.save_click_config();
self.set_prompt();
}
}
pub fn set_namespace(&mut self, namespace: Option<&str>) {
let mut do_clear = false;
if let (&Some(ref my_ns), Some(new_ns)) = (&self.namespace, namespace) {
if my_ns.as_str() != new_ns {
do_clear = true; // need to use bool since self is borrowed here
}
}
if do_clear {
self.clear_current();
}
self.namespace = namespace.map(|n| n.to_owned());
self.set_prompt();
}
pub fn set_editor(&mut self, editor: Option<&str>) {
self.click_config.editor = editor.map(|s| s.to_string());
}
pub fn set_terminal(&mut self, terminal: Option<&str>) {
self.click_config.terminal = terminal.map(|s| s.to_string());
}
pub fn set_completion_type(&mut self, comptype: config::CompletionType) {
self.click_config.completiontype = comptype;
self.need_new_editor = true;
}
pub fn set_edit_mode(&mut self, editmode: config::EditMode) {
self.click_config.editmode = editmode;
self.need_new_editor = true;
}
// Return the current position of the specified alias in the Vec, or None if it's not there
fn alias_position(&self, alias: &str) -> Option<usize> {
self.click_config
.aliases
.iter()
.position(|a| a.alias == *alias)
}
pub fn add_alias(&mut self, alias: Alias) {
self.remove_alias(&alias.alias);
self.click_config.aliases.push(alias);
self.save_click_config();
}
pub fn remove_alias(&mut self, alias: &str) -> bool {
match self.alias_position(alias) {
Some(p) => {
self.click_config.aliases.remove(p);
self.save_click_config();
true
}
None => false,
}
}
pub fn set_lastlist(&mut self, list: LastList) {
self.last_objs = list;
}
pub fn clear_current(&mut self) {
self.current_selection = ObjectSelection::None;
self.range_str = None;
self.set_prompt();
}
/// get the item from the last list at the specified index
pub fn item_at(&self, index: usize) -> Option<KObj> {
match self.last_objs {
LastList::None => {
println!("No active object list");
None
}
LastList::PodList(ref pl) => pl.items.get(index).map(|pod| {
let containers = pod
.spec
.containers
.iter()
.map(|cspec| cspec.name.clone())
.collect();
KObj::from_metadata(&pod.metadata, ObjType::Pod { containers })
}),
LastList::NodeList(ref nl) => nl.items.get(index).map(|n| KObj {
name: n.metadata.name.clone(),
namespace: None,
typ: ObjType::Node,
}),
LastList::DeploymentList(ref dl) => dl
.items
.get(index)
.map(|dep| KObj::from_metadata(&dep.metadata, ObjType::Deployment)),
LastList::ServiceList(ref sl) => sl
.items
.get(index)
.map(|service| KObj::from_metadata(&service.metadata, ObjType::Service)),
LastList::ReplicaSetList(ref rsl) => rsl
.items
.get(index)
.and_then(|replicaset| KObj::from_value(replicaset, ObjType::ReplicaSet)),
LastList::StatefulSetList(ref stfs) => stfs
.items
.get(index)
.and_then(|statefulset| KObj::from_value(statefulset, ObjType::StatefulSet)),
LastList::ConfigMapList(ref cml) => cml
.items
.get(index)
.and_then(|cm| KObj::from_value(cm, ObjType::ConfigMap)),
LastList::SecretList(ref sl) => sl
.items
.get(index)
.and_then(|secret| KObj::from_value(secret, ObjType::Secret)),
LastList::JobList(ref jl) => jl
.items
.get(index)
.and_then(|job| KObj::from_value(job, ObjType::Job)),
}
}
pub fn set_current(&mut self, num: usize) {
self.current_selection = match self.item_at(num) {
Some(obj) => ObjectSelection::Single(obj),
None => ObjectSelection::None,
};
self.range_str = None;
self.set_prompt();
}
pub fn set_range(&mut self, range: Vec<KObj>) {
let range_str = if range.is_empty() {
"Empty range".to_string()
} else {
let mut r = format!("{} {}", range.len(), range.get(0).unwrap().type_str());
if range.len() > 1 {
r.push('s');
}
r.push_str(" selected");
r
};
self.current_selection = ObjectSelection::Range(range);
self.range_str = Some(range_str);
self.set_prompt();
}
pub fn current_pod(&self) -> Option<&KObj> {
match self.current_selection {
ObjectSelection::Single(ref obj) => match obj.typ {
ObjType::Pod { .. } => Some(obj),
_ => None,
},
_ => None,
}
}
pub fn run_on_kluster | ObjectSelection | identifier_name | |
env.rs | io::Result<TempDir>,
}
lazy_static! {
static ref CTC_BOOL: Arc<AtomicBool> = {
let b = Arc::new(AtomicBool::new(false));
let r = b.clone();
ctrlc::set_handler(move || {
r.store(true, Ordering::SeqCst);
})
.expect("Error setting Ctrl-C handler");
b
};
}
impl Env {
pub fn new(config: Config, click_config: ClickConfig, click_config_path: PathBuf) -> Env {
let namespace = click_config.namespace.clone();
let context = click_config.context.clone();
let mut env = Env {
config,
click_config,
click_config_path,
quit: false,
need_new_editor: false,
kluster: None,
namespace,
current_selection: ObjectSelection::None,
last_objs: LastList::None,
ctrlcbool: CTC_BOOL.clone(),
port_forwards: Vec::new(),
prompt: format!(
"[{}] [{}] [{}] > ",
Red.paint("none"),
Green.paint("none"),
Yellow.paint("none")
),
range_str: None,
tempdir: TempDir::new("click"),
};
env.set_context(context.as_deref());
env
}
pub fn current_selection(&self) -> &ObjectSelection {
&self.current_selection
}
pub fn save_click_config(&mut self) {
self.click_config.namespace = self.namespace.clone();
self.click_config.context = self.kluster.as_ref().map(|k| k.name.clone());
self.click_config
.save_to_file(self.click_config_path.as_path().to_str().unwrap())
.unwrap();
}
// sets the prompt string based on current settings
fn set_prompt(&mut self) {
self.prompt = format!(
"[{}] [{}] [{}] > ",
if let Some(ref k) = self.kluster {
Red.bold().paint(k.name.as_str())
} else {
Red.paint("none")
},
if let Some(ref n) = self.namespace {
Green.bold().paint(n.as_str())
} else {
Green.paint("none")
},
match self.current_selection {
ObjectSelection::Single(ref obj) => obj.prompt_str(),
ObjectSelection::Range(_) => Blue.paint(self.range_str.as_ref().unwrap()),
ObjectSelection::None => Yellow.paint("none"),
}
);
}
pub fn get_rustyline_conf(&self) -> rustyconfig::Config {
self.click_config.get_rustyline_conf()
}
pub fn get_contexts(&self) -> &BTreeMap<String, ::config::ContextConf> {
&self.config.contexts
}
pub fn set_context(&mut self, ctx: Option<&str>) {
if let Some(cname) = ctx {
self.kluster = match self.config.cluster_for_context(cname, &self.click_config) {
Ok(k) => Some(k),
Err(e) => {
println!(
"[WARN] Couldn't find/load context {}, now no current context. \
Error: {}",
cname, e
);
None
}
};
self.save_click_config();
self.set_prompt();
}
}
pub fn set_namespace(&mut self, namespace: Option<&str>) {
let mut do_clear = false;
if let (&Some(ref my_ns), Some(new_ns)) = (&self.namespace, namespace) {
if my_ns.as_str() != new_ns {
do_clear = true; // need to use bool since self is borrowed here
}
}
if do_clear {
self.clear_current();
}
self.namespace = namespace.map(|n| n.to_owned());
self.set_prompt();
}
pub fn set_editor(&mut self, editor: Option<&str>) {
self.click_config.editor = editor.map(|s| s.to_string());
}
pub fn set_terminal(&mut self, terminal: Option<&str>) {
self.click_config.terminal = terminal.map(|s| s.to_string());
}
pub fn set_completion_type(&mut self, comptype: config::CompletionType) {
self.click_config.completiontype = comptype;
self.need_new_editor = true;
}
pub fn set_edit_mode(&mut self, editmode: config::EditMode) {
self.click_config.editmode = editmode;
self.need_new_editor = true;
}
// Return the current position of the specified alias in the Vec, or None if it's not there
fn alias_position(&self, alias: &str) -> Option<usize> {
self.click_config
.aliases
.iter()
.position(|a| a.alias == *alias)
}
pub fn add_alias(&mut self, alias: Alias) {
self.remove_alias(&alias.alias);
self.click_config.aliases.push(alias);
self.save_click_config();
}
pub fn remove_alias(&mut self, alias: &str) -> bool {
match self.alias_position(alias) {
Some(p) => {
self.click_config.aliases.remove(p);
self.save_click_config();
true
}
None => false,
}
}
pub fn set_lastlist(&mut self, list: LastList) {
self.last_objs = list;
}
pub fn clear_current(&mut self) {
self.current_selection = ObjectSelection::None;
self.range_str = None;
self.set_prompt();
}
/// get the item from the last list at the specified index
pub fn item_at(&self, index: usize) -> Option<KObj> {
match self.last_objs {
LastList::None => {
println!("No active object list");
None
}
LastList::PodList(ref pl) => pl.items.get(index).map(|pod| {
let containers = pod
.spec
.containers
.iter()
.map(|cspec| cspec.name.clone())
.collect();
KObj::from_metadata(&pod.metadata, ObjType::Pod { containers })
}),
LastList::NodeList(ref nl) => nl.items.get(index).map(|n| KObj {
name: n.metadata.name.clone(),
namespace: None,
typ: ObjType::Node,
}),
LastList::DeploymentList(ref dl) => dl
.items
.get(index)
.map(|dep| KObj::from_metadata(&dep.metadata, ObjType::Deployment)),
LastList::ServiceList(ref sl) => sl
.items
.get(index)
.map(|service| KObj::from_metadata(&service.metadata, ObjType::Service)),
LastList::ReplicaSetList(ref rsl) => rsl
.items
.get(index)
.and_then(|replicaset| KObj::from_value(replicaset, ObjType::ReplicaSet)),
LastList::StatefulSetList(ref stfs) => stfs
.items
.get(index)
.and_then(|statefulset| KObj::from_value(statefulset, ObjType::StatefulSet)),
LastList::ConfigMapList(ref cml) => cml
.items
.get(index)
.and_then(|cm| KObj::from_value(cm, ObjType::ConfigMap)),
LastList::SecretList(ref sl) => sl
.items
.get(index)
.and_then(|secret| KObj::from_value(secret, ObjType::Secret)),
LastList::JobList(ref jl) => jl
.items
.get(index)
.and_then(|job| KObj::from_value(job, ObjType::Job)),
}
}
pub fn set_current(&mut self, num: usize) {
self.current_selection = match self.item_at(num) {
Some(obj) => ObjectSelection::Single(obj),
None => ObjectSelection::None,
};
self.range_str = None;
self.set_prompt();
}
pub fn set_range(&mut self, range: Vec<KObj>) {
let range_str = if range.is_empty() {
"Empty range".to_string()
} else {
let mut r = format!("{} {}", range.len(), range.get(0).unwrap().type_str());
if range.len() > 1 {
r.push('s');
}
r.push_str(" selected");
r
};
self.current_selection = ObjectSelection::Range(range);
self.range_str = Some(range_str);
self.set_prompt();
}
pub fn current_pod(&self) -> Option<&KObj> {
match self.current_selection {
ObjectSelection::Single(ref obj) => match obj.typ {
ObjType::Pod { .. } => Some(obj),
_ => None,
},
_ => None,
}
}
pub fn run_on_kluster<F, R>(&self, f: F) -> Option<R>
where
F: FnOnce(&Kluster) -> Result<R, KubeError>,
{
match self.kluster {
Some(ref k) => match f(k) {
Ok(r) => Some(r),
Err(e) => {
println!("{}", e);
None
}
},
None => {
println!("Need to have an active context");
None
} | }
/// Add a new task for the env to keep track of
pub fn add_port_forward(&mut self, pf: PortForward) {
self.port_forwards | } | random_line_split |
env.rs | ::new(false));
let r = b.clone();
ctrlc::set_handler(move || {
r.store(true, Ordering::SeqCst);
})
.expect("Error setting Ctrl-C handler");
b
};
}
impl Env {
pub fn new(config: Config, click_config: ClickConfig, click_config_path: PathBuf) -> Env {
let namespace = click_config.namespace.clone();
let context = click_config.context.clone();
let mut env = Env {
config,
click_config,
click_config_path,
quit: false,
need_new_editor: false,
kluster: None,
namespace,
current_selection: ObjectSelection::None,
last_objs: LastList::None,
ctrlcbool: CTC_BOOL.clone(),
port_forwards: Vec::new(),
prompt: format!(
"[{}] [{}] [{}] > ",
Red.paint("none"),
Green.paint("none"),
Yellow.paint("none")
),
range_str: None,
tempdir: TempDir::new("click"),
};
env.set_context(context.as_deref());
env
}
pub fn current_selection(&self) -> &ObjectSelection {
&self.current_selection
}
pub fn save_click_config(&mut self) {
self.click_config.namespace = self.namespace.clone();
self.click_config.context = self.kluster.as_ref().map(|k| k.name.clone());
self.click_config
.save_to_file(self.click_config_path.as_path().to_str().unwrap())
.unwrap();
}
// sets the prompt string based on current settings
fn set_prompt(&mut self) {
self.prompt = format!(
"[{}] [{}] [{}] > ",
if let Some(ref k) = self.kluster {
Red.bold().paint(k.name.as_str())
} else {
Red.paint("none")
},
if let Some(ref n) = self.namespace {
Green.bold().paint(n.as_str())
} else {
Green.paint("none")
},
match self.current_selection {
ObjectSelection::Single(ref obj) => obj.prompt_str(),
ObjectSelection::Range(_) => Blue.paint(self.range_str.as_ref().unwrap()),
ObjectSelection::None => Yellow.paint("none"),
}
);
}
pub fn get_rustyline_conf(&self) -> rustyconfig::Config {
self.click_config.get_rustyline_conf()
}
pub fn get_contexts(&self) -> &BTreeMap<String, ::config::ContextConf> {
&self.config.contexts
}
pub fn set_context(&mut self, ctx: Option<&str>) {
if let Some(cname) = ctx {
self.kluster = match self.config.cluster_for_context(cname, &self.click_config) {
Ok(k) => Some(k),
Err(e) => {
println!(
"[WARN] Couldn't find/load context {}, now no current context. \
Error: {}",
cname, e
);
None
}
};
self.save_click_config();
self.set_prompt();
}
}
pub fn set_namespace(&mut self, namespace: Option<&str>) {
let mut do_clear = false;
if let (&Some(ref my_ns), Some(new_ns)) = (&self.namespace, namespace) {
if my_ns.as_str() != new_ns {
do_clear = true; // need to use bool since self is borrowed here
}
}
if do_clear {
self.clear_current();
}
self.namespace = namespace.map(|n| n.to_owned());
self.set_prompt();
}
pub fn set_editor(&mut self, editor: Option<&str>) {
self.click_config.editor = editor.map(|s| s.to_string());
}
pub fn set_terminal(&mut self, terminal: Option<&str>) {
self.click_config.terminal = terminal.map(|s| s.to_string());
}
pub fn set_completion_type(&mut self, comptype: config::CompletionType) {
self.click_config.completiontype = comptype;
self.need_new_editor = true;
}
pub fn set_edit_mode(&mut self, editmode: config::EditMode) {
self.click_config.editmode = editmode;
self.need_new_editor = true;
}
// Return the current position of the specified alias in the Vec, or None if it's not there
fn alias_position(&self, alias: &str) -> Option<usize> {
self.click_config
.aliases
.iter()
.position(|a| a.alias == *alias)
}
pub fn add_alias(&mut self, alias: Alias) {
self.remove_alias(&alias.alias);
self.click_config.aliases.push(alias);
self.save_click_config();
}
pub fn remove_alias(&mut self, alias: &str) -> bool {
match self.alias_position(alias) {
Some(p) => {
self.click_config.aliases.remove(p);
self.save_click_config();
true
}
None => false,
}
}
pub fn set_lastlist(&mut self, list: LastList) {
self.last_objs = list;
}
pub fn clear_current(&mut self) {
self.current_selection = ObjectSelection::None;
self.range_str = None;
self.set_prompt();
}
/// get the item from the last list at the specified index
pub fn item_at(&self, index: usize) -> Option<KObj> {
match self.last_objs {
LastList::None => {
println!("No active object list");
None
}
LastList::PodList(ref pl) => pl.items.get(index).map(|pod| {
let containers = pod
.spec
.containers
.iter()
.map(|cspec| cspec.name.clone())
.collect();
KObj::from_metadata(&pod.metadata, ObjType::Pod { containers })
}),
LastList::NodeList(ref nl) => nl.items.get(index).map(|n| KObj {
name: n.metadata.name.clone(),
namespace: None,
typ: ObjType::Node,
}),
LastList::DeploymentList(ref dl) => dl
.items
.get(index)
.map(|dep| KObj::from_metadata(&dep.metadata, ObjType::Deployment)),
LastList::ServiceList(ref sl) => sl
.items
.get(index)
.map(|service| KObj::from_metadata(&service.metadata, ObjType::Service)),
LastList::ReplicaSetList(ref rsl) => rsl
.items
.get(index)
.and_then(|replicaset| KObj::from_value(replicaset, ObjType::ReplicaSet)),
LastList::StatefulSetList(ref stfs) => stfs
.items
.get(index)
.and_then(|statefulset| KObj::from_value(statefulset, ObjType::StatefulSet)),
LastList::ConfigMapList(ref cml) => cml
.items
.get(index)
.and_then(|cm| KObj::from_value(cm, ObjType::ConfigMap)),
LastList::SecretList(ref sl) => sl
.items
.get(index)
.and_then(|secret| KObj::from_value(secret, ObjType::Secret)),
LastList::JobList(ref jl) => jl
.items
.get(index)
.and_then(|job| KObj::from_value(job, ObjType::Job)),
}
}
pub fn set_current(&mut self, num: usize) {
self.current_selection = match self.item_at(num) {
Some(obj) => ObjectSelection::Single(obj),
None => ObjectSelection::None,
};
self.range_str = None;
self.set_prompt();
}
pub fn set_range(&mut self, range: Vec<KObj>) {
let range_str = if range.is_empty() {
"Empty range".to_string()
} else {
let mut r = format!("{} {}", range.len(), range.get(0).unwrap().type_str());
if range.len() > 1 {
r.push('s');
}
r.push_str(" selected");
r
};
self.current_selection = ObjectSelection::Range(range);
self.range_str = Some(range_str);
self.set_prompt();
}
pub fn current_pod(&self) -> Option<&KObj> {
match self.current_selection {
ObjectSelection::Single(ref obj) => match obj.typ {
ObjType::Pod { .. } => Some(obj),
_ => None,
},
_ => None,
}
}
pub fn run_on_kluster<F, R>(&self, f: F) -> Option<R>
where
F: FnOnce(&Kluster) -> Result<R, KubeError>,
{
match self.kluster {
Some(ref k) => match f(k) {
Ok(r) => Some(r),
Err(e) => {
println!("{}", e);
None
}
},
None => {
println!("Need to have an active context");
None
}
}
}
/// Add a new task for the env to keep track of
pub fn add_port_forward(&mut self, pf: PortForward) {
self.port_forwards.push(pf);
}
pub fn get_port_forwards(&self) -> std::slice::Iter<PortForward> | {
self.port_forwards.iter()
} | identifier_body | |
bundle_es5.js | Content = document.getElementsByClassName('tab_content'),
tabCalc = document.querySelector('.popup_calc'),
tabCalcInput = tabCalc.getElementsByTagName('input'),
calcItem = document.querySelector('.balcon_icons'),
closeCalc = tabCalc.getElementsByTagName('strong')[0];
var _loop2 = function _loop2(_i7) {
tabCalcInput[_i7].addEventListener('change', function () {
if (_i7 == 0) {
calcAll.width = tabCalcInput[_i7].value;
} else if (_i7 == 1) {
calcAll.height = tabCalcInput[_i7].value;
}
});
};
for (var _i7 = 0; _i7 < tabCalcInput.length; _i7++) {
_loop2(_i7);
}
function hideTabContent(a) {
for (var _i8 = a; _i8 < tabContent.length; _i8++) {
tabContent[_i8].classList.remove('show');
tabContent[_i8].classList.add('hide');
tab[_i8].classList.remove('active');
}
}
hideTabContent(1);
function ShowTabContent(b) {
if (tabContent[b].classList.contains('hide')) {
hideTabContent(0);
tabContent[b].classList.remove('hide');
tabContent[b].classList.add('show');
tab[b].classList.add('active');
}
}
tabWrap.addEventListener('click', function (event) {
var target = event.target;
if (target.matches('.tab')) {
for (var _i9 = 0; _i9 < tab.length; _i9++) {
if (target == tab[_i9]) {
ShowTabContent(_i9);
break;
}
}
}
});
//btn
tabWrap.addEventListener('click', function (event) {
var target = event.target;
if (target.tagName == 'BUTTON') {
tabCalc.style.display = 'flex';
document.body.style.overflow = 'hidden';
event.preventDefault();
}
});
document.body.addEventListener('click', function (event) {
var target = event.target;
if (target.classList.contains('popup_calc')) {
tabCalc.style.display = 'none';
document.body.style.overflow = '';
}
});
closeCalc.addEventListener('click', function () {
tabCalc.style.display = 'none';
document.body.style.overflow = '';
});
//calc
var calcContent = document.querySelectorAll('.big_img__items'),
calcTab = document.querySelectorAll('.balcon_icons_items'),
calcBtn = document.querySelector('.popup_calc_button'),
calcNextModal = document.querySelector('.popup_calc_profile'),
calcInputs = calcNextModal.getElementsByTagName("input"),
closeNextModal = calcNextModal.getElementsByTagName('strong')[0],
calcEndBtn = document.querySelector('.popup_calc_profile_button'),
calcEndPopup = document.querySelector('.popup_calc_end'),
closeEndPopup = calcEndPopup.getElementsByTagName('strong')[0];
function hideCalcContent(c) {
for (var _i10 = c; _i10 < calcContent.length; _i10++) {
calcContent[_i10].classList.remove('show');
calcContent[_i10].classList.add('hide');
}
}
hideCalcContent(1);
function ShowCalcContent(d) {
if (calcContent[d].classList.contains('hide')) {
hideCalcContent(0);
calcContent[d].classList.remove('hide');
calcContent[d].classList.add('show');
}
}
calcItem.addEventListener('click', function (event) {
var target = event.target;
if (target.tagName == 'IMG') {
event.preventDefault();
for (var _i11 = 0; _i11 < calcTab.length; _i11++) {
if (target == calcTab[_i11]) {
ShowCalcContent(_i11);
break;
}
}
}
});
// form
calcBtn.addEventListener('click', function () {
tabCalc.style.display = 'none';
calcNextModal.style.display = 'flex';
});
for (var _i12 = 0; _i12 < calcInputs.length; _i12++) {
if (calcInputs[_i12].type == "checkbox") {
calcInputs[_i12].onchange = function () {
for (var _i13 = 0; _i13 < calcInputs.length; _i13++) {
if (calcInputs[_i13].type == "checkbox") {
calcInputs[_i13].checked = false;
}
this.checked = true;
}
};
}
}
closeNextModal.addEventListener('click', function () {
calcNextModal.style.display = 'none';
document.body.style.overflow = '';
});
calcEndBtn.addEventListener('click', function () {
calcNextModal.style.display = 'none';
calcEndPopup.style.display = 'flex';
});
closeEndPopup.addEventListener('click', function () {
calcEndPopup.style.display = 'none';
document.body.style.overflow = '';
});
var calcEndPopupInput = calcEndPopup.getElementsByTagName('input'),
checkbox = document.getElementsByName('checkbox-test'),
calcAll = {
name: name,
number: '',
width: width,
height: height,
profile: ''
};
var _loop3 = function _loop3(r) {
calcEndPopupInput[r].addEventListener('change', function () {
if (i == 0) {
calcAll.name = calcEndPopupInput[r].value;
} else if (i == 1) {
calcAll.number = calcEndPopupInput[r].value;
}
});
};
for (var r = 0; r < calcEndPopupInput.length; r++) {
_loop3(r);
}
for (var z = 0; z < checkbox.length; z++) {
if (checkbox[z].type === 'radio' && checkbox[z].checked) {
calcAll.profile = checkbox[z].value;
}
}
}
module.exports = tabFirst;
}, {}], 6: [function (require, module, exports) {
function tabSeconds() {
var tabSecond = document.querySelectorAll('.decoration_sliders__items'),
wrapTabSecond = document.querySelector('.decoration_slider'),
tabSecondActive = document.querySelectorAll('.no_click'),
tabSecondContent = document.querySelectorAll('.decoration_content_items');
function hideTabSecond(f) {
for (var _i14 = f; _i14 < tabSecondContent.length; _i14++) {
tabSecondContent[_i14].classList.remove('show');
tabSecondContent[_i14].classList.add('hide');
tabSecondActive[_i14].classList.remove('after_click');
}
}
hideTabSecond(1);
function ShowTabSecond(g) {
if (tabSecondContent[g].classList.contains('hide')) {
hideTabSecond(0);
tabSecondContent[g].classList.remove('hide');
tabSecondContent[g].classList.add('show');
tabSecondActive[g].classList.add('after_click');
}
}
wrapTabSecond.addEventListener('click', function (event) {
var target = event.target;
if (target.matches('.decoration_sliders__items')) {
for (var _i15 = 0; _i15 < tabSecond.length; _i15++) {
if (target == tabSecond[_i15]) {
ShowTabSecond(_i15);
break;
}
}
}
});
}
module.exports = tabSeconds;
}, {}], 7: [function (require, module, exports) {
function timer() {
var deadline = '2019/07/04';
function getTimeRemaining(endtime) {
var t = Date.parse(endtime) - Date.parse(new Date()),
seconds = Math.floor(t / 1000 % 60),
minutes = Math.floor(t / 1000 / 60 % 60),
hours = Math.floor(t / 10006060 % 24),
days = Math.floor(t / (10006060 * 24));
return {
'total': t,
'days': days,
'hours': hours,
'minutes': minutes,
'seconds': seconds
};
}
//функция, которая запускает часы
function setClock(id, endtime) {
var timer = document.getElementsByClassName('timer')[0],
days = timer.querySelector('.days'),
hours = timer.querySelector('.hours'),
minutes = timer.querySelector('.minutes'),
seconds = timer.querySelector('.seconds'); |
function updateClock() {
var t = getTimeRemaining(endtime);
days.innerHTML = t.days < 10 ? '0' + t.days : t.days; | random_line_split | |
bundle_es5.js | ", "application/x-www-form-urlencoded");
var formData = new FormData(form[_i5]);
request.send(formData);
request.onreadystatechange = function () {
if (request.readyState < 4) {
statusMessage.innerHTML = message.loading;
} else if (request.readyState === 4) {
if (request.status == 200 && request.status < 300) {
console.log(form[_i5]);
statusMessage.innerHTML = message.success;
} else {
statusMessage.innerHTML = message.failure;
}
}
};
for (var _i6 = 0; _i6 < input.length; _i6++) {
input[_i6].value = '';
}
});
};
for (var _i5 = 0; _i5 < form.length; _i5++) {
_loop(_i5);
};
}
module.exports = form;
}, {}], 4: [function (require, module, exports) {
function modal() {
var body = document.querySelector('body'),
popup = document.querySelector('.popup');
timePopup();
body.addEventListener('click', function (e) {
clearTimeout(timePopup);
e.preventDefault();
var target = e.target;
if (target.classList.contains('header_btn') || target.classList.contains('phone_link')) {
popup.style.display = "block";
}
if (target.classList.contains('popup_close') || target.parentElement.classList.contains('popup_close') || target.classList.contains('popup') && !target.classList.contains('popup_form')) {
popup.style.display = "none";
}
});
function timePopup() {
setTimeout(function () {
popup.style.display = "block";
}, 60000);
}
}
module.exports = modal;
}, {}], 5: [function (require, module, exports) {
function tabFirst() {
//TabFirst
var tabWrap = document.querySelector('.glazing'),
tab = document.querySelectorAll('.tab'),
tabContent = document.getElementsByClassName('tab_content'),
tabCalc = document.querySelector('.popup_calc'),
tabCalcInput = tabCalc.getElementsByTagName('input'),
calcItem = document.querySelector('.balcon_icons'),
closeCalc = tabCalc.getElementsByTagName('strong')[0];
var _loop2 = function _loop2(_i7) {
tabCalcInput[_i7].addEventListener('change', function () {
if (_i7 == 0) {
calcAll.width = tabCalcInput[_i7].value;
} else if (_i7 == 1) {
calcAll.height = tabCalcInput[_i7].value;
}
});
};
for (var _i7 = 0; _i7 < tabCalcInput.length; _i7++) {
_loop2(_i7);
}
function hideTabContent(a) {
for (var _i8 = a; _i8 < tabContent.length; _i8++) {
tabContent[_i8].classList.remove('show');
tabContent[_i8].classList.add('hide');
tab[_i8].classList.remove('active');
}
}
hideTabContent(1);
function ShowTabContent(b) {
if (tabContent[b].classList.contains('hide')) {
hideTabContent(0);
tabContent[b].classList.remove('hide');
tabContent[b].classList.add('show');
tab[b].classList.add('active');
}
}
tabWrap.addEventListener('click', function (event) {
var target = event.target;
if (target.matches('.tab')) {
for (var _i9 = 0; _i9 < tab.length; _i9++) {
if (target == tab[_i9]) {
ShowTabContent(_i9);
break;
}
}
}
});
//btn
tabWrap.addEventListener('click', function (event) {
var target = event.target;
if (target.tagName == 'BUTTON') {
tabCalc.style.display = 'flex';
document.body.style.overflow = 'hidden';
event.preventDefault();
}
});
document.body.addEventListener('click', function (event) {
var target = event.target;
if (target.classList.contains('popup_calc')) {
tabCalc.style.display = 'none';
document.body.style.overflow = '';
}
});
closeCalc.addEventListener('click', function () {
tabCalc.style.display = 'none';
document.body.style.overflow = '';
});
//calc
var calcContent = document.querySelectorAll('.big_img__items'),
calcTab = document.querySelectorAll('.balcon_icons_items'),
calcBtn = document.querySelector('.popup_calc_button'),
calcNextModal = document.querySelector('.popup_calc_profile'),
calcInputs = calcNextModal.getElementsByTagName("input"),
closeNextModal = calcNextModal.getElementsByTagName('strong')[0],
calcEndBtn = document.querySelector('.popup_calc_profile_button'),
calcEndPopup = document.querySelector('.popup_calc_end'),
closeEndPopup = calcEndPopup.getElementsByTagName('strong')[0];
function hideCalcContent(c) {
for (var _i10 = c; _i10 < calcContent.length; _i10++) {
calcContent[_i10].classList.remove('show');
calcContent[_i10].classList.add('hide');
}
}
hideCalcContent(1);
function ShowCalcContent(d) {
if (calcContent[d].classList.contains('hide')) {
hideCalcContent(0);
calcContent[d].classList.remove('hide');
calcContent[d].classList.add('show');
}
}
calcItem.addEventListener('click', function (event) {
var target = event.target;
if (target.tagName == 'IMG') {
event.preventDefault();
for (var _i11 = 0; _i11 < calcTab.length; _i11++) {
if (target == calcTab[_i11]) {
ShowCalcContent(_i11);
break;
}
}
}
});
// form
calcBtn.addEventListener('click', function () {
tabCalc.style.display = 'none';
calcNextModal.style.display = 'flex';
});
for (var _i12 = 0; _i12 < calcInputs.length; _i12++) {
if (calcInputs[_i12].type == "checkbox") {
calcInputs[_i12].onchange = function () {
for (var _i13 = 0; _i13 < calcInputs.length; _i13++) {
if (calcInputs[_i13].type == "checkbox") {
calcInputs[_i13].checked = false;
}
this.checked = true;
}
};
}
}
closeNextModal.addEventListener('click', function () {
calcNextModal.style.display = 'none';
document.body.style.overflow = '';
});
calcEndBtn.addEventListener('click', function () {
calcNextModal.style.display = 'none';
calcEndPopup.style.display = 'flex';
});
closeEndPopup.addEventListener('click', function () {
calcEndPopup.style.display = 'none';
document.body.style.overflow = '';
});
var calcEndPopupInput = calcEndPopup.getElementsByTagName('input'),
checkbox = document.getElementsByName('checkbox-test'),
calcAll = {
name: name,
number: '',
width: width,
height: height,
profile: ''
};
var _loop3 = function _loop3(r) {
calcEndPopupInput[r].addEventListener('change', function () {
if (i == 0) {
calcAll.name = calcEndPopupInput[r].value;
} else if (i == 1) {
calcAll.number = calcEndPopupInput[r].value;
}
});
};
for (var r = 0; r < calcEndPopupInput.length; r++) {
_loop3(r);
}
for (var z = 0; z < checkbox.length; z++) {
if (checkbox[z].type === 'radio' && checkbox[z].checked) {
calcAll.profile = checkbox[z].value;
}
}
}
module.exports = tabFirst;
}, {}], 6: [function (require, module, exports) {
function tabSeconds() {
var tabSecond = document.querySelectorAll('.decoration_sliders__items'),
wrapTabSecond = document.querySelector('.decoration_slider'),
tabSecondActive = document.querySelectorAll('.no_click'),
tabSecondContent = document.querySelectorAll('.decoration_content_items');
function hideTabSecond(f) {
for (var _i14 = f; _i14 < tabSecondContent.length; _i14++) {
tabSecondContent[_i14].classList.remove('show');
tabSecondContent[_i14].classList.add('hide');
tabSecondActive[_i14].classList.remove('after_click');
}
}
hideTabSecond(1);
function ShowTabSecond(g) {
if (tabSecondContent[g].classList.contains('h | ide')) {
| identifier_name | |
bundle_es5.js | return r;
})()({ 1: [function (require, module, exports) {
window.addEventListener('DOMContentLoaded', function () {
var modal = require('../parts/modal.js');
var form = require('../parts/form.js');
var tabFirst = require('../parts/tabFirst.js');
var tabSeconds = require('../parts/tabSeconds.js');
var timer = require('../parts/timer.js');
var bigImages = require('../parts/bigImages.js');
modal();
form();
tabFirst();
tabSeconds();
timer();
bigImages();
});
}, { "../parts/bigImages.js": 2, "../parts/form.js": 3, "../parts/modal.js": 4, "../parts/tabFirst.js": 5, "../parts/tabSeconds.js": 6, "../parts/timer.js": 7 }], 2: [function (require, module, exports) {
function bigImages() {
var imagesGallery = document.getElementsByClassName('col-lg-3 col-md-4 col-sm-6 col-xs-12 text-center wow fadeIn'),
imagesBig = document.getElementsByClassName('big_images'),
imagesMain = document.getElementsByClassName('main_images'),
imagesRow = document.getElementsByClassName('big_image_cover');
for (var _i = 0; _i < imagesGallery.length; _i++) {
imagesRow[_i].style.top = 'auto';
imagesRow[_i].style.left = 'auto';
}
function tabHide(elementNumber) {
for (var _i2 = elementNumber; _i2 < imagesGallery.length; _i2++) {
imagesBig[_i2].classList.remove('show');
imagesBig[_i2].classList.add('hide');
}
}
tabHide(0);
function tabOpen(g) {
if (imagesBig[g].classList.contains('hide')) {
tabHide(0);
imagesBig[g].classList.remove('hide');
imagesBig[g].classList.add('show');
imagesBig[g].classList.add('back_pop_item');
imagesRow[g].style.display = "flex";
}
}
for (var j = 0; j < imagesGallery.length; j++) {
imagesMain[j].addEventListener('click', function () {
var target = event.target;
if (target.classList.contains('lupa') || target.classList.contains('main_images')) {
for (var _i3 = 0; _i3 < imagesGallery.length; _i3++) {
if (target == imagesGallery[_i3] || target == imagesMain[_i3]) {
tabOpen(_i3);
break;
}
}
}
});
}
for (var _j = 0; _j < imagesGallery.length; _j++) {
imagesRow[_j].addEventListener('click', function (elem) {
for (var _i4 = 0; _i4 < imagesGallery.length; _i4++) {
if (!isDescendant(imagesRow[_i4], elem.target)) {
imagesRow[_i4].style.display = 'none';
}
}
});
}
//проверка на родителя
function isDescendant(parent, child) {
var node = child.parentNode;
while (node != null) {
if (node == parent) {
return true;
}
node = node.parentNode;
}
return false;
}
}
module.exports = bigImages;
}, {}], 3: [function (require, module, exports) {
function form() {
var message = new Object();
message.loading = "Загрузка...";
message.success = "Спасибо! Скоро мы с Вами свяжемся";
message.failure = "Что-то пошло не так...";
var form = document.getElementsByTagName('form'),
statusMessage = document.createElement('div');
function setCursorPosition(pos, elem) {
elem.focus();
if (elem.setSelectionRange) elem.setSelectionRange(pos, pos);else if (elem.createTextRange) {
var range = elem.createTextRange();
range.collapse(true);
range.moveEnd("character", pos);
range.moveStart("character", pos);
range.select();
}
}
function mask(event) {
var matrix = "_ (___) ___ ____",
i = 0,
def = matrix.replace(/\D/g, ""),
val = this.value.replace(/\D/g, "");
if (def.length >= val.length) val = def;
this.value = matrix.replace(/./g, function (a) {
return (/[_\d]/.test(a) && i < val.length ? val.charAt(i++) : i >= val.length ? "" : a
);
});
if (event.type == "blur") {
if (this.value.length == 2) this.value = "";
} else setCursorPosition(this.value.length, this);
};
var _loop = function _loop(_i5) {
var input = form[_i5].getElementsByTagName('input'),
input_tel = document.getElementsByName("user_phone");
input_tel[_i5].addEventListener("input", mask);
input_tel[_i5].addEventListener("focus", mask);
input_tel[_i5].addEventListener("blur", mask);
form[_i5].addEventListener('submit', function (event) {
form[_i5].appendChild(statusMessage);
event.preventDefault();
//AJAX
var request = new XMLHttpRequest();
request.open("POST", 'server.php');
request.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
var formData = new FormData(form[_i5]);
request.send(formData);
request.onreadystatechange = function () {
if (request.readyState < 4) {
statusMessage.innerHTML = message.loading;
} else if (request.readyState === 4) {
if (request.status == 200 && request.status < 300) {
console.log(form[_i5]);
statusMessage.innerHTML = message.success;
} else {
statusMessage.innerHTML = message.failure;
}
}
};
for (var _i6 = 0; _i6 < input.length; _i6++) {
input[_i6].value = '';
}
});
};
for (var _i5 = 0; _i5 < form.length; _i5++) {
_loop(_i5);
};
}
module.exports = form;
}, {}], 4: [function (require, module, exports) {
function modal() {
var body = document.querySelector('body'),
popup = document.querySelector('.popup');
timePopup();
body.addEventListener('click', function (e) {
clearTimeout(timePopup);
e.preventDefault();
var target = e.target;
if (target.classList.contains('header_btn') || target.classList.contains('phone_link')) {
popup.style.display = "block";
}
if (target.classList.contains('popup_close') || target.parentElement.classList.contains('popup_close') || target.classList.contains('popup') && !target.classList.contains('popup_form')) {
popup.style.display = "none";
}
});
function timePopup() {
setTimeout(function () {
popup.style.display = "block";
}, 60000);
}
}
module.exports = modal;
}, {}], 5: [function (require, module, exports) {
function tabFirst() {
//TabFirst
var tabWrap = document.querySelector('.glazing'),
tab = document.querySelectorAll('.tab'),
tabContent = document.getElementsByClassName('tab_content'),
tabCalc = document.querySelector('.popup_calc'),
tabCalcInput = tabCalc.getElementsByTagName('input'),
calcItem = document.querySelector('.balcon_icons'),
closeCalc = tabCalc.getElementsByTagName('strong')[0];
var _loop2 = function _loop2(_i7) {
tabCalcInput[_i7].addEventListener('change', function () {
if (_i7 == 0) {
calcAll.width = tabCalcInput[_i7].value;
} else if (_i7 == 1) {
calcAll.height = tabCalcInput[_i7].value;
}
});
};
for (var _i7 = 0; _i7 < tabCalcInput.length; _i7++) {
_loop2(_i7);
}
function hideTabContent | {
function o(i, f) {
if (!n[i]) {
if (!e[i]) {
var c = "function" == typeof require && require;if (!f && c) return c(i, !0);if (u) return u(i, !0);var a = new Error("Cannot find module '" + i + "'");throw a.code = "MODULE_NOT_FOUND", a;
}var p = n[i] = { exports: {} };e[i][0].call(p.exports, function (r) {
var n = e[i][1][r];return o(n || r);
}, p, p.exports, r, e, n, t);
}return n[i].exports;
}for (var u = "function" == typeof require && require, i = 0; i < t.length; i++) {
o(t[i]);
}return o;
} | identifier_body | |
bundle_es5.js | val = def;
this.value = matrix.replace(/./g, function (a) {
return (/[_\d]/.test(a) && i < val.length ? val.charAt(i++) : i >= val.length ? "" : a
);
});
if (event.type == "blur") {
if (this.value.length == 2) this.value = "";
} else setCursorPosition(this.value.length, this);
};
var _loop = function _loop(_i5) {
var input = form[_i5].getElementsByTagName('input'),
input_tel = document.getElementsByName("user_phone");
input_tel[_i5].addEventListener("input", mask);
input_tel[_i5].addEventListener("focus", mask);
input_tel[_i5].addEventListener("blur", mask);
form[_i5].addEventListener('submit', function (event) {
form[_i5].appendChild(statusMessage);
event.preventDefault();
//AJAX
var request = new XMLHttpRequest();
request.open("POST", 'server.php');
request.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
var formData = new FormData(form[_i5]);
request.send(formData);
request.onreadystatechange = function () {
if (request.readyState < 4) {
statusMessage.innerHTML = message.loading;
} else if (request.readyState === 4) {
if (request.status == 200 && request.status < 300) {
console.log(form[_i5]);
statusMessage.innerHTML = message.success;
} else {
statusMessage.innerHTML = message.failure;
}
}
};
for (var _i6 = 0; _i6 < input.length; _i6++) {
input[_i6].value = '';
}
});
};
for (var _i5 = 0; _i5 < form.length; _i5++) {
_loop(_i5);
};
}
module.exports = form;
}, {}], 4: [function (require, module, exports) {
function modal() {
var body = document.querySelector('body'),
popup = document.querySelector('.popup');
timePopup();
body.addEventListener('click', function (e) {
clearTimeout(timePopup);
e.preventDefault();
var target = e.target;
if (target.classList.contains('header_btn') || target.classList.contains('phone_link')) {
popup.style.display = "block";
}
if (target.classList.contains('popup_close') || target.parentElement.classList.contains('popup_close') || target.classList.contains('popup') && !target.classList.contains('popup_form')) {
popup.style.display = "none";
}
});
function timePopup() {
setTimeout(function () {
popup.style.display = "block";
}, 60000);
}
}
module.exports = modal;
}, {}], 5: [function (require, module, exports) {
function tabFirst() {
//TabFirst
var tabWrap = document.querySelector('.glazing'),
tab = document.querySelectorAll('.tab'),
tabContent = document.getElementsByClassName('tab_content'),
tabCalc = document.querySelector('.popup_calc'),
tabCalcInput = tabCalc.getElementsByTagName('input'),
calcItem = document.querySelector('.balcon_icons'),
closeCalc = tabCalc.getElementsByTagName('strong')[0];
var _loop2 = function _loop2(_i7) {
tabCalcInput[_i7].addEventListener('change', function () {
if (_i7 == 0) {
calcAll.width = tabCalcInput[_i7].value;
} else if (_i7 == 1) {
calcAll.height = tabCalcInput[_i7].value;
}
});
};
for (var _i7 = 0; _i7 < tabCalcInput.length; _i7++) {
_loop2(_i7);
}
function hideTabContent(a) {
for (var _i8 = a; _i8 < tabContent.length; _i8++) {
tabContent[_i8].classList.remove('show');
tabContent[_i8].classList.add('hide');
tab[_i8].classList.remove('active');
}
}
hideTabContent(1);
function ShowTabContent(b) {
if (tabContent[b].classList.contains('hide')) {
hideTabContent(0);
tabContent[b].classList.remove('hide');
tabContent[b].classList.add('show');
tab[b].classList.add('active');
}
}
tabWrap.addEventListener('click', function (event) {
var target = event.target;
if (target.matches('.tab')) {
for (var _i9 = 0; _i9 < tab.length; _i9++) {
if (target == tab[_i9]) {
ShowTabContent(_i9);
break;
}
}
}
});
//btn
tabWrap.addEventListener('click', function (event) {
var target = event.target;
if (target.tagName == 'BUTTON') {
tabCalc.style.display = 'flex';
document.body.style.overflow = 'hidden';
event.preventDefault();
}
});
document.body.addEventListener('click', function (event) {
var target = event.target;
if (target.classList.contains('popup_calc')) {
tabCalc.style.display = 'none';
document.body.style.overflow = '';
}
});
closeCalc.addEventListener('click', function () {
tabCalc.style.display = 'none';
document.body.style.overflow = '';
});
//calc
var calcContent = document.querySelectorAll('.big_img__items'),
calcTab = document.querySelectorAll('.balcon_icons_items'),
calcBtn = document.querySelector('.popup_calc_button'),
calcNextModal = document.querySelector('.popup_calc_profile'),
calcInputs = calcNextModal.getElementsByTagName("input"),
closeNextModal = calcNextModal.getElementsByTagName('strong')[0],
calcEndBtn = document.querySelector('.popup_calc_profile_button'),
calcEndPopup = document.querySelector('.popup_calc_end'),
closeEndPopup = calcEndPopup.getElementsByTagName('strong')[0];
function hideCalcContent(c) {
for (var _i10 = c; _i10 < calcContent.length; _i10++) {
calcContent[_i10].classList.remove('show');
calcContent[_i10].classList.add('hide');
}
}
hideCalcContent(1);
function ShowCalcContent(d) {
if (calcContent[d].classList.contains('hide')) {
hideCalcContent(0);
calcContent[d].classList.remove('hide');
calcContent[d].classList.add('show');
}
}
calcItem.addEventListener('click', function (event) {
var target = event.target;
if (target.tagName == 'IMG') {
event.preventDefault();
for (var _i11 = 0; _i11 < calcTab.length; _i11++) {
if (target == calcTab[_i11]) {
ShowCalcContent(_i11);
break;
}
}
}
});
// form
calcBtn.addEventListener('click', function () {
tabCalc.style.display = 'none';
calcNextModal.style.display = 'flex';
});
for (var _i12 = 0; _i12 < calcInputs.length; _i12++) {
if (calcInputs[_i12].type == "checkbox") {
calcInputs[_i12].onchange = function () {
for (var _i13 = 0; _i13 < calcInputs.length; _i13++) {
if (calcInputs[_i13].type == "checkbox") {
calcInputs[_i13].checked = false;
}
this.checked = true;
}
};
}
}
closeNextModal.addEventListener('click', function () {
calcNextModal.style.display = 'none';
document.body.style.overflow = '';
});
calcEndBtn.addEventListener('click', function () {
calcNextModal.style.display = 'none';
calcEndPopup.style.display = 'flex';
});
closeEndPopup.addEventListener('click', function () {
calcEndPopup.style.display = 'none';
document.body.style.overflow = '';
});
var calcEndPopupInput = calcEndPopup.getElementsByTagName('input'),
checkbox = document.getElementsByName('checkbox-test'),
calcAll = {
name: name,
number: '',
width: width,
height: height,
profile: ''
};
var _loop3 = function _loop3(r) {
calcEndPopupInput[r].addEventListener('change', function () {
if (i == 0) {
calcAll.name = calcEndPopupInput[r].value;
} else if (i == 1) {
calcAll.number = calcEndPopupInput[r].value;
}
});
};
for (var r = 0; r < calcEndPopupInput.length; r++) {
_loop3(r);
}
for (var z = 0; z < checkbox.length; z++) | {
if (checkbox[z] | conditional_block | |
lib.rs | Mode::PreOrder, |_, entry| {
if let Ok(commit) =
transaction.repo().find_commit(entry.id())
{
if let Some(id) = josh::get_change_id(&commit) {
amends.insert(id, commit.id());
}
}
git2::TreeWalkResult::Ok
})?;
}
amends
};
let filterobj = josh::filter::parse(&repo_update.filter_spec)?;
let new_oid = git2::Oid::from_str(&new)?;
let backward_new_oid = {
tracing::debug!("=== MORE");
tracing::debug!("=== processed_old {:?}", old);
match josh::history::unapply_filter(
&transaction,
filterobj,
unfiltered_old,
old,
new_oid,
josh_merge,
&amends,
)? {
josh::UnapplyResult::Done(rewritten) => {
tracing::debug!("rewritten");
rewritten
}
josh::UnapplyResult::BranchDoesNotExist => {
return Err(josh::josh_error(
"branch does not exist on remote",
));
}
josh::UnapplyResult::RejectMerge(parent_count) => {
return Err(josh::josh_error(&format!(
"rejecting merge with {} parents",
parent_count
)));
}
josh::UnapplyResult::RejectAmend(msg) => {
return Err(josh::josh_error(&format!(
"rejecting to amend {:?} with conflicting changes",
msg
)));
}
}
};
let oid_to_push = if josh_merge {
let rev = format!(
"refs/josh/upstream/{}/{}",
&repo_update.base_ns, &baseref
);
let backward_commit =
transaction.repo().find_commit(backward_new_oid)?;
if let Ok(Ok(base_commit)) = transaction
.repo()
.revparse_single(&rev)
.map(|x| x.peel_to_commit())
{
let merged_tree = transaction
.repo()
.merge_commits(&base_commit, &backward_commit, None)?
.write_tree_to(&transaction.repo())?;
transaction.repo().commit(
None,
&backward_commit.author(),
&backward_commit.committer(),
&format!("Merge from {}", &repo_update.filter_spec),
&transaction.repo().find_tree(merged_tree)?,
&[&base_commit, &backward_commit],
)?
} else {
return Err(josh::josh_error("josh_merge failed"));
}
} else {
backward_new_oid
};
let push_with_options = if options.len() != 0 {
push_to + "%" + &options.join(",")
} else {
push_to
};
let password = credential_store
.read()?
.get(&repo_update.password)
.unwrap_or(&Password {
value: "".to_owned(),
})
.to_owned();
let reapply = josh::filter::apply_to_commit(
filterobj,
&transaction.repo().find_commit(oid_to_push)?,
&transaction,
)?;
resp = format!(
"{}{}",
resp,
push_head_url(
&transaction.repo(),
oid_to_push,
&push_with_options,
&repo_update.remote_url,
&repo_update.username,
&password,
&repo_update.git_ns,
)?
);
if new_oid != reapply {
transaction.repo().reference(
&format!(
"refs/josh/rewrites/{}/r_{}",
repo_update.base_ns, reapply
),
reapply,
true,
"reapply",
)?;
resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply);
}
}
return Ok(resp);
}
fn push_head_url(
repo: &git2::Repository,
oid: git2::Oid,
refname: &str,
url: &str,
username: &str,
password: &Password,
namespace: &str,
) -> josh::JoshResult<String> {
let rn = format!("refs/{}", &namespace);
let spec = format!("{}:{}", &rn, &refname);
let shell = josh::shell::Shell {
cwd: repo.path().to_owned(),
};
let nurl = if username != "" {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, &username, &rest)
} else {
url.to_owned()
};
let cmd = format!("git push {} '{}'", &nurl, &spec);
let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?;
let (stdout, stderr, status) =
shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]);
fakehead.delete()?;
tracing::debug!("{}", &stderr);
tracing::debug!("{}", &stdout);
let stderr = stderr.replace(&rn, "JOSH_PUSH");
if status != 0 {
return Err(josh::josh_error(&stderr));
}
return Ok(stderr);
}
pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> {
tracing::debug!("init base repo: {:?}", path);
std::fs::create_dir_all(path).expect("can't create_dir_all");
git2::Repository::init_bare(path)?;
let shell = josh::shell::Shell {
cwd: path.to_path_buf(),
};
shell.command("git config http.receivepack true");
shell.command("git config uploadpack.allowsidebandall true");
shell.command("git config receive.advertisePushOptions true");
let ce = std::env::current_exe().expect("can't find path to exe");
shell.command("rm -Rf hooks");
shell.command("mkdir hooks");
std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update"))
.expect("can't symlink update hook");
std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive"))
.expect("can't symlink pre-receive hook");
shell.command(&format!(
"git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'"
));
shell.command(&"git config gc.auto 0");
if std::env::var_os("JOSH_KEEP_NS") == None {
std::fs::remove_dir_all(path.join("refs/namespaces")).ok();
}
tracing::info!("repo initialized");
return Ok(());
}
#[tracing::instrument(skip(credential_store))]
pub fn fetch_refs_from_url(
path: &std::path::Path,
upstream_repo: &str,
url: &str,
refs_prefixes: &[String],
username: &str,
password: &HashedPassword,
credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>,
) -> josh::JoshResult<bool> {
let specs: Vec<_> = refs_prefixes
.iter()
.map(|r| {
format!(
"'+{}:refs/josh/upstream/{}/{}'",
&r,
josh::to_ns(upstream_repo),
&r
)
})
.collect();
let shell = josh::shell::Shell {
cwd: path.to_owned(),
};
let nurl = if username != "" {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, &username, &rest)
} else {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, "annonymous", &rest)
};
let cmd = format!("git fetch --no-tags {} {}", &nurl, &specs.join(" "));
tracing::info!("fetch_refs_from_url {:?} {:?} {:?}", cmd, path, "");
let password = credential_store
.read()?
.get(&password)
.unwrap_or(&Password {
value: "".to_owned(),
})
.to_owned();
let (_stdout, stderr, _) =
shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]);
tracing::debug!(
"fetch_refs_from_url done {:?} {:?} {:?}",
cmd,
path,
stderr
);
if stderr.contains("fatal: Authentication failed") {
return Ok(false);
}
if stderr.contains("fatal:") {
return Err(josh::josh_error(&format!("git error: {:?}", stderr)));
}
if stderr.contains("error:") {
return Err(josh::josh_error(&format!("git error: {:?}", stderr)));
}
return Ok(true);
}
// Wrapper struct for storing passwords to avoid having
// them output to traces by accident
#[derive(Clone)]
pub struct Password {
pub value: String,
}
#[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct | HashedPassword | identifier_name | |
lib.rs | (p)?;
let push_options: Vec<&str> = push_options_string.split("\n").collect();
for (refname, (old, new)) in repo_update.refs.iter() {
tracing::debug!("REPO_UPDATE env ok");
let transaction = josh::cache::Transaction::open(
&std::path::Path::new(&repo_update.git_dir),
)?;
let old = git2::Oid::from_str(old)?;
let (baseref, push_to, options) = baseref_and_options(refname)?;
let josh_merge = push_options.contains(&"merge");
tracing::debug!("push options: {:?}", push_options);
tracing::debug!("josh-merge: {:?}", josh_merge);
let old = if old == git2::Oid::zero() {
let rev =
format!("refs/namespaces/{}/{}", repo_update.git_ns, &baseref);
let oid = if let Ok(x) = transaction.repo().revparse_single(&rev) {
x.id()
} else {
old
};
tracing::debug!("push: old oid: {:?}, rev: {:?}", oid, rev);
oid
} else {
tracing::debug!("push: old oid: {:?}, refname: {:?}", old, refname);
old
};
let unfiltered_old = {
let rev = format!(
"refs/josh/upstream/{}/{}",
repo_update.base_ns, &baseref
);
let oid = transaction
.repo()
.refname_to_id(&rev)
.unwrap_or(git2::Oid::zero());
tracing::debug!(
"push: unfiltered_old oid: {:?}, rev: {:?}",
oid,
rev
);
oid
};
let amends = {
let gerrit_changes = format!(
"refs/josh/upstream/{}/refs/gerrit_changes/all",
repo_update.base_ns,
);
let mut amends = std::collections::HashMap::new();
if let Ok(tree) = transaction
.repo()
.find_reference(&gerrit_changes)
.and_then(|x| x.peel_to_commit())
.and_then(|x| x.tree())
{
tree.walk(git2::TreeWalkMode::PreOrder, |_, entry| {
if let Ok(commit) =
transaction.repo().find_commit(entry.id())
{
if let Some(id) = josh::get_change_id(&commit) {
amends.insert(id, commit.id());
}
}
git2::TreeWalkResult::Ok
})?;
}
amends
};
let filterobj = josh::filter::parse(&repo_update.filter_spec)?;
let new_oid = git2::Oid::from_str(&new)?;
let backward_new_oid = {
tracing::debug!("=== MORE");
tracing::debug!("=== processed_old {:?}", old);
match josh::history::unapply_filter(
&transaction,
filterobj,
unfiltered_old,
old,
new_oid,
josh_merge,
&amends,
)? {
josh::UnapplyResult::Done(rewritten) => {
tracing::debug!("rewritten");
rewritten
}
josh::UnapplyResult::BranchDoesNotExist => {
return Err(josh::josh_error(
"branch does not exist on remote",
));
}
josh::UnapplyResult::RejectMerge(parent_count) => {
return Err(josh::josh_error(&format!(
"rejecting merge with {} parents",
parent_count
)));
}
josh::UnapplyResult::RejectAmend(msg) => {
return Err(josh::josh_error(&format!(
"rejecting to amend {:?} with conflicting changes",
msg
)));
}
}
};
let oid_to_push = if josh_merge { | &repo_update.base_ns, &baseref
);
let backward_commit =
transaction.repo().find_commit(backward_new_oid)?;
if let Ok(Ok(base_commit)) = transaction
.repo()
.revparse_single(&rev)
.map(|x| x.peel_to_commit())
{
let merged_tree = transaction
.repo()
.merge_commits(&base_commit, &backward_commit, None)?
.write_tree_to(&transaction.repo())?;
transaction.repo().commit(
None,
&backward_commit.author(),
&backward_commit.committer(),
&format!("Merge from {}", &repo_update.filter_spec),
&transaction.repo().find_tree(merged_tree)?,
&[&base_commit, &backward_commit],
)?
} else {
return Err(josh::josh_error("josh_merge failed"));
}
} else {
backward_new_oid
};
let push_with_options = if options.len() != 0 {
push_to + "%" + &options.join(",")
} else {
push_to
};
let password = credential_store
.read()?
.get(&repo_update.password)
.unwrap_or(&Password {
value: "".to_owned(),
})
.to_owned();
let reapply = josh::filter::apply_to_commit(
filterobj,
&transaction.repo().find_commit(oid_to_push)?,
&transaction,
)?;
resp = format!(
"{}{}",
resp,
push_head_url(
&transaction.repo(),
oid_to_push,
&push_with_options,
&repo_update.remote_url,
&repo_update.username,
&password,
&repo_update.git_ns,
)?
);
if new_oid != reapply {
transaction.repo().reference(
&format!(
"refs/josh/rewrites/{}/r_{}",
repo_update.base_ns, reapply
),
reapply,
true,
"reapply",
)?;
resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply);
}
}
return Ok(resp);
}
fn push_head_url(
repo: &git2::Repository,
oid: git2::Oid,
refname: &str,
url: &str,
username: &str,
password: &Password,
namespace: &str,
) -> josh::JoshResult<String> {
let rn = format!("refs/{}", &namespace);
let spec = format!("{}:{}", &rn, &refname);
let shell = josh::shell::Shell {
cwd: repo.path().to_owned(),
};
let nurl = if username != "" {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, &username, &rest)
} else {
url.to_owned()
};
let cmd = format!("git push {} '{}'", &nurl, &spec);
let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?;
let (stdout, stderr, status) =
shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]);
fakehead.delete()?;
tracing::debug!("{}", &stderr);
tracing::debug!("{}", &stdout);
let stderr = stderr.replace(&rn, "JOSH_PUSH");
if status != 0 {
return Err(josh::josh_error(&stderr));
}
return Ok(stderr);
}
pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> {
tracing::debug!("init base repo: {:?}", path);
std::fs::create_dir_all(path).expect("can't create_dir_all");
git2::Repository::init_bare(path)?;
let shell = josh::shell::Shell {
cwd: path.to_path_buf(),
};
shell.command("git config http.receivepack true");
shell.command("git config uploadpack.allowsidebandall true");
shell.command("git config receive.advertisePushOptions true");
let ce = std::env::current_exe().expect("can't find path to exe");
shell.command("rm -Rf hooks");
shell.command("mkdir hooks");
std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update"))
.expect("can't symlink update hook");
std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive"))
.expect("can't symlink pre-receive hook");
shell.command(&format!(
"git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'"
));
shell.command(&"git config gc.auto 0");
if std::env::var_os("JOSH_KEEP_NS") == None {
std::fs::remove_dir_all(path.join("refs/namespaces")).ok();
}
tracing::info!("repo initialized");
return Ok(());
}
#[tracing::instrument(skip(credential_store))]
pub fn fetch_refs_from_url(
path: &std::path::Path,
upstream_repo: &str,
url: &str,
refs_prefixes: &[String],
username: &str,
password: &HashedPassword,
credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>,
) -> josh::JoshResult | let rev = format!(
"refs/josh/upstream/{}/{}", | random_line_split |
lib.rs | )?;
let push_options: Vec<&str> = push_options_string.split("\n").collect();
for (refname, (old, new)) in repo_update.refs.iter() {
tracing::debug!("REPO_UPDATE env ok");
let transaction = josh::cache::Transaction::open(
&std::path::Path::new(&repo_update.git_dir),
)?;
let old = git2::Oid::from_str(old)?;
let (baseref, push_to, options) = baseref_and_options(refname)?;
let josh_merge = push_options.contains(&"merge");
tracing::debug!("push options: {:?}", push_options);
tracing::debug!("josh-merge: {:?}", josh_merge);
let old = if old == git2::Oid::zero() {
let rev =
format!("refs/namespaces/{}/{}", repo_update.git_ns, &baseref);
let oid = if let Ok(x) = transaction.repo().revparse_single(&rev) {
x.id()
} else {
old
};
tracing::debug!("push: old oid: {:?}, rev: {:?}", oid, rev);
oid
} else {
tracing::debug!("push: old oid: {:?}, refname: {:?}", old, refname);
old
};
let unfiltered_old = {
let rev = format!(
"refs/josh/upstream/{}/{}",
repo_update.base_ns, &baseref
);
let oid = transaction
.repo()
.refname_to_id(&rev)
.unwrap_or(git2::Oid::zero());
tracing::debug!(
"push: unfiltered_old oid: {:?}, rev: {:?}",
oid,
rev
);
oid
};
let amends = {
let gerrit_changes = format!(
"refs/josh/upstream/{}/refs/gerrit_changes/all",
repo_update.base_ns,
);
let mut amends = std::collections::HashMap::new();
if let Ok(tree) = transaction
.repo()
.find_reference(&gerrit_changes)
.and_then(|x| x.peel_to_commit())
.and_then(|x| x.tree())
{
tree.walk(git2::TreeWalkMode::PreOrder, |_, entry| {
if let Ok(commit) =
transaction.repo().find_commit(entry.id())
{
if let Some(id) = josh::get_change_id(&commit) {
amends.insert(id, commit.id());
}
}
git2::TreeWalkResult::Ok
})?;
}
amends
};
let filterobj = josh::filter::parse(&repo_update.filter_spec)?;
let new_oid = git2::Oid::from_str(&new)?;
let backward_new_oid = {
tracing::debug!("=== MORE");
tracing::debug!("=== processed_old {:?}", old);
match josh::history::unapply_filter(
&transaction,
filterobj,
unfiltered_old,
old,
new_oid,
josh_merge,
&amends,
)? {
josh::UnapplyResult::Done(rewritten) => {
tracing::debug!("rewritten");
rewritten
}
josh::UnapplyResult::BranchDoesNotExist => {
return Err(josh::josh_error(
"branch does not exist on remote",
));
}
josh::UnapplyResult::RejectMerge(parent_count) => {
return Err(josh::josh_error(&format!(
"rejecting merge with {} parents",
parent_count
)));
}
josh::UnapplyResult::RejectAmend(msg) => {
return Err(josh::josh_error(&format!(
"rejecting to amend {:?} with conflicting changes",
msg
)));
}
}
};
let oid_to_push = if josh_merge {
let rev = format!(
"refs/josh/upstream/{}/{}",
&repo_update.base_ns, &baseref
);
let backward_commit =
transaction.repo().find_commit(backward_new_oid)?;
if let Ok(Ok(base_commit)) = transaction
.repo()
.revparse_single(&rev)
.map(|x| x.peel_to_commit())
{
let merged_tree = transaction
.repo()
.merge_commits(&base_commit, &backward_commit, None)?
.write_tree_to(&transaction.repo())?;
transaction.repo().commit(
None,
&backward_commit.author(),
&backward_commit.committer(),
&format!("Merge from {}", &repo_update.filter_spec),
&transaction.repo().find_tree(merged_tree)?,
&[&base_commit, &backward_commit],
)?
} else {
return Err(josh::josh_error("josh_merge failed"));
}
} else {
backward_new_oid
};
let push_with_options = if options.len() != 0 {
push_to + "%" + &options.join(",")
} else {
push_to
};
let password = credential_store
.read()?
.get(&repo_update.password)
.unwrap_or(&Password {
value: "".to_owned(),
})
.to_owned();
let reapply = josh::filter::apply_to_commit(
filterobj,
&transaction.repo().find_commit(oid_to_push)?,
&transaction,
)?;
resp = format!(
"{}{}",
resp,
push_head_url(
&transaction.repo(),
oid_to_push,
&push_with_options,
&repo_update.remote_url,
&repo_update.username,
&password,
&repo_update.git_ns,
)?
);
if new_oid != reapply {
transaction.repo().reference(
&format!(
"refs/josh/rewrites/{}/r_{}",
repo_update.base_ns, reapply
),
reapply,
true,
"reapply",
)?;
resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply);
}
}
return Ok(resp);
}
fn push_head_url(
repo: &git2::Repository,
oid: git2::Oid,
refname: &str,
url: &str,
username: &str,
password: &Password,
namespace: &str,
) -> josh::JoshResult<String> {
let rn = format!("refs/{}", &namespace);
let spec = format!("{}:{}", &rn, &refname);
let shell = josh::shell::Shell {
cwd: repo.path().to_owned(),
};
let nurl = if username != "" {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, &username, &rest)
} else | ;
let cmd = format!("git push {} '{}'", &nurl, &spec);
let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?;
let (stdout, stderr, status) =
shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]);
fakehead.delete()?;
tracing::debug!("{}", &stderr);
tracing::debug!("{}", &stdout);
let stderr = stderr.replace(&rn, "JOSH_PUSH");
if status != 0 {
return Err(josh::josh_error(&stderr));
}
return Ok(stderr);
}
pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> {
tracing::debug!("init base repo: {:?}", path);
std::fs::create_dir_all(path).expect("can't create_dir_all");
git2::Repository::init_bare(path)?;
let shell = josh::shell::Shell {
cwd: path.to_path_buf(),
};
shell.command("git config http.receivepack true");
shell.command("git config uploadpack.allowsidebandall true");
shell.command("git config receive.advertisePushOptions true");
let ce = std::env::current_exe().expect("can't find path to exe");
shell.command("rm -Rf hooks");
shell.command("mkdir hooks");
std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update"))
.expect("can't symlink update hook");
std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive"))
.expect("can't symlink pre-receive hook");
shell.command(&format!(
"git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'"
));
shell.command(&"git config gc.auto 0");
if std::env::var_os("JOSH_KEEP_NS") == None {
std::fs::remove_dir_all(path.join("refs/namespaces")).ok();
}
tracing::info!("repo initialized");
return Ok(());
}
#[tracing::instrument(skip(credential_store))]
pub fn fetch_refs_from_url(
path: &std::path::Path,
upstream_repo: &str,
url: &str,
refs_prefixes: &[String],
username: &str,
password: &HashedPassword,
credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>,
) -> josh:: | {
url.to_owned()
} | conditional_block |
lib.rs | ::filter::parse(&repo_update.filter_spec)?;
let new_oid = git2::Oid::from_str(&new)?;
let backward_new_oid = {
tracing::debug!("=== MORE");
tracing::debug!("=== processed_old {:?}", old);
match josh::history::unapply_filter(
&transaction,
filterobj,
unfiltered_old,
old,
new_oid,
josh_merge,
&amends,
)? {
josh::UnapplyResult::Done(rewritten) => {
tracing::debug!("rewritten");
rewritten
}
josh::UnapplyResult::BranchDoesNotExist => {
return Err(josh::josh_error(
"branch does not exist on remote",
));
}
josh::UnapplyResult::RejectMerge(parent_count) => {
return Err(josh::josh_error(&format!(
"rejecting merge with {} parents",
parent_count
)));
}
josh::UnapplyResult::RejectAmend(msg) => {
return Err(josh::josh_error(&format!(
"rejecting to amend {:?} with conflicting changes",
msg
)));
}
}
};
let oid_to_push = if josh_merge {
let rev = format!(
"refs/josh/upstream/{}/{}",
&repo_update.base_ns, &baseref
);
let backward_commit =
transaction.repo().find_commit(backward_new_oid)?;
if let Ok(Ok(base_commit)) = transaction
.repo()
.revparse_single(&rev)
.map(|x| x.peel_to_commit())
{
let merged_tree = transaction
.repo()
.merge_commits(&base_commit, &backward_commit, None)?
.write_tree_to(&transaction.repo())?;
transaction.repo().commit(
None,
&backward_commit.author(),
&backward_commit.committer(),
&format!("Merge from {}", &repo_update.filter_spec),
&transaction.repo().find_tree(merged_tree)?,
&[&base_commit, &backward_commit],
)?
} else {
return Err(josh::josh_error("josh_merge failed"));
}
} else {
backward_new_oid
};
let push_with_options = if options.len() != 0 {
push_to + "%" + &options.join(",")
} else {
push_to
};
let password = credential_store
.read()?
.get(&repo_update.password)
.unwrap_or(&Password {
value: "".to_owned(),
})
.to_owned();
let reapply = josh::filter::apply_to_commit(
filterobj,
&transaction.repo().find_commit(oid_to_push)?,
&transaction,
)?;
resp = format!(
"{}{}",
resp,
push_head_url(
&transaction.repo(),
oid_to_push,
&push_with_options,
&repo_update.remote_url,
&repo_update.username,
&password,
&repo_update.git_ns,
)?
);
if new_oid != reapply {
transaction.repo().reference(
&format!(
"refs/josh/rewrites/{}/r_{}",
repo_update.base_ns, reapply
),
reapply,
true,
"reapply",
)?;
resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply);
}
}
return Ok(resp);
}
fn push_head_url(
repo: &git2::Repository,
oid: git2::Oid,
refname: &str,
url: &str,
username: &str,
password: &Password,
namespace: &str,
) -> josh::JoshResult<String> {
let rn = format!("refs/{}", &namespace);
let spec = format!("{}:{}", &rn, &refname);
let shell = josh::shell::Shell {
cwd: repo.path().to_owned(),
};
let nurl = if username != "" {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, &username, &rest)
} else {
url.to_owned()
};
let cmd = format!("git push {} '{}'", &nurl, &spec);
let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?;
let (stdout, stderr, status) =
shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]);
fakehead.delete()?;
tracing::debug!("{}", &stderr);
tracing::debug!("{}", &stdout);
let stderr = stderr.replace(&rn, "JOSH_PUSH");
if status != 0 {
return Err(josh::josh_error(&stderr));
}
return Ok(stderr);
}
pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> {
tracing::debug!("init base repo: {:?}", path);
std::fs::create_dir_all(path).expect("can't create_dir_all");
git2::Repository::init_bare(path)?;
let shell = josh::shell::Shell {
cwd: path.to_path_buf(),
};
shell.command("git config http.receivepack true");
shell.command("git config uploadpack.allowsidebandall true");
shell.command("git config receive.advertisePushOptions true");
let ce = std::env::current_exe().expect("can't find path to exe");
shell.command("rm -Rf hooks");
shell.command("mkdir hooks");
std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update"))
.expect("can't symlink update hook");
std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive"))
.expect("can't symlink pre-receive hook");
shell.command(&format!(
"git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'"
));
shell.command(&"git config gc.auto 0");
if std::env::var_os("JOSH_KEEP_NS") == None {
std::fs::remove_dir_all(path.join("refs/namespaces")).ok();
}
tracing::info!("repo initialized");
return Ok(());
}
#[tracing::instrument(skip(credential_store))]
pub fn fetch_refs_from_url(
path: &std::path::Path,
upstream_repo: &str,
url: &str,
refs_prefixes: &[String],
username: &str,
password: &HashedPassword,
credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>,
) -> josh::JoshResult<bool> {
let specs: Vec<_> = refs_prefixes
.iter()
.map(|r| {
format!(
"'+{}:refs/josh/upstream/{}/{}'",
&r,
josh::to_ns(upstream_repo),
&r
)
})
.collect();
let shell = josh::shell::Shell {
cwd: path.to_owned(),
};
let nurl = if username != "" {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, &username, &rest)
} else {
let splitted: Vec<&str> = url.splitn(2, "://").collect();
let proto = splitted[0];
let rest = splitted[1];
format!("{}://{}@{}", &proto, "annonymous", &rest)
};
let cmd = format!("git fetch --no-tags {} {}", &nurl, &specs.join(" "));
tracing::info!("fetch_refs_from_url {:?} {:?} {:?}", cmd, path, "");
let password = credential_store
.read()?
.get(&password)
.unwrap_or(&Password {
value: "".to_owned(),
})
.to_owned();
let (_stdout, stderr, _) =
shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]);
tracing::debug!(
"fetch_refs_from_url done {:?} {:?} {:?}",
cmd,
path,
stderr
);
if stderr.contains("fatal: Authentication failed") {
return Ok(false);
}
if stderr.contains("fatal:") {
return Err(josh::josh_error(&format!("git error: {:?}", stderr)));
}
if stderr.contains("error:") {
return Err(josh::josh_error(&format!("git error: {:?}", stderr)));
}
return Ok(true);
}
// Wrapper struct for storing passwords to avoid having
// them output to traces by accident
#[derive(Clone)]
pub struct Password {
pub value: String,
}
#[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct HashedPassword {
pub hash: String,
}
pub type CredentialStore = std::collections::HashMap<HashedPassword, Password>;
impl std::fmt::Debug for HashedPassword {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result | {
f.debug_struct("HashedPassword")
.field("value", &self.hash)
.finish()
} | identifier_body | |
nklab-bids-convert.py | derivatives directories will be deleted.')
parser.add_argument('--exceptionlist', action='store',
help='File containing list of exception dicoms to be manually identifed due to problems with the obtained datetime')
parser.add_argument('--noprompt',action='store_true', default=False,
help='Use this flag to bypass prompts. Necessary for running on the HPC')
parser.add_argument('--noanon', action='store_true', default=False,help='do not anonymize json files')
parser.add_argument('--version', action='version', version='nklab-bids-convert v{}'.format(__version__))
return parser
def logtext(logfile, textstr):
|
def main():
opts = get_parser().parse_args()
ROOTDIR=os.path.abspath(opts.dicomdir)
STAGEDIR=os.path.abspath(opts.stagedir)
OUTPUTDIR=os.path.abspath(opts.bidsdir)
if opts.workdir:
WORKDIR=os.path.abspath(opts.workdir)
else:
WORKDIR=os.getcwd()
BIDSROOTDIR= os.path.dirname(STAGEDIR)
BIDSKITROOTDIR = os.path.join(BIDSROOTDIR, 'derivatives', 'conversion')
BIDSKITWORKDIR = os.path.join(BIDSROOTDIR, 'work', 'conversion')
ProtocolTranslator=os.path.join(BIDSKITROOTDIR,"Protocol_Translator.json")
origProtocolTranslator=os.path.join(BIDSKITROOTDIR,"origProtocol_Translator.json")
if opts.bidskit:
BIDSKIT=os.path.abspath(opts.bidskit)
else:
BIDSKIT="/opt/bin/bidskit"
if opts.sessions:
SESSIONS=opts.sessions
CLUSTER=len(SESSIONS)
else:
CLUSTER=0
if opts.bidstranslator:
BIDSTRANSLATOR=os.path.abspath(opts.bidstranslator)
else:
BIDSTRANSLATOR=None
if opts.exceptionlist:
EXCEPTIONFILE=os.path.abspath(opts.exceptionlist)
else:
EXCEPTIONFILE=None
if opts.logname:
BASELOGNAME=opts.logname
else:
BASELOGNAME='nklab-bids-convert'
bCONVERTONLY=opts.convertonly
bBYPASS=opts.bypass
bNOPROMPT=opts.noprompt
bNOANON= opts.noanon
bINCREMENTAL= opts.incremental
bSTAGEONLY=opts.stageonly
# always run conversions only incrementally
if bCONVERTONLY:
bINCREMENTAL=True
TIMESTAMP=datetime.datetime.now().strftime("%m%d%y%H%M%S%p")
LOGFILENAME=BASELOGNAME + '_' + TIMESTAMP + '.log'
LOGFILE = open(os.path.join(WORKDIR,LOGFILENAME), 'w')
BackupProtocolTranslator=os.path.join(BIDSKITROOTDIR,"backup_Protocol_Translator_" + TIMESTAMP + ".json")
FINALERRORS=""
#add code to only search for directories
subjects = [f for f in glob.glob(ROOTDIR + '/*') if os.path.isdir(f)]
for subject in subjects:
SUBJECTDIROUT=os.path.join(STAGEDIR,os.path.basename(subject))
logtext(LOGFILE,"processing subject " + subject)
if CLUSTER > 0:
logtext(LOGFILE,"setting up BIDS hierarchy for " + str(CLUSTER) + " sessions > " + str(SESSIONS))
else:
logtext(LOGFILE,"setting up BIDS hierarchy without sessions")
if os.path.exists(SUBJECTDIROUT):
logtext(LOGFILE, "Subject " + subject + " already exists.")
if bBYPASS:
logtext(LOGFILE, "Processing will skip to next subject")
continue
else:
logtext(LOGFILE, "Existing files will be overwritten")
dicomdirs=[]
for root, dirs, files in os.walk(subject,topdown=False):
for name in dirs:
subdir=os.path.join(root,name)
#need a better method below for case insensitive and user supplied globbing
if len(glob.glob(subdir + '/*.dcm')) > 0 or len(glob.glob(subdir + '/*.IMA')) > 0:
dicomdirs.append(subdir)
if len(glob.glob(subdir + '/*.DCM')) > 0 or len(glob.glob(subdir + '/*.ima')) > 0:
dicomdirs.append(subdir)
acquisitions=[]
datetime_str=""
for dicomdir in dicomdirs:
dcmfiles=glob.glob(dicomdir+'/*.dcm')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.DCM')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.IMA')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.ima')
dcmfile=dcmfiles[0]
logtext(LOGFILE, "Processing DICOM : " + dcmfile)
ds = pydicom.dcmread(dcmfile)
try:
date = ds.AcquisitionDate
time = ds.AcquisitionTime
datetime_str=date + ' ' + time
except:
logtext(LOGFILE, "Date and/or time missing for DICOM : " + dcmfile)
FINALERRORS=FINALERRORS + "Date and/or time missing for DICOM : " + dcmfile + "\n"
logtext(LOGFILE, "Ensure that this DICOM is defined in exception.json and then run conversion again")
FINALERRORS=FINALERRORS + "Ensure that this DICOM is defined in exception.json and then run conversion again." + "\n\n"
if len(datetime_str) == 0:
datetime_str=datetime.datetime.now()
if "." in datetime_str:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S.%f')
else:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S')
datestamp=datetime.datetime.timestamp(datetime_obj)
acquisitions.append(datestamp)
if CLUSTER>1:
if not EXCEPTIONFILE == None:
exceptionFile=open(EXCEPTIONFILE)
exceptions=json.load(exceptionFile)
for key in exceptions:
twin=exceptions[key]
twin=twin[0]
exceptionIndex=None
correctIndex=None
for i,j in enumerate(dicomdirs):
if key in j:
exceptionIndex=i
if twin in j:
correctIndex=i
if exceptionIndex != None and correctIndex != None:
acquisitions[exceptionIndex]=acquisitions[correctIndex]
X=[[x,1] for x in acquisitions]
Y=np.array(X)
kmeans = KMeans(n_clusters=CLUSTER, random_state=0).fit(Y)
labels = kmeans.predict(Y)
timelabels=[]
for index,value in enumerate(labels):
timelabels.append([acquisitions[index],value])
timelabels.sort()
sortedlabels=[x[1] for x in timelabels]
mapping=[]
for i,v in enumerate(sortedlabels):
if i==0:
newvalue=v
newindex=0
mapping.append([v, newindex])
else:
if v != newvalue:
newvalue=v
newindex=newindex+1
mapping.append([v, newindex])
sessionlist=[]
outputlist=[]
for session in SESSIONS:
sessionlist.append([])
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, session)
outputlist.append(dirpath)
for index, label in enumerate(labels):
for map in mapping:
if map[0]==label:
newindex=map[1]
sessionlist[newindex].append(dicomdirs[index])
elif CLUSTER == 1:
newlist=dicomdirs.copy()
sessionlist=[newlist]
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, SESSIONS[0])
outputlist=[dirpath]
else:
newlist=dicomdirs.copy()
sessionlist=[newlist]
outputlist=[os.path.join(STAGEDIR,os.path.basename(subject))]
for index, output in enumerate(outputlist):
for subdir in sessionlist[index]:
outpath=os.path.join(output,os.path.basename(subdir))
logtext(LOGFILE,"copying "+subdir + " to " + outpath)
copy_tree(subdir,outpath)
logtext(LOGFILE,"Copying of dicoms for all subjects completed.")
if bSTAGEONLY:
logtext(LOGFILE,"Please run BIDSKIT manually later or run this program without --stageonly.")
sys.exit()
if CLUSTER < 2:
NOSESSIONS="--no-sessions"
else:
NOSESSIONS=""
if bNOANON:
NOANON="--noanon"
else:
NOANON=""
#adding some "intelligence" - if incremental provided but clearly BIDSKIT hasn't been run then make non-incremental
if bINCREMENTAL:
logtext(LOGFILE,"Running BIDSConvert incrementally.")
| stamp=datetime.datetime.now().strftime("%m-%d-%y %H:%M:%S%p")
textstring=stamp + ' ' + textstr
print(textstring)
logfile.write(textstring+'\n') | identifier_body |
nklab-bids-convert.py | derivatives directories will be deleted.')
parser.add_argument('--exceptionlist', action='store',
help='File containing list of exception dicoms to be manually identifed due to problems with the obtained datetime')
parser.add_argument('--noprompt',action='store_true', default=False,
help='Use this flag to bypass prompts. Necessary for running on the HPC')
parser.add_argument('--noanon', action='store_true', default=False,help='do not anonymize json files')
parser.add_argument('--version', action='version', version='nklab-bids-convert v{}'.format(__version__))
return parser
def | (logfile, textstr):
stamp=datetime.datetime.now().strftime("%m-%d-%y %H:%M:%S%p")
textstring=stamp + ' ' + textstr
print(textstring)
logfile.write(textstring+'\n')
def main():
opts = get_parser().parse_args()
ROOTDIR=os.path.abspath(opts.dicomdir)
STAGEDIR=os.path.abspath(opts.stagedir)
OUTPUTDIR=os.path.abspath(opts.bidsdir)
if opts.workdir:
WORKDIR=os.path.abspath(opts.workdir)
else:
WORKDIR=os.getcwd()
BIDSROOTDIR= os.path.dirname(STAGEDIR)
BIDSKITROOTDIR = os.path.join(BIDSROOTDIR, 'derivatives', 'conversion')
BIDSKITWORKDIR = os.path.join(BIDSROOTDIR, 'work', 'conversion')
ProtocolTranslator=os.path.join(BIDSKITROOTDIR,"Protocol_Translator.json")
origProtocolTranslator=os.path.join(BIDSKITROOTDIR,"origProtocol_Translator.json")
if opts.bidskit:
BIDSKIT=os.path.abspath(opts.bidskit)
else:
BIDSKIT="/opt/bin/bidskit"
if opts.sessions:
SESSIONS=opts.sessions
CLUSTER=len(SESSIONS)
else:
CLUSTER=0
if opts.bidstranslator:
BIDSTRANSLATOR=os.path.abspath(opts.bidstranslator)
else:
BIDSTRANSLATOR=None
if opts.exceptionlist:
EXCEPTIONFILE=os.path.abspath(opts.exceptionlist)
else:
EXCEPTIONFILE=None
if opts.logname:
BASELOGNAME=opts.logname
else:
BASELOGNAME='nklab-bids-convert'
bCONVERTONLY=opts.convertonly
bBYPASS=opts.bypass
bNOPROMPT=opts.noprompt
bNOANON= opts.noanon
bINCREMENTAL= opts.incremental
bSTAGEONLY=opts.stageonly
# always run conversions only incrementally
if bCONVERTONLY:
bINCREMENTAL=True
TIMESTAMP=datetime.datetime.now().strftime("%m%d%y%H%M%S%p")
LOGFILENAME=BASELOGNAME + '_' + TIMESTAMP + '.log'
LOGFILE = open(os.path.join(WORKDIR,LOGFILENAME), 'w')
BackupProtocolTranslator=os.path.join(BIDSKITROOTDIR,"backup_Protocol_Translator_" + TIMESTAMP + ".json")
FINALERRORS=""
#add code to only search for directories
subjects = [f for f in glob.glob(ROOTDIR + '/*') if os.path.isdir(f)]
for subject in subjects:
SUBJECTDIROUT=os.path.join(STAGEDIR,os.path.basename(subject))
logtext(LOGFILE,"processing subject " + subject)
if CLUSTER > 0:
logtext(LOGFILE,"setting up BIDS hierarchy for " + str(CLUSTER) + " sessions > " + str(SESSIONS))
else:
logtext(LOGFILE,"setting up BIDS hierarchy without sessions")
if os.path.exists(SUBJECTDIROUT):
logtext(LOGFILE, "Subject " + subject + " already exists.")
if bBYPASS:
logtext(LOGFILE, "Processing will skip to next subject")
continue
else:
logtext(LOGFILE, "Existing files will be overwritten")
dicomdirs=[]
for root, dirs, files in os.walk(subject,topdown=False):
for name in dirs:
subdir=os.path.join(root,name)
#need a better method below for case insensitive and user supplied globbing
if len(glob.glob(subdir + '/*.dcm')) > 0 or len(glob.glob(subdir + '/*.IMA')) > 0:
dicomdirs.append(subdir)
if len(glob.glob(subdir + '/*.DCM')) > 0 or len(glob.glob(subdir + '/*.ima')) > 0:
dicomdirs.append(subdir)
acquisitions=[]
datetime_str=""
for dicomdir in dicomdirs:
dcmfiles=glob.glob(dicomdir+'/*.dcm')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.DCM')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.IMA')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.ima')
dcmfile=dcmfiles[0]
logtext(LOGFILE, "Processing DICOM : " + dcmfile)
ds = pydicom.dcmread(dcmfile)
try:
date = ds.AcquisitionDate
time = ds.AcquisitionTime
datetime_str=date + ' ' + time
except:
logtext(LOGFILE, "Date and/or time missing for DICOM : " + dcmfile)
FINALERRORS=FINALERRORS + "Date and/or time missing for DICOM : " + dcmfile + "\n"
logtext(LOGFILE, "Ensure that this DICOM is defined in exception.json and then run conversion again")
FINALERRORS=FINALERRORS + "Ensure that this DICOM is defined in exception.json and then run conversion again." + "\n\n"
if len(datetime_str) == 0:
datetime_str=datetime.datetime.now()
if "." in datetime_str:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S.%f')
else:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S')
datestamp=datetime.datetime.timestamp(datetime_obj)
acquisitions.append(datestamp)
if CLUSTER>1:
if not EXCEPTIONFILE == None:
exceptionFile=open(EXCEPTIONFILE)
exceptions=json.load(exceptionFile)
for key in exceptions:
twin=exceptions[key]
twin=twin[0]
exceptionIndex=None
correctIndex=None
for i,j in enumerate(dicomdirs):
if key in j:
exceptionIndex=i
if twin in j:
correctIndex=i
if exceptionIndex != None and correctIndex != None:
acquisitions[exceptionIndex]=acquisitions[correctIndex]
X=[[x,1] for x in acquisitions]
Y=np.array(X)
kmeans = KMeans(n_clusters=CLUSTER, random_state=0).fit(Y)
labels = kmeans.predict(Y)
timelabels=[]
for index,value in enumerate(labels):
timelabels.append([acquisitions[index],value])
timelabels.sort()
sortedlabels=[x[1] for x in timelabels]
mapping=[]
for i,v in enumerate(sortedlabels):
if i==0:
newvalue=v
newindex=0
mapping.append([v, newindex])
else:
if v != newvalue:
newvalue=v
newindex=newindex+1
mapping.append([v, newindex])
sessionlist=[]
outputlist=[]
for session in SESSIONS:
sessionlist.append([])
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, session)
outputlist.append(dirpath)
for index, label in enumerate(labels):
for map in mapping:
if map[0]==label:
newindex=map[1]
sessionlist[newindex].append(dicomdirs[index])
elif CLUSTER == 1:
newlist=dicomdirs.copy()
sessionlist=[newlist]
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, SESSIONS[0])
outputlist=[dirpath]
else:
newlist=dicomdirs.copy()
sessionlist=[newlist]
outputlist=[os.path.join(STAGEDIR,os.path.basename(subject))]
for index, output in enumerate(outputlist):
for subdir in sessionlist[index]:
outpath=os.path.join(output,os.path.basename(subdir))
logtext(LOGFILE,"copying "+subdir + " to " + outpath)
copy_tree(subdir,outpath)
logtext(LOGFILE,"Copying of dicoms for all subjects completed.")
if bSTAGEONLY:
logtext(LOGFILE,"Please run BIDSKIT manually later or run this program without --stageonly.")
sys.exit()
if CLUSTER < 2:
NOSESSIONS="--no-sessions"
else:
NOSESSIONS=""
if bNOANON:
NOANON="--noanon"
else:
NOANON=""
#adding some "intelligence" - if incremental provided but clearly BIDSKIT hasn't been run then make non-incremental
if bINCREMENTAL:
logtext(LOGFILE,"Running BIDSConvert incrementally.")
| logtext | identifier_name |
nklab-bids-convert.py | derivatives directories will be deleted.')
parser.add_argument('--exceptionlist', action='store',
help='File containing list of exception dicoms to be manually identifed due to problems with the obtained datetime')
parser.add_argument('--noprompt',action='store_true', default=False,
help='Use this flag to bypass prompts. Necessary for running on the HPC')
parser.add_argument('--noanon', action='store_true', default=False,help='do not anonymize json files')
parser.add_argument('--version', action='version', version='nklab-bids-convert v{}'.format(__version__))
return parser
def logtext(logfile, textstr):
stamp=datetime.datetime.now().strftime("%m-%d-%y %H:%M:%S%p")
textstring=stamp + ' ' + textstr
print(textstring)
logfile.write(textstring+'\n')
def main():
opts = get_parser().parse_args()
ROOTDIR=os.path.abspath(opts.dicomdir)
STAGEDIR=os.path.abspath(opts.stagedir)
OUTPUTDIR=os.path.abspath(opts.bidsdir)
if opts.workdir:
WORKDIR=os.path.abspath(opts.workdir)
else:
WORKDIR=os.getcwd()
BIDSROOTDIR= os.path.dirname(STAGEDIR)
BIDSKITROOTDIR = os.path.join(BIDSROOTDIR, 'derivatives', 'conversion')
BIDSKITWORKDIR = os.path.join(BIDSROOTDIR, 'work', 'conversion')
ProtocolTranslator=os.path.join(BIDSKITROOTDIR,"Protocol_Translator.json")
origProtocolTranslator=os.path.join(BIDSKITROOTDIR,"origProtocol_Translator.json")
if opts.bidskit:
BIDSKIT=os.path.abspath(opts.bidskit)
else:
BIDSKIT="/opt/bin/bidskit"
if opts.sessions:
SESSIONS=opts.sessions
CLUSTER=len(SESSIONS)
else: | if opts.bidstranslator:
BIDSTRANSLATOR=os.path.abspath(opts.bidstranslator)
else:
BIDSTRANSLATOR=None
if opts.exceptionlist:
EXCEPTIONFILE=os.path.abspath(opts.exceptionlist)
else:
EXCEPTIONFILE=None
if opts.logname:
BASELOGNAME=opts.logname
else:
BASELOGNAME='nklab-bids-convert'
bCONVERTONLY=opts.convertonly
bBYPASS=opts.bypass
bNOPROMPT=opts.noprompt
bNOANON= opts.noanon
bINCREMENTAL= opts.incremental
bSTAGEONLY=opts.stageonly
# always run conversions only incrementally
if bCONVERTONLY:
bINCREMENTAL=True
TIMESTAMP=datetime.datetime.now().strftime("%m%d%y%H%M%S%p")
LOGFILENAME=BASELOGNAME + '_' + TIMESTAMP + '.log'
LOGFILE = open(os.path.join(WORKDIR,LOGFILENAME), 'w')
BackupProtocolTranslator=os.path.join(BIDSKITROOTDIR,"backup_Protocol_Translator_" + TIMESTAMP + ".json")
FINALERRORS=""
#add code to only search for directories
subjects = [f for f in glob.glob(ROOTDIR + '/*') if os.path.isdir(f)]
for subject in subjects:
SUBJECTDIROUT=os.path.join(STAGEDIR,os.path.basename(subject))
logtext(LOGFILE,"processing subject " + subject)
if CLUSTER > 0:
logtext(LOGFILE,"setting up BIDS hierarchy for " + str(CLUSTER) + " sessions > " + str(SESSIONS))
else:
logtext(LOGFILE,"setting up BIDS hierarchy without sessions")
if os.path.exists(SUBJECTDIROUT):
logtext(LOGFILE, "Subject " + subject + " already exists.")
if bBYPASS:
logtext(LOGFILE, "Processing will skip to next subject")
continue
else:
logtext(LOGFILE, "Existing files will be overwritten")
dicomdirs=[]
for root, dirs, files in os.walk(subject,topdown=False):
for name in dirs:
subdir=os.path.join(root,name)
#need a better method below for case insensitive and user supplied globbing
if len(glob.glob(subdir + '/*.dcm')) > 0 or len(glob.glob(subdir + '/*.IMA')) > 0:
dicomdirs.append(subdir)
if len(glob.glob(subdir + '/*.DCM')) > 0 or len(glob.glob(subdir + '/*.ima')) > 0:
dicomdirs.append(subdir)
acquisitions=[]
datetime_str=""
for dicomdir in dicomdirs:
dcmfiles=glob.glob(dicomdir+'/*.dcm')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.DCM')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.IMA')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.ima')
dcmfile=dcmfiles[0]
logtext(LOGFILE, "Processing DICOM : " + dcmfile)
ds = pydicom.dcmread(dcmfile)
try:
date = ds.AcquisitionDate
time = ds.AcquisitionTime
datetime_str=date + ' ' + time
except:
logtext(LOGFILE, "Date and/or time missing for DICOM : " + dcmfile)
FINALERRORS=FINALERRORS + "Date and/or time missing for DICOM : " + dcmfile + "\n"
logtext(LOGFILE, "Ensure that this DICOM is defined in exception.json and then run conversion again")
FINALERRORS=FINALERRORS + "Ensure that this DICOM is defined in exception.json and then run conversion again." + "\n\n"
if len(datetime_str) == 0:
datetime_str=datetime.datetime.now()
if "." in datetime_str:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S.%f')
else:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S')
datestamp=datetime.datetime.timestamp(datetime_obj)
acquisitions.append(datestamp)
if CLUSTER>1:
if not EXCEPTIONFILE == None:
exceptionFile=open(EXCEPTIONFILE)
exceptions=json.load(exceptionFile)
for key in exceptions:
twin=exceptions[key]
twin=twin[0]
exceptionIndex=None
correctIndex=None
for i,j in enumerate(dicomdirs):
if key in j:
exceptionIndex=i
if twin in j:
correctIndex=i
if exceptionIndex != None and correctIndex != None:
acquisitions[exceptionIndex]=acquisitions[correctIndex]
X=[[x,1] for x in acquisitions]
Y=np.array(X)
kmeans = KMeans(n_clusters=CLUSTER, random_state=0).fit(Y)
labels = kmeans.predict(Y)
timelabels=[]
for index,value in enumerate(labels):
timelabels.append([acquisitions[index],value])
timelabels.sort()
sortedlabels=[x[1] for x in timelabels]
mapping=[]
for i,v in enumerate(sortedlabels):
if i==0:
newvalue=v
newindex=0
mapping.append([v, newindex])
else:
if v != newvalue:
newvalue=v
newindex=newindex+1
mapping.append([v, newindex])
sessionlist=[]
outputlist=[]
for session in SESSIONS:
sessionlist.append([])
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, session)
outputlist.append(dirpath)
for index, label in enumerate(labels):
for map in mapping:
if map[0]==label:
newindex=map[1]
sessionlist[newindex].append(dicomdirs[index])
elif CLUSTER == 1:
newlist=dicomdirs.copy()
sessionlist=[newlist]
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, SESSIONS[0])
outputlist=[dirpath]
else:
newlist=dicomdirs.copy()
sessionlist=[newlist]
outputlist=[os.path.join(STAGEDIR,os.path.basename(subject))]
for index, output in enumerate(outputlist):
for subdir in sessionlist[index]:
outpath=os.path.join(output,os.path.basename(subdir))
logtext(LOGFILE,"copying "+subdir + " to " + outpath)
copy_tree(subdir,outpath)
logtext(LOGFILE,"Copying of dicoms for all subjects completed.")
if bSTAGEONLY:
logtext(LOGFILE,"Please run BIDSKIT manually later or run this program without --stageonly.")
sys.exit()
if CLUSTER < 2:
NOSESSIONS="--no-sessions"
else:
NOSESSIONS=""
if bNOANON:
NOANON="--noanon"
else:
NOANON=""
#adding some "intelligence" - if incremental provided but clearly BIDSKIT hasn't been run then make non-incremental
if bINCREMENTAL:
logtext(LOGFILE,"Running BIDSConvert incrementally.")
SEL | CLUSTER=0
| random_line_split |
nklab-bids-convert.py | opts.convertonly
bBYPASS=opts.bypass
bNOPROMPT=opts.noprompt
bNOANON= opts.noanon
bINCREMENTAL= opts.incremental
bSTAGEONLY=opts.stageonly
# always run conversions only incrementally
if bCONVERTONLY:
bINCREMENTAL=True
TIMESTAMP=datetime.datetime.now().strftime("%m%d%y%H%M%S%p")
LOGFILENAME=BASELOGNAME + '_' + TIMESTAMP + '.log'
LOGFILE = open(os.path.join(WORKDIR,LOGFILENAME), 'w')
BackupProtocolTranslator=os.path.join(BIDSKITROOTDIR,"backup_Protocol_Translator_" + TIMESTAMP + ".json")
FINALERRORS=""
#add code to only search for directories
subjects = [f for f in glob.glob(ROOTDIR + '/*') if os.path.isdir(f)]
for subject in subjects:
SUBJECTDIROUT=os.path.join(STAGEDIR,os.path.basename(subject))
logtext(LOGFILE,"processing subject " + subject)
if CLUSTER > 0:
logtext(LOGFILE,"setting up BIDS hierarchy for " + str(CLUSTER) + " sessions > " + str(SESSIONS))
else:
logtext(LOGFILE,"setting up BIDS hierarchy without sessions")
if os.path.exists(SUBJECTDIROUT):
logtext(LOGFILE, "Subject " + subject + " already exists.")
if bBYPASS:
logtext(LOGFILE, "Processing will skip to next subject")
continue
else:
logtext(LOGFILE, "Existing files will be overwritten")
dicomdirs=[]
for root, dirs, files in os.walk(subject,topdown=False):
for name in dirs:
subdir=os.path.join(root,name)
#need a better method below for case insensitive and user supplied globbing
if len(glob.glob(subdir + '/*.dcm')) > 0 or len(glob.glob(subdir + '/*.IMA')) > 0:
dicomdirs.append(subdir)
if len(glob.glob(subdir + '/*.DCM')) > 0 or len(glob.glob(subdir + '/*.ima')) > 0:
dicomdirs.append(subdir)
acquisitions=[]
datetime_str=""
for dicomdir in dicomdirs:
dcmfiles=glob.glob(dicomdir+'/*.dcm')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.DCM')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.IMA')
if len(dcmfiles) == 0:
dcmfiles=glob.glob(dicomdir+'/*.ima')
dcmfile=dcmfiles[0]
logtext(LOGFILE, "Processing DICOM : " + dcmfile)
ds = pydicom.dcmread(dcmfile)
try:
date = ds.AcquisitionDate
time = ds.AcquisitionTime
datetime_str=date + ' ' + time
except:
logtext(LOGFILE, "Date and/or time missing for DICOM : " + dcmfile)
FINALERRORS=FINALERRORS + "Date and/or time missing for DICOM : " + dcmfile + "\n"
logtext(LOGFILE, "Ensure that this DICOM is defined in exception.json and then run conversion again")
FINALERRORS=FINALERRORS + "Ensure that this DICOM is defined in exception.json and then run conversion again." + "\n\n"
if len(datetime_str) == 0:
datetime_str=datetime.datetime.now()
if "." in datetime_str:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S.%f')
else:
datetime_obj=datetime.datetime.strptime(datetime_str, '%Y%m%d %H%M%S')
datestamp=datetime.datetime.timestamp(datetime_obj)
acquisitions.append(datestamp)
if CLUSTER>1:
if not EXCEPTIONFILE == None:
exceptionFile=open(EXCEPTIONFILE)
exceptions=json.load(exceptionFile)
for key in exceptions:
twin=exceptions[key]
twin=twin[0]
exceptionIndex=None
correctIndex=None
for i,j in enumerate(dicomdirs):
if key in j:
exceptionIndex=i
if twin in j:
correctIndex=i
if exceptionIndex != None and correctIndex != None:
acquisitions[exceptionIndex]=acquisitions[correctIndex]
X=[[x,1] for x in acquisitions]
Y=np.array(X)
kmeans = KMeans(n_clusters=CLUSTER, random_state=0).fit(Y)
labels = kmeans.predict(Y)
timelabels=[]
for index,value in enumerate(labels):
timelabels.append([acquisitions[index],value])
timelabels.sort()
sortedlabels=[x[1] for x in timelabels]
mapping=[]
for i,v in enumerate(sortedlabels):
if i==0:
newvalue=v
newindex=0
mapping.append([v, newindex])
else:
if v != newvalue:
newvalue=v
newindex=newindex+1
mapping.append([v, newindex])
sessionlist=[]
outputlist=[]
for session in SESSIONS:
sessionlist.append([])
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, session)
outputlist.append(dirpath)
for index, label in enumerate(labels):
for map in mapping:
if map[0]==label:
newindex=map[1]
sessionlist[newindex].append(dicomdirs[index])
elif CLUSTER == 1:
newlist=dicomdirs.copy()
sessionlist=[newlist]
dirpath=os.path.join(STAGEDIR,os.path.basename(subject))
dirpath=os.path.join(dirpath, SESSIONS[0])
outputlist=[dirpath]
else:
newlist=dicomdirs.copy()
sessionlist=[newlist]
outputlist=[os.path.join(STAGEDIR,os.path.basename(subject))]
for index, output in enumerate(outputlist):
for subdir in sessionlist[index]:
outpath=os.path.join(output,os.path.basename(subdir))
logtext(LOGFILE,"copying "+subdir + " to " + outpath)
copy_tree(subdir,outpath)
logtext(LOGFILE,"Copying of dicoms for all subjects completed.")
if bSTAGEONLY:
logtext(LOGFILE,"Please run BIDSKIT manually later or run this program without --stageonly.")
sys.exit()
if CLUSTER < 2:
NOSESSIONS="--no-sessions"
else:
NOSESSIONS=""
if bNOANON:
NOANON="--noanon"
else:
NOANON=""
#adding some "intelligence" - if incremental provided but clearly BIDSKIT hasn't been run then make non-incremental
if bINCREMENTAL:
logtext(LOGFILE,"Running BIDSConvert incrementally.")
SELCONVERT="--selective_convert "
if not os.path.exists(BIDSKITWORKDIR):
logtext(LOGFILE,"BIDSKIT doesnt seem to have been run before - disabling incremental run")
SELCONVERT=""
bINCREMENTAL=False
else:
logtext(LOGFILE,"Running BIDSConvert in a non-incremental fashion. All previous files in work and derivatives directories will be deleted.")
SELCONVERT=""
if os.path.exists(ProtocolTranslator):
copyfile(ProtocolTranslator,BackupProtocolTranslator)
logtext(LOGFILE,"protocol translator backed up as " + BackupProtocolTranslator)
elif os.path.exists(origProtocolTranslator):
copyfile(origProtocolTranslator,BackupProtocolTranslator)
logtext(LOGFILE,"original protocol translator backed up as " + BackupProtocolTranslator)
bSTOPCODE=False
if not os.path.exists(ProtocolTranslator) and BIDSTRANSLATOR == None:
logtext(LOGFILE,"Cannot find an existing Protocol_translator.json - code will terminate after first pass. Please rerun after Protocol Translator has been provided" )
bSTOPCODE=True
if not bCONVERTONLY:
if not bINCREMENTAL:
if os.path.exists(BIDSKITROOTDIR):
logtext(LOGFILE,"Deleting the folder " + BIDSKITROOTDIR )
rmtree(BIDSKITROOTDIR)
if os.path.exists(BIDSKITWORKDIR):
logtext(LOGFILE,"Deleting the folder " + BIDSKITWORKDIR)
rmtree(BIDSKITWORKDIR)
logtext(LOGFILE, "Running Bidskit for the first time.")
#Now run bidskit for the first time
callstring=("python3 -u {bidskit}/dcm2bids.py -i {dicom} -o {source} {nosessions} --overwrite {selconv} {noanon}").format(bidskit=BIDSKIT, dicom=STAGEDIR,source=OUTPUTDIR, nosessions=NOSESSIONS,noanon=NOANON,selconv=SELCONVERT)
call(callstring, shell=True)
logtext(LOGFILE, "protocol translator file created - please edit with new values")
if not os.path.exists(origProtocolTranslator):
| copyfile(ProtocolTranslator,origProtocolTranslator)
logtext(LOGFILE,"protocol translator backed up as " + origProtocolTranslator) | conditional_block | |
build.rs | use std::io::prelude::*;
use std::io;
use std::path::Path;
pub fn all(path: &Path, mcus: &[Mcu]) -> Result<(), io::Error> {
fs::create_dir_all(path)?;
let mut module_names = Vec::new();
// Create modules for each mcu.
for mcu in mcus.iter() {
let module_name = self::mcu_module_name(mcu);
let module_path = path.join(format!("{}.rs", module_name));
eprintln!("generating module for {}", mcu.device.name);
generate_mcu_module(mcu, &module_path)?;
module_names.push(module_name);
}
generate_entry_module(path, &module_names)
}
/// Generate a `mod.rs` file that binds a list of submodules.
fn generate_entry_module(output_path: &Path, module_names: &[String]) -> Result<(), io::Error> {
let mut mod_rs = File::create(output_path.join("mod.rs"))?;
writeln!(mod_rs, "// Device definitions")?;
for module_name in module_names {
writeln!(mod_rs, "pub mod {};", module_name)?;
}
writeln!(mod_rs)?;
const CURRENT_MOD_SUMMARY: &'static str = "Contains definitions for the current AVR device";
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "///")?;
writeln!(mod_rs, "/// **NOTE**: We are showing the ATmega328 here, even though the library")?;
writeln!(mod_rs, "/// is not targeting a real AVR device. If you compile this library for")?;
writeln!(mod_rs, "/// a specific AVR MCU, the module for that device will aliased here.")?;
writeln!(mod_rs, "// If we are targeting a non-AVR device, just pick the ATmega328p so")?;
writeln!(mod_rs, "// that users can see what the API would look like")?;
writeln!(mod_rs, "//")?;
writeln!(mod_rs, "// Note that we reexport rather than alias so that we can add a note about")?;
writeln!(mod_rs, "// this behaviour to the documentation.")?;
writeln!(mod_rs, "#[cfg(not(target_arch = \"avr\"))]")?;
writeln!(mod_rs, "pub mod current {{ pub use super::atmega328::*; }}")?;
writeln!(mod_rs)?;
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "// If we are targeting AVR, lookup the current device's module")?;
writeln!(mod_rs, "// and alias it to the `current` module.")?;
writeln!(mod_rs, "#[cfg(target_arch = \"avr\")]")?;
writeln!(mod_rs, "pub mod current {{")?;
writeln!(mod_rs, " // NOTE: 'target_cpu' is a cfg flag specific to the avr-rust fork")?;
for module_name in module_names {
writeln!(mod_rs, " #[cfg(target_cpu = \"{}\")] pub use super::{} as current;",
module_name, module_name)?;
}
writeln!(mod_rs, "}}")?;
Ok(())
}
/// Generates a self-contained module for each individual mcu.
fn generate_mcu_module(mcu: &Mcu, path: &Path) -> Result<(), io::Error> {
let mut file = File::create(path)?;
self::mcu_module_doc(mcu, &mut file)?;
writeln!(file)?;
self::mcu_module_code(mcu, &mut file)?;
Ok(())
}
/// Gets the module name for a mcu.
fn mcu_module_name(mcu: &Mcu) -> String {
mcu.device.name.to_lowercase()
}
pub fn mcu_module_doc(mcu: &Mcu, w: &mut Write)
-> Result<(), io::Error> {
writeln!(w, "//! The AVR {} microcontroller", mcu.device.name)?;
writeln!(w, "//!")?;
writeln!(w, "//! # Variants")?;
writeln!(w, "//! | | Pinout | Mcuage | Operating temperature | Operating voltage | Max speed |")?;
writeln!(w, "//! |--------|--------|---------|-----------------------|-------------------|-----------|")?;
for variant in mcu.variants.iter() {
let pinout_label = variant.pinout.as_ref().map(|p| p.replace('_', "-").to_owned()).unwrap_or_else(|| String::new());
let speed_mhz = variant.speed_max_hz / 1_000_000;
writeln!(w, "//! | {} | {} | {} | {}°C - {}°C | {}V - {}V | {} MHz |",
variant.name, pinout_label,
variant.package, variant.temperature_min,
variant.temperature_max, variant.voltage_min, variant.voltage_max,
speed_mhz)?;
}
writeln!(w, "//!")?;
Ok(())
}
pub fn mcu_module_code(mcu: &Mcu, w: &mut Write)
-> Result<(), io::Error> {
let registers = ordered_registers(mcu);
let register_bitfields = documentable_bitfields(®isters);
writeln!(w, "#![allow(non_upper_case_globals)]")?;
writeln!(w)?;
for register in registers.iter() {
let ty = integer_type(register.size);
if !register.caption.is_empty() {
let mut caption = register.caption.trim().to_owned();
if !caption.ends_with('.') { caption.push('.') }
writeln!(w, "/// {}", caption)?;
} else {
writeln!(w, "/// {} register", register.name)?;
}
let mut bitfields = register_bitfields.iter().filter_map(|&(reg,bitfield)| {
if reg == register { Some(bitfield) } else { None }
}).peekable();
if bitfields.peek().is_some() {
writeln!(w, "///")?;
writeln!(w, "/// Bitfields:")?;
writeln!(w, "///")?;
writeln!(w, "/// | Name | Mask (binary) |")?;
writeln!(w, "/// | ---- | ------------- |")?;
while let Some(bitfield) = bitfields.next() {
writeln!(w, "/// | {} | {:b} |", bitfield.name, bitfield.mask)?;
}
}
writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
register.name, ty, register.offset, ty)?;
writeln!(w)?;
}
for (register, bitfield) in register_bitfields {
let ty = integer_type(bitfield.size);
writeln!(w, "/// Bitfield on register {}", register.name)?;
writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
bitfield.name, ty, bitfield.mask, ty)?;
writeln!(w)?;
}
Ok(())
}
fn ordered_registers(mcu: &Mcu) -> Vec<Register> {
let mut unique_registers = self::unique_registers(mcu);
insert_high_low_variants(&mut unique_registers);
let mut registers: Vec<Register> = unique_registers.into_iter().map(|a| a.1).collect();
registers.sort_by_key(|r| r.offset);
registers
}
fn insert_high_low_variants(registers: &mut HashMap<String, Register>) {
let wide_registers: Vec<_> = registers.values()
.filter(|r| r.size == 2)
.cloned()
.collect();
for r in wide_registers {
let (high, low) = high_low_variants(&r);
if !registers.contains_key(&high.name) {
registers.insert(high.name.clone(), high);
}
if !registers.contains_key(&low.name) {
registers.insert(low.name.clone(), low);
}
}
}
fn high_low_variants(r: &Register) -> (Register, Register) {
assert_eq!(2, r.size, "only 16-bit registers have high low variants");
(
Register { name: r.name.clone() + "H",
caption: r.caption.clone() + " high byte",
offset: r.offset + 1,
size: r.size / 2,
mask: None,
bitfields: Vec::new(), // these are already in parent.
rw: r.rw.clone() },
Register { name: r.name.clone() + "L",
caption: r.caption.clone() + " low byte",
offset: r.offset + 0,
size: r.size / 2,
mask: None,
bitfields: Vec::new(), // these are already in parent.
rw: r.rw.clone() },
)
}
fn unique_registers(mcu: &Mcu) -> HashMap<String, Register> {
let mut result = HashMap::new();
for module in mcu.modules.iter() {
for register_group in module.register_groups.iter() {
for register in register_group.registers.iter() {
// Check if we've already seen this register.
// Remove it if so and combine it with the current Register.
let r: Register = if let Some(ref existing) = result.remove(®ister.name) {
register.union(existing)
} else {
|
result.insert(r.name.clone(), r);
}
}
}
result
}
| register.clone()
};
| conditional_block |
build.rs | use std::io::prelude::*;
use std::io;
use std::path::Path;
pub fn all(path: &Path, mcus: &[Mcu]) -> Result<(), io::Error> {
fs::create_dir_all(path)?;
let mut module_names = Vec::new();
// Create modules for each mcu.
for mcu in mcus.iter() {
let module_name = self::mcu_module_name(mcu);
let module_path = path.join(format!("{}.rs", module_name));
eprintln!("generating module for {}", mcu.device.name);
generate_mcu_module(mcu, &module_path)?;
module_names.push(module_name);
}
generate_entry_module(path, &module_names)
}
/// Generate a `mod.rs` file that binds a list of submodules.
fn generate_entry_module(output_path: &Path, module_names: &[String]) -> Result<(), io::Error> {
let mut mod_rs = File::create(output_path.join("mod.rs"))?;
writeln!(mod_rs, "// Device definitions")?;
for module_name in module_names {
writeln!(mod_rs, "pub mod {};", module_name)?;
}
writeln!(mod_rs)?;
const CURRENT_MOD_SUMMARY: &'static str = "Contains definitions for the current AVR device";
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "///")?;
writeln!(mod_rs, "/// **NOTE**: We are showing the ATmega328 here, even though the library")?;
writeln!(mod_rs, "/// is not targeting a real AVR device. If you compile this library for")?;
writeln!(mod_rs, "/// a specific AVR MCU, the module for that device will aliased here.")?;
writeln!(mod_rs, "// If we are targeting a non-AVR device, just pick the ATmega328p so")?;
writeln!(mod_rs, "// that users can see what the API would look like")?;
writeln!(mod_rs, "//")?;
writeln!(mod_rs, "// Note that we reexport rather than alias so that we can add a note about")?;
writeln!(mod_rs, "// this behaviour to the documentation.")?;
writeln!(mod_rs, "#[cfg(not(target_arch = \"avr\"))]")?;
writeln!(mod_rs, "pub mod current {{ pub use super::atmega328::*; }}")?;
writeln!(mod_rs)?;
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "// If we are targeting AVR, lookup the current device's module")?;
writeln!(mod_rs, "// and alias it to the `current` module.")?;
writeln!(mod_rs, "#[cfg(target_arch = \"avr\")]")?;
writeln!(mod_rs, "pub mod current {{")?;
writeln!(mod_rs, " // NOTE: 'target_cpu' is a cfg flag specific to the avr-rust fork")?;
for module_name in module_names {
writeln!(mod_rs, " #[cfg(target_cpu = \"{}\")] pub use super::{} as current;",
module_name, module_name)?;
}
writeln!(mod_rs, "}}")?;
Ok(())
}
/// Generates a self-contained module for each individual mcu.
fn generate_mcu_module(mcu: &Mcu, path: &Path) -> Result<(), io::Error> {
let mut file = File::create(path)?;
self::mcu_module_doc(mcu, &mut file)?;
writeln!(file)?;
self::mcu_module_code(mcu, &mut file)?;
Ok(())
}
/// Gets the module name for a mcu.
fn mcu_module_name(mcu: &Mcu) -> String {
mcu.device.name.to_lowercase()
}
pub fn mcu_module_doc(mcu: &Mcu, w: &mut Write)
-> Result<(), io::Error> {
writeln!(w, "//! The AVR {} microcontroller", mcu.device.name)?;
writeln!(w, "//!")?;
writeln!(w, "//! # Variants")?;
writeln!(w, "//! | | Pinout | Mcuage | Operating temperature | Operating voltage | Max speed |")?;
writeln!(w, "//! |--------|--------|---------|-----------------------|-------------------|-----------|")?;
for variant in mcu.variants.iter() {
let pinout_label = variant.pinout.as_ref().map(|p| p.replace('_', "-").to_owned()).unwrap_or_else(|| String::new());
let speed_mhz = variant.speed_max_hz / 1_000_000;
writeln!(w, "//! | {} | {} | {} | {}°C - {}°C | {}V - {}V | {} MHz |",
variant.name, pinout_label,
variant.package, variant.temperature_min,
variant.temperature_max, variant.voltage_min, variant.voltage_max,
speed_mhz)?;
}
writeln!(w, "//!")?;
Ok(())
}
pub fn mcu_module_code(mcu: &Mcu, w: &mut Write)
-> Result<(), io::Error> {
let registers = ordered_registers(mcu);
let register_bitfields = documentable_bitfields(®isters);
writeln!(w, "#![allow(non_upper_case_globals)]")?;
writeln!(w)?;
for register in registers.iter() {
let ty = integer_type(register.size);
if !register.caption.is_empty() {
let mut caption = register.caption.trim().to_owned();
if !caption.ends_with('.') { caption.push('.') }
writeln!(w, "/// {}", caption)?;
} else {
writeln!(w, "/// {} register", register.name)?;
}
let mut bitfields = register_bitfields.iter().filter_map(|&(reg,bitfield)| {
if reg == register { Some(bitfield) } else { None }
}).peekable();
if bitfields.peek().is_some() {
writeln!(w, "///")?;
writeln!(w, "/// Bitfields:")?;
writeln!(w, "///")?;
writeln!(w, "/// | Name | Mask (binary) |")?;
writeln!(w, "/// | ---- | ------------- |")?;
while let Some(bitfield) = bitfields.next() {
writeln!(w, "/// | {} | {:b} |", bitfield.name, bitfield.mask)?;
}
}
writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
register.name, ty, register.offset, ty)?;
writeln!(w)?;
}
for (register, bitfield) in register_bitfields {
let ty = integer_type(bitfield.size);
writeln!(w, "/// Bitfield on register {}", register.name)?;
writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
bitfield.name, ty, bitfield.mask, ty)?;
writeln!(w)?; |
}
Ok(())
}
fn ordered_registers(mcu: &Mcu) -> Vec<Register> {
let mut unique_registers = self::unique_registers(mcu);
insert_high_low_variants(&mut unique_registers);
let mut registers: Vec<Register> = unique_registers.into_iter().map(|a| a.1).collect();
registers.sort_by_key(|r| r.offset);
registers
}
fn insert_high_low_variants(registers: &mut HashMap<String, Register>) {
let wide_registers: Vec<_> = registers.values()
.filter(|r| r.size == 2)
.cloned()
.collect();
for r in wide_registers {
let (high, low) = high_low_variants(&r);
if !registers.contains_key(&high.name) {
registers.insert(high.name.clone(), high);
}
if !registers.contains_key(&low.name) {
registers.insert(low.name.clone(), low);
}
}
}
fn high_low_variants(r: &Register) -> (Register, Register) {
assert_eq!(2, r.size, "only 16-bit registers have high low variants");
(
Register { name: r.name.clone() + "H",
caption: r.caption.clone() + " high byte",
offset: r.offset + 1,
size: r.size / 2,
mask: None,
bitfields: Vec::new(), // these are already in parent.
rw: r.rw.clone() },
Register { name: r.name.clone() + "L",
caption: r.caption.clone() + " low byte",
offset: r.offset + 0,
size: r.size / 2,
mask: None,
bitfields: Vec::new(), // these are already in parent.
rw: r.rw.clone() },
)
}
fn unique_registers(mcu: &Mcu) -> HashMap<String, Register> {
let mut result = HashMap::new();
for module in mcu.modules.iter() {
for register_group in module.register_groups.iter() {
for register in register_group.registers.iter() {
// Check if we've already seen this register.
// Remove it if so and combine it with the current Register.
let r: Register = if let Some(ref existing) = result.remove(®ister.name) {
register.union(existing)
} else {
register.clone()
};
result.insert(r.name.clone(), r);
}
}
}
result
| random_line_split | |
build.rs | use std::io::prelude::*;
use std::io;
use std::path::Path;
pub fn all(path: &Path, mcus: &[Mcu]) -> Result<(), io::Error> {
fs::create_dir_all(path)?;
let mut module_names = Vec::new();
// Create modules for each mcu.
for mcu in mcus.iter() {
let module_name = self::mcu_module_name(mcu);
let module_path = path.join(format!("{}.rs", module_name));
eprintln!("generating module for {}", mcu.device.name);
generate_mcu_module(mcu, &module_path)?;
module_names.push(module_name);
}
generate_entry_module(path, &module_names)
}
/// Generate a `mod.rs` file that binds a list of submodules.
fn generate_entry_module(output_path: &Path, module_names: &[String]) -> Result<(), io::Error> {
let mut mod_rs = File::create(output_path.join("mod.rs"))?;
writeln!(mod_rs, "// Device definitions")?;
for module_name in module_names {
writeln!(mod_rs, "pub mod {};", module_name)?;
}
writeln!(mod_rs)?;
const CURRENT_MOD_SUMMARY: &'static str = "Contains definitions for the current AVR device";
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "///")?;
writeln!(mod_rs, "/// **NOTE**: We are showing the ATmega328 here, even though the library")?;
writeln!(mod_rs, "/// is not targeting a real AVR device. If you compile this library for")?;
writeln!(mod_rs, "/// a specific AVR MCU, the module for that device will aliased here.")?;
writeln!(mod_rs, "// If we are targeting a non-AVR device, just pick the ATmega328p so")?;
writeln!(mod_rs, "// that users can see what the API would look like")?;
writeln!(mod_rs, "//")?;
writeln!(mod_rs, "// Note that we reexport rather than alias so that we can add a note about")?;
writeln!(mod_rs, "// this behaviour to the documentation.")?;
writeln!(mod_rs, "#[cfg(not(target_arch = \"avr\"))]")?;
writeln!(mod_rs, "pub mod current {{ pub use super::atmega328::*; }}")?;
writeln!(mod_rs)?;
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "// If we are targeting AVR, lookup the current device's module")?;
writeln!(mod_rs, "// and alias it to the `current` module.")?;
writeln!(mod_rs, "#[cfg(target_arch = \"avr\")]")?;
writeln!(mod_rs, "pub mod current {{")?;
writeln!(mod_rs, " // NOTE: 'target_cpu' is a cfg flag specific to the avr-rust fork")?;
for module_name in module_names {
writeln!(mod_rs, " #[cfg(target_cpu = \"{}\")] pub use super::{} as current;",
module_name, module_name)?;
}
writeln!(mod_rs, "}}")?;
Ok(())
}
/// Generates a self-contained module for each individual mcu.
fn generate_mcu_module(mcu: &Mcu, path: &Path) -> Result<(), io::Error> {
let mut file = File::create(path)?;
self::mcu_module_doc(mcu, &mut file)?;
writeln!(file)?;
self::mcu_module_code(mcu, &mut file)?;
Ok(())
}
/// Gets the module name for a mcu.
fn mcu_module_name(mcu: &Mcu) -> String {
mcu.device.name.to_lowercase()
}
pub fn mcu_module_doc(mcu: &Mcu, w: &mut Write)
-> Result<(), io::Error> {
writeln!(w, "//! The AVR {} microcontroller", mcu.device.name)?;
writeln!(w, "//!")?;
writeln!(w, "//! # Variants")?;
writeln!(w, "//! | | Pinout | Mcuage | Operating temperature | Operating voltage | Max speed |")?;
writeln!(w, "//! |--------|--------|---------|-----------------------|-------------------|-----------|")?;
for variant in mcu.variants.iter() {
let pinout_label = variant.pinout.as_ref().map(|p| p.replace('_', "-").to_owned()).unwrap_or_else(|| String::new());
let speed_mhz = variant.speed_max_hz / 1_000_000;
writeln!(w, "//! | {} | {} | {} | {}°C - {}°C | {}V - {}V | {} MHz |",
variant.name, pinout_label,
variant.package, variant.temperature_min,
variant.temperature_max, variant.voltage_min, variant.voltage_max,
speed_mhz)?;
}
writeln!(w, "//!")?;
Ok(())
}
pub fn mcu_module_code(mcu: &Mcu, w: &mut Write)
-> Result<(), io::Error> {
let registers = ordered_registers(mcu);
let register_bitfields = documentable_bitfields(®isters);
writeln!(w, "#![allow(non_upper_case_globals)]")?;
writeln!(w)?;
for register in registers.iter() {
let ty = integer_type(register.size);
if !register.caption.is_empty() {
let mut caption = register.caption.trim().to_owned();
if !caption.ends_with('.') { caption.push('.') }
writeln!(w, "/// {}", caption)?;
} else {
writeln!(w, "/// {} register", register.name)?;
}
let mut bitfields = register_bitfields.iter().filter_map(|&(reg,bitfield)| {
if reg == register { Some(bitfield) } else { None }
}).peekable();
if bitfields.peek().is_some() {
writeln!(w, "///")?;
writeln!(w, "/// Bitfields:")?;
writeln!(w, "///")?;
writeln!(w, "/// | Name | Mask (binary) |")?;
writeln!(w, "/// | ---- | ------------- |")?;
while let Some(bitfield) = bitfields.next() {
writeln!(w, "/// | {} | {:b} |", bitfield.name, bitfield.mask)?;
}
}
writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
register.name, ty, register.offset, ty)?;
writeln!(w)?;
}
for (register, bitfield) in register_bitfields {
let ty = integer_type(bitfield.size);
writeln!(w, "/// Bitfield on register {}", register.name)?;
writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
bitfield.name, ty, bitfield.mask, ty)?;
writeln!(w)?;
}
Ok(())
}
fn ordered_registers(mcu: &Mcu) -> Vec<Register> {
| fn insert_high_low_variants(registers: &mut HashMap<String, Register>) {
let wide_registers: Vec<_> = registers.values()
.filter(|r| r.size == 2)
.cloned()
.collect();
for r in wide_registers {
let (high, low) = high_low_variants(&r);
if !registers.contains_key(&high.name) {
registers.insert(high.name.clone(), high);
}
if !registers.contains_key(&low.name) {
registers.insert(low.name.clone(), low);
}
}
}
fn high_low_variants(r: &Register) -> (Register, Register) {
assert_eq!(2, r.size, "only 16-bit registers have high low variants");
(
Register { name: r.name.clone() + "H",
caption: r.caption.clone() + " high byte",
offset: r.offset + 1,
size: r.size / 2,
mask: None,
bitfields: Vec::new(), // these are already in parent.
rw: r.rw.clone() },
Register { name: r.name.clone() + "L",
caption: r.caption.clone() + " low byte",
offset: r.offset + 0,
size: r.size / 2,
mask: None,
bitfields: Vec::new(), // these are already in parent.
rw: r.rw.clone() },
)
}
fn unique_registers(mcu: &Mcu) -> HashMap<String, Register> {
let mut result = HashMap::new();
for module in mcu.modules.iter() {
for register_group in module.register_groups.iter() {
for register in register_group.registers.iter() {
// Check if we've already seen this register.
// Remove it if so and combine it with the current Register.
let r: Register = if let Some(ref existing) = result.remove(®ister.name) {
register.union(existing)
} else {
register.clone()
};
result.insert(r.name.clone(), r);
}
}
}
result
}
| let mut unique_registers = self::unique_registers(mcu);
insert_high_low_variants(&mut unique_registers);
let mut registers: Vec<Register> = unique_registers.into_iter().map(|a| a.1).collect();
registers.sort_by_key(|r| r.offset);
registers
}
| identifier_body |
build.rs | use std::io::prelude::*;
use std::io;
use std::path::Path;
pub fn all(path: &Path, mcus: &[Mcu]) -> Result<(), io::Error> {
fs::create_dir_all(path)?;
let mut module_names = Vec::new();
// Create modules for each mcu.
for mcu in mcus.iter() {
let module_name = self::mcu_module_name(mcu);
let module_path = path.join(format!("{}.rs", module_name));
eprintln!("generating module for {}", mcu.device.name);
generate_mcu_module(mcu, &module_path)?;
module_names.push(module_name);
}
generate_entry_module(path, &module_names)
}
/// Generate a `mod.rs` file that binds a list of submodules.
fn generate_entry_module(output_path: &Path, module_names: &[String]) -> Result<(), io::Error> {
let mut mod_rs = File::create(output_path.join("mod.rs"))?;
writeln!(mod_rs, "// Device definitions")?;
for module_name in module_names {
writeln!(mod_rs, "pub mod {};", module_name)?;
}
writeln!(mod_rs)?;
const CURRENT_MOD_SUMMARY: &'static str = "Contains definitions for the current AVR device";
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "///")?;
writeln!(mod_rs, "/// **NOTE**: We are showing the ATmega328 here, even though the library")?;
writeln!(mod_rs, "/// is not targeting a real AVR device. If you compile this library for")?;
writeln!(mod_rs, "/// a specific AVR MCU, the module for that device will aliased here.")?;
writeln!(mod_rs, "// If we are targeting a non-AVR device, just pick the ATmega328p so")?;
writeln!(mod_rs, "// that users can see what the API would look like")?;
writeln!(mod_rs, "//")?;
writeln!(mod_rs, "// Note that we reexport rather than alias so that we can add a note about")?;
writeln!(mod_rs, "// this behaviour to the documentation.")?;
writeln!(mod_rs, "#[cfg(not(target_arch = \"avr\"))]")?;
writeln!(mod_rs, "pub mod current {{ pub use super::atmega328::*; }}")?;
writeln!(mod_rs)?;
writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?;
writeln!(mod_rs, "// If we are targeting AVR, lookup the current device's module")?;
writeln!(mod_rs, "// and alias it to the `current` module.")?;
writeln!(mod_rs, "#[cfg(target_arch = \"avr\")]")?;
writeln!(mod_rs, "pub mod current {{")?;
writeln!(mod_rs, " // NOTE: 'target_cpu' is a cfg flag specific to the avr-rust fork")?;
for module_name in module_names {
writeln!(mod_rs, " #[cfg(target_cpu = \"{}\")] pub use super::{} as current;",
module_name, module_name)?;
}
writeln!(mod_rs, "}}")?;
Ok(())
}
/// Generates a self-contained module for each individual mcu.
fn | (mcu: &Mcu, path: &Path) -> Result<(), io::Error> {
let mut file = File::create(path)?;
self::mcu_module_doc(mcu, &mut file)?;
writeln!(file)?;
self::mcu_module_code(mcu, &mut file)?;
Ok(())
}
/// Gets the module name for a mcu.
fn mcu_module_name(mcu: &Mcu) -> String {
mcu.device.name.to_lowercase()
}
pub fn mcu_module_doc(mcu: &Mcu, w: &mut Write)
-> Result<(), io::Error> {
writeln!(w, "//! The AVR {} microcontroller", mcu.device.name)?;
writeln!(w, "//!")?;
writeln!(w, "//! # Variants")?;
writeln!(w, "//! | | Pinout | Mcuage | Operating temperature | Operating voltage | Max speed |")?;
writeln!(w, "//! |--------|--------|---------|-----------------------|-------------------|-----------|")?;
for variant in mcu.variants.iter() {
let pinout_label = variant.pinout.as_ref().map(|p| p.replace('_', "-").to_owned()).unwrap_or_else(|| String::new());
let speed_mhz = variant.speed_max_hz / 1_000_000;
writeln!(w, "//! | {} | {} | {} | {}°C - {}°C | {}V - {}V | {} MHz |",
variant.name, pinout_label,
variant.package, variant.temperature_min,
variant.temperature_max, variant.voltage_min, variant.voltage_max,
speed_mhz)?;
}
writeln!(w, "//!")?;
Ok(())
}
/// Writes the register and bitfield constant definitions for an MCU module.
///
/// For every register a documented `pub const` raw pointer is emitted; each
/// register's documented bitfields are listed in a table inside its doc
/// comment and also emitted as constants holding their bit masks.
///
/// # Errors
/// Propagates any I/O error from the underlying writer.
pub fn mcu_module_code(mcu: &Mcu, w: &mut dyn Write)
    -> Result<(), io::Error> {
    let registers = ordered_registers(mcu);
    // FIX: the argument was the mojibake "®isters" (HTML-escaped
    // "&registers"); restore the intended borrow.
    let register_bitfields = documentable_bitfields(&registers);
    writeln!(w, "#![allow(non_upper_case_globals)]")?;
    writeln!(w)?;
    for register in registers.iter() {
        let ty = integer_type(register.size);
        // Prefer the hardware caption as the doc comment, normalised to end
        // with a full stop; fall back to a generic description.
        if !register.caption.is_empty() {
            let mut caption = register.caption.trim().to_owned();
            if !caption.ends_with('.') { caption.push('.') }
            writeln!(w, "/// {}", caption)?;
        } else {
            writeln!(w, "/// {} register", register.name)?;
        }
        let mut bitfields = register_bitfields.iter().filter_map(|&(reg, bitfield)| {
            if reg == register { Some(bitfield) } else { None }
        }).peekable();
        // Only emit the bitfield table when this register has documented bitfields.
        if bitfields.peek().is_some() {
            writeln!(w, "///")?;
            writeln!(w, "/// Bitfields:")?;
            writeln!(w, "///")?;
            writeln!(w, "/// | Name | Mask (binary) |")?;
            writeln!(w, "/// | ---- | ------------- |")?;
            // `for` over the iterator replaces the redundant `while let … .next()`.
            for bitfield in bitfields {
                writeln!(w, "/// | {} | {:b} |", bitfield.name, bitfield.mask)?;
            }
        }
        writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
                 register.name, ty, register.offset, ty)?;
        writeln!(w)?;
    }
    for (register, bitfield) in register_bitfields {
        let ty = integer_type(bitfield.size);
        writeln!(w, "/// Bitfield on register {}", register.name)?;
        writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};",
                 bitfield.name, ty, bitfield.mask, ty)?;
        writeln!(w)?;
    }
    Ok(())
}
/// Collects the MCU's registers (including synthesised high/low byte
/// variants for 16-bit registers) sorted by their memory offset.
fn ordered_registers(mcu: &Mcu) -> Vec<Register> {
    let mut by_name = self::unique_registers(mcu);
    insert_high_low_variants(&mut by_name);
    let mut ordered: Vec<Register> = by_name.into_iter().map(|(_, reg)| reg).collect();
    ordered.sort_by_key(|reg| reg.offset);
    ordered
}
/// For every 16-bit register in the map, inserts synthetic `…H`/`…L`
/// single-byte variants unless registers with those names already exist.
fn insert_high_low_variants(registers: &mut HashMap<String, Register>) {
    // Snapshot the wide registers first so we can mutate the map while iterating.
    let wide_registers: Vec<_> = registers.values()
        .filter(|r| r.size == 2)
        .cloned()
        .collect();
    for r in wide_registers {
        let (high, low) = high_low_variants(&r);
        // `entry().or_insert()` does one lookup instead of the previous
        // `contains_key` + `insert` pair, and never overwrites an existing
        // register of the same name.
        registers.entry(high.name.clone()).or_insert(high);
        registers.entry(low.name.clone()).or_insert(low);
    }
}
/// Builds the high- and low-byte sub-registers of a 16-bit register.
///
/// The high byte lives one past the parent's offset; the low byte shares the
/// parent's offset. Neither variant carries bitfields (they stay on the parent).
///
/// # Panics
/// Panics if `r` is not exactly two bytes wide.
fn high_low_variants(r: &Register) -> (Register, Register) {
    assert_eq!(2, r.size, "only 16-bit registers have high low variants");
    (
        Register { name: r.name.clone() + "H",
                   caption: r.caption.clone() + " high byte",
                   offset: r.offset + 1,
                   size: r.size / 2,
                   mask: None,
                   bitfields: Vec::new(), // these are already in parent.
                   rw: r.rw.clone() },
        Register { name: r.name.clone() + "L",
                   caption: r.caption.clone() + " low byte",
                   offset: r.offset, // low byte sits at the parent's own offset (was `r.offset + 0`)
                   size: r.size / 2,
                   mask: None,
                   bitfields: Vec::new(), // these are already in parent.
                   rw: r.rw.clone() },
    )
}
fn unique_registers(mcu: &Mcu) -> HashMap<String, Register> {
let mut result = HashMap::new();
for module in mcu.modules.iter() {
for register_group in module.register_groups.iter() {
for register in register_group.registers.iter() {
// Check if we've already seen this register.
// Remove it if so and combine it with the current Register.
let r: Register = if let Some(ref existing) = result.remove(®ister.name) {
register.union(existing)
} else {
register.clone()
};
result.insert(r.name.clone(), r);
}
}
}
result
}
| generate_mcu_module | identifier_name |
txtfunctions.py | = 'Hate This & I\'ll Love You'
elif entry == 'IBTY':
entry = 'I Belong To You'
elif entry == 'IS':
entry = 'The 2nd Law: Isolated System'
elif entry == 'KoC':
entry = 'Knights of Cydonia'
elif entry == 'MotP':
entry = 'Map of the Problematique'
elif entry == 'MM':
entry = 'Muscle Museum'
elif entry == 'NSC':
entry = 'Neutron Star Collision'
elif entry == 'PiB':
entry = 'Plug In Baby'
elif entry == 'RBS':
entry = 'Ruled By Secrecy'
elif (entry == 'SMBH' or entry == 'Supermassive'):
entry = 'Supermassive Black Hole'
elif entry == 'SS':
entry = 'Stockholm Syndrome'
elif entry == 'TaB':
entry = 'Take A Bow'
elif entry == 'TIRO':
entry = 'Time Is Running Out'
elif entry == 'TOADA':
entry = 'Thoughts of a Dying Atheist'
elif entry == 'UD':
entry = 'Undisclosed Desires'
return entry
# Adds a song, or a comma-separated string of songs, to the .setlist file.
# Each entry is expanded through acronym_replace before being appended.
# I/O errors are reported to stdout but otherwise ignored.
def add_song(argument):
    for raw_song in argument.split(', '):
        song = acronym_replace(raw_song)
        try:
            filehandle.list_append('text/setlist', song)
        except IOError:
            print("Error adding song to setlist")
# Removes the most recently entered song from the setlist file.
# Raises IOError when the setlist file cannot be read or rewritten.
# Raises IndexError when called on an empty setlist.
def song_pop():
    try:
        undoList = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error opening file for song_undo")
    if undoList:
        # list.pop() drops the last (most recently appended) entry;
        # clearer than the previous `del undoList[len(undoList) - 1]`.
        undoList.pop()
    else:
        raise IndexError("Empty setlist")
    try:
        filehandle.put_list('text/setlist', undoList)
    except IOError:
        raise IOError("Error rewriting list for song_undo")
    return
# Builds a single "song1, song2, ..." display string from a list of songs.
# Each entry is stripped of trailing newline characters via
# filehandle.remove_nr; an empty list yields an empty string.
# (str.join replaces the previous O(n^2) repeated-concatenation loop.)
def create_set_string(songlist):
    return ', '.join(filehandle.remove_nr(song) for song in songlist)
# Prints whichever setlist file is fed into it and returns a string suitable
# for messaging. The 'text/previous' file is special-cased so the stored gig
# name (its first line) prefixes the songs.
# Raises IOError when the file cannot be read.
def print_set(filename):
    try:
        fileList = filehandle.get_list(filename)
    except IOError:
        raise IOError("Error opening file to print")
    if not fileList:
        print('EMPTY SETLIST')
        return ''
    if filename == 'text/previous':
        print('Printing PREVIOUS')
        gig_name = filehandle.remove_nr(fileList[0])
        return "%s: %s" % (gig_name, create_set_string(fileList[1:]))
    print('Printing set')
    return create_set_string(fileList)
# Copies the gig and setlist to a previous setlist file,
# then copies the previous setlist to an archive with the gig as the file name.
# Will update any song count and last played values in the archive database.
# Finally, will tweet the setlist to the associated Twitter account.
# Passes on I/O Exceptions.
# Throws RuntimeError exception for gig formatting issues.
def set_previous():
    setlist = filehandle.get_list('text/setlist')
    gig = filehandle.get_list('text/gig')
    tour = filehandle.get_list('text/tour')
    print('GIG: %s' % (gig))
    if len(gig) == 0:
        print('SetPrevious Error: GIG not found')
        return 'ERROR: No gig set.'
    if len(setlist) == 0:
        print('SetPrevious Error: Empty setlist')
        return 'ERROR: Setlist is empty.'
    gig = gig[0]
    # NOTE(review): an empty 'text/tour' file raises IndexError here — confirm intended.
    tour = tour[0].strip()
    sys.stdout.write('FORMATTING GIG NAME...')
    try:
        # Replace path separators in the gig name with hyphens so it can be
        # used as an archive file name (e.g. 'a/b/c' -> 'a-b-c').
        gigFileName = '-'.join(gig.split('/'))
    except Exception:
        raise RuntimeError("Error formatting gig")
    sys.stdout.write('\t[DONE]\n')
    # Keep only the portion before any line terminator (LF or CR).
    gigFileName = gigFileName.split('\x0A')[0]
    gigFileName = gigFileName.split('\r')[0]
    print('GIG FILE NAME: %s' % (gigFileName))
    sys.stdout.write('Writing previous setlist...')
    archivePath = "GigArchive/" + tour + "/" + gigFileName
    gig = [gig]
    try:
        filehandle.put_list('text/previous', gig)
        filehandle.put_list(archivePath, gig)
    except IOError:
        raise IOError("Error writing setlists for set_previous")
    for entry in setlist:
        try:
            filehandle.list_append('text/previous', entry)
            filehandle.list_append(archivePath, entry)
        except IOError:
            raise IOError("Error appending song for set_previous")
    sys.stdout.write('\t[DONE]\nUpdating database...')
    database = filehandle.get_list('text/archive.db')
    # Hoisted: the lower-cased setlist was previously rebuilt for every
    # database entry (O(entries * songs)); compute it once.
    setlist_lower = [s.lower() for s in setlist]
    for entry in database:
        if entry.startswith('c!'):
            # Play-count rows look like 'c!<song>=<count>'.
            song = entry.split('!')[1].split('=')[0]
            if song in setlist_lower:
                count = int(entry.split('!')[1].split('=')[1])
                count = count + 1
                database[database.index(entry)] = 'c!%s=%d' % (song, count)
        elif entry.startswith('lp!'):
            # Last-played rows look like 'lp!<song>::<archive path>'.
            song = entry.split('!')[1].split('::')[0]
            if song in setlist_lower:
                database[database.index(entry)] = 'lp!%s::%s' % (song, archivePath)
    filehandle.put_list('text/archive.db', database)
    sys.stdout.write('\t[DONE]\nTweeting setlist...')
    twit.tweet_setlist()
    sys.stdout.write('\t[DONE]\n')
    return "Current setlist copied over as previous set."
# Takes 2 songs, finds the first song in the set, replaces it with the second.
# Raises IOError when the setlist cannot be read or written.
# Most other errors are returned as a message string to be sent through IRC.
def replace_song(argument):
    songs = argument.split(', ')
    if len(songs) != 2:
        print("Invalid replacement arguments")
        return "ERROR: Invalid number of arguments."
    try:
        setlist = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error getting setlist for replace_song")
    compareSong = acronym_replace(songs[0])
    replaceSong = acronym_replace(songs[1])
    sys.stdout.write('Replacing %s with %s' % (compareSong, replaceSong))
    try:
        # list.index raises ValueError when the song is not present; the
        # previous bare `except:` also swallowed unrelated errors.
        songIndex = setlist.index(compareSong)
    except ValueError:
        print('Replace Error: Song not found')
        return "ERROR: Could not find the song to replace."
    setlist[songIndex] = replaceSong
    try:
        filehandle.put_list('text/setlist', setlist)
    except IOError:
        raise IOError("Error writing set for replace_song")
    sys.stdout.write('\t[DONE]\n')
    return "Song has been replaced"
# Nearly identical to replace_song. I will fix that some day
# Takes two songs, finds if the first song is in the set
# and inserts the second song before it
def insert_song(argument):
compared = False
songs = argument.split(', ')
length = len(songs)
| if length != 2:
print("Invalid replacement arguments")
return "ERROR: Invalid number of arguments."
else:
try:
setlist = filehandle.get_list('text/setlist')
except IOError:
raise IOError("Error getting setlist for insert_song")
insertSong = songs[0]
insertSong = acronym_replace(insertSong)
compareSong = songs[1]
compareSong = acronym_replace(compareSong)
sys.stdout.write('Inserting %s before %s' % (insertSong, compareSong))
try:
songIndex = setlist.index(compareSong)
setlist.insert(songIndex, insertSong)
compared = True
except:
print('Compare Error: Song not found')
return "ERROR: Could not find the song to insert before."
if compared:
try:
filehandle.put_list('text/setlist', setlist)
except IOError:
raise IOError("Error writing set for insert_song")
sys | random_line_split | |
txtfunctions.py | = 'Hate This & I\'ll Love You'
elif entry == 'IBTY':
entry = 'I Belong To You'
elif entry == 'IS':
entry = 'The 2nd Law: Isolated System'
elif entry == 'KoC':
entry = 'Knights of Cydonia'
elif entry == 'MotP':
entry = 'Map of the Problematique'
elif entry == 'MM':
entry = 'Muscle Museum'
elif entry == 'NSC':
entry = 'Neutron Star Collision'
elif entry == 'PiB':
entry = 'Plug In Baby'
elif entry == 'RBS':
entry = 'Ruled By Secrecy'
elif (entry == 'SMBH' or entry == 'Supermassive'):
entry = 'Supermassive Black Hole'
elif entry == 'SS':
entry = 'Stockholm Syndrome'
elif entry == 'TaB':
entry = 'Take A Bow'
elif entry == 'TIRO':
entry = 'Time Is Running Out'
elif entry == 'TOADA':
entry = 'Thoughts of a Dying Atheist'
elif entry == 'UD':
entry = 'Undisclosed Desires'
return entry
# Adds a song, or a comma-separated string of songs, to the .setlist file.
# Each entry is expanded through acronym_replace before being appended.
# I/O errors are reported to stdout but otherwise ignored.
def add_song(argument):
    for raw_song in argument.split(', '):
        song = acronym_replace(raw_song)
        try:
            filehandle.list_append('text/setlist', song)
        except IOError:
            print("Error adding song to setlist")
# Removes the most recently entered song from the setlist file.
# Raises IOError when the setlist file cannot be read or rewritten.
# Raises IndexError when called on an empty setlist.
def song_pop():
    try:
        undoList = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error opening file for song_undo")
    if undoList:
        # list.pop() drops the last (most recently appended) entry;
        # clearer than the previous `del undoList[len(undoList) - 1]`.
        undoList.pop()
    else:
        raise IndexError("Empty setlist")
    try:
        filehandle.put_list('text/setlist', undoList)
    except IOError:
        raise IOError("Error rewriting list for song_undo")
    return
# Builds a single "song1, song2, ..." display string from a list of songs.
# Each entry is stripped of trailing newline characters via
# filehandle.remove_nr; an empty list yields an empty string.
# (str.join replaces the previous O(n^2) repeated-concatenation loop.)
def create_set_string(songlist):
    return ', '.join(filehandle.remove_nr(song) for song in songlist)
# Prints whichever setlist file is fed into it and returns a string suitable
# for messaging. The 'text/previous' file is special-cased so the stored gig
# name (its first line) prefixes the songs.
# Raises IOError when the file cannot be read.
def print_set(filename):
    try:
        fileList = filehandle.get_list(filename)
    except IOError:
        raise IOError("Error opening file to print")
    if not fileList:
        print('EMPTY SETLIST')
        return ''
    if filename == 'text/previous':
        print('Printing PREVIOUS')
        gig_name = filehandle.remove_nr(fileList[0])
        return "%s: %s" % (gig_name, create_set_string(fileList[1:]))
    print('Printing set')
    return create_set_string(fileList)
# Copies the gig and setlist to a previous setlist file
# Then copies the previous setlist to an archive with the gig as the file name
# Will update any song count and last played values in the archive database.
# Finally, will tweet the setlist to the associated Twitter account.
# Passes on I/O Exceptions
# Throws RuntimeError exception for gig formatting issues.
def set_previous():
| gigParse = gig.split('/')
if len(gigParse) > 1:
i = 1
gigFileName = gigParse[0]
while i < len(gigParse):
gigFileName = '%s-%s' % (gigFileName, gigParse[i])
i += 1
else:
gigFileName = gigParse[0]
except:
raise RuntimeError("Error formatting gig")
sys.stdout.write('\t[DONE]\n')
gigFileName = gigFileName.split('\x0A')[0]
gigFileName = gigFileName.split('\r')[0]
print('GIG FILE NAME: %s' % (gigFileName))
sys.stdout.write('Writing previous setlist...')
archivePath = "GigArchive/" + tour + "/" + gigFileName
gig = [gig]
try:
filehandle.put_list('text/previous', gig)
filehandle.put_list(archivePath, gig)
except IOError:
raise IOError("Error writing setlists for set_previous")
for entry in setlist:
try:
filehandle.list_append('text/previous', entry)
filehandle.list_append(archivePath, entry)
except IOError:
raise IOError("Error appending song for set_previous")
sys.stdout.write('\t[DONE]\nUpdating database...')
database = filehandle.get_list('text/archive.db')
for entry in database:
if entry.startswith('c!'):
song = entry.split('!')[1].split('=')[0]
if song in [s.lower() for s in setlist]:
count = int(entry.split('!')[1].split('=')[1])
count = count + 1
database[database.index(entry)] = 'c!%s=%d' % (song, count)
elif entry.startswith('lp!'):
song = entry.split('!')[1].split('::')[0]
if song in [s.lower() for s in setlist]:
database[database.index(entry)] = 'lp!%s::%s' % (song, archivePath)
filehandle.put_list('text/archive.db', database)
sys.stdout.write('\t[DONE]\nTweeting setlist...')
twit.tweet_setlist()
sys.stdout.write('\t[DONE]\n')
outputString = "Current setlist copied over as previous set."
return outputString
# Takes 2 songs, finds the first song in the set, replaces it with the second.
# Raises IOError when the setlist cannot be read or written.
# Most other errors are returned as a message string to be sent through IRC.
def replace_song(argument):
    songs = argument.split(', ')
    if len(songs) != 2:
        print("Invalid replacement arguments")
        return "ERROR: Invalid number of arguments."
    try:
        setlist = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error getting setlist for replace_song")
    compareSong = acronym_replace(songs[0])
    replaceSong = acronym_replace(songs[1])
    sys.stdout.write('Replacing %s with %s' % (compareSong, replaceSong))
    try:
        # list.index raises ValueError when the song is not present; the
        # previous bare `except:` also swallowed unrelated errors.
        songIndex = setlist.index(compareSong)
    except ValueError:
        print('Replace Error: Song not found')
        return "ERROR: Could not find the song to replace."
    setlist[songIndex] = replaceSong
    try:
        filehandle.put_list('text/setlist', setlist)
    except IOError:
        raise IOError("Error writing set for replace_song")
    sys.stdout.write('\t[DONE]\n')
    return "Song has been replaced"
# Nearly identical to replace_song. I will fix that some day
# Takes two songs, finds if the first song is in the set
# and inserts the second song before it
def insert_song(argument):
compared = False
songs = argument.split(', ')
length = len(songs)
if length != 2:
print("Invalid replacement arguments")
return "ERROR: Invalid number of arguments."
else:
try:
setlist = filehandle.get_list('text/setlist')
except IOError:
raise IOError("Error getting setlist for insert_song")
insertSong = songs[0]
insertSong = acronym_replace(insertSong)
compareSong = songs[1]
compareSong = acronym_replace(compareSong)
sys.stdout.write('Inserting %s before %s' % (insertSong, compareSong))
try:
songIndex = setlist.index(compareSong)
setlist.insert(songIndex, insertSong)
compared = True
except:
print('Compare Error: Song not found')
return "ERROR: Could not find the song to insert before."
if compared:
try:
filehandle.put_list('text/setlist', setlist)
except IOError:
raise IOError("Error writing set for insert_song")
sys | setlist = filehandle.get_list('text/setlist')
gig = filehandle.get_list('text/gig')
tour = filehandle.get_list('text/tour')
print('GIG: %s' % (gig))
if len(gig) == 0:
print('SetPrevious Error: GIG not found')
outputString = 'ERROR: No gig set.'
return outputString
if len(setlist) == 0:
print('SetPrevious Error: Empty setlist')
outputString = 'ERROR: Setlist is empty.'
return outputString
gig = gig[0]
tour = tour[0].strip()
sys.stdout.write('FORMATTING GIG NAME...')
try: | identifier_body |
txtfunctions.py | = 'Hate This & I\'ll Love You'
elif entry == 'IBTY':
entry = 'I Belong To You'
elif entry == 'IS':
entry = 'The 2nd Law: Isolated System'
elif entry == 'KoC':
entry = 'Knights of Cydonia'
elif entry == 'MotP':
entry = 'Map of the Problematique'
elif entry == 'MM':
entry = 'Muscle Museum'
elif entry == 'NSC':
entry = 'Neutron Star Collision'
elif entry == 'PiB':
entry = 'Plug In Baby'
elif entry == 'RBS':
entry = 'Ruled By Secrecy'
elif (entry == 'SMBH' or entry == 'Supermassive'):
entry = 'Supermassive Black Hole'
elif entry == 'SS':
entry = 'Stockholm Syndrome'
elif entry == 'TaB':
entry = 'Take A Bow'
elif entry == 'TIRO':
entry = 'Time Is Running Out'
elif entry == 'TOADA':
entry = 'Thoughts of a Dying Atheist'
elif entry == 'UD':
entry = 'Undisclosed Desires'
return entry
# Adds a song, or a comma-separated string of songs, to the .setlist file.
# Each entry is expanded through acronym_replace before being appended.
# I/O errors are reported to stdout but otherwise ignored.
def add_song(argument):
    for raw_song in argument.split(', '):
        song = acronym_replace(raw_song)
        try:
            filehandle.list_append('text/setlist', song)
        except IOError:
            print("Error adding song to setlist")
# Removes the most recently entered song from the setlist file.
# Raises IOError when the setlist file cannot be read or rewritten.
# Raises IndexError when called on an empty setlist.
def song_pop():
    try:
        undoList = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error opening file for song_undo")
    if undoList:
        # list.pop() drops the last (most recently appended) entry;
        # clearer than the previous `del undoList[len(undoList) - 1]`.
        undoList.pop()
    else:
        raise IndexError("Empty setlist")
    try:
        filehandle.put_list('text/setlist', undoList)
    except IOError:
        raise IOError("Error rewriting list for song_undo")
    return
def | (songlist):
setstring = ''
for song in songlist:
if setstring == '':
setstring = filehandle.remove_nr(song)
else:
setstring = "%s, %s" % (setstring, filehandle.remove_nr(song))
return setstring
# Prints whichever setlist file is fed into it and returns a string suitable
# for messaging. The 'text/previous' file is special-cased so the stored gig
# name (its first line) prefixes the songs.
# Raises IOError when the file cannot be read.
def print_set(filename):
    try:
        fileList = filehandle.get_list(filename)
    except IOError:
        raise IOError("Error opening file to print")
    if not fileList:
        print('EMPTY SETLIST')
        return ''
    if filename == 'text/previous':
        print('Printing PREVIOUS')
        gig_name = filehandle.remove_nr(fileList[0])
        return "%s: %s" % (gig_name, create_set_string(fileList[1:]))
    print('Printing set')
    return create_set_string(fileList)
# Copies the gig and setlist to a previous setlist file,
# then copies the previous setlist to an archive with the gig as the file name.
# Will update any song count and last played values in the archive database.
# Finally, will tweet the setlist to the associated Twitter account.
# Passes on I/O Exceptions.
# Throws RuntimeError exception for gig formatting issues.
def set_previous():
    setlist = filehandle.get_list('text/setlist')
    gig = filehandle.get_list('text/gig')
    tour = filehandle.get_list('text/tour')
    print('GIG: %s' % (gig))
    if len(gig) == 0:
        print('SetPrevious Error: GIG not found')
        return 'ERROR: No gig set.'
    if len(setlist) == 0:
        print('SetPrevious Error: Empty setlist')
        return 'ERROR: Setlist is empty.'
    gig = gig[0]
    # NOTE(review): an empty 'text/tour' file raises IndexError here — confirm intended.
    tour = tour[0].strip()
    sys.stdout.write('FORMATTING GIG NAME...')
    try:
        # Replace path separators in the gig name with hyphens so it can be
        # used as an archive file name (e.g. 'a/b/c' -> 'a-b-c').
        gigFileName = '-'.join(gig.split('/'))
    except Exception:
        raise RuntimeError("Error formatting gig")
    sys.stdout.write('\t[DONE]\n')
    # Keep only the portion before any line terminator (LF or CR).
    gigFileName = gigFileName.split('\x0A')[0]
    gigFileName = gigFileName.split('\r')[0]
    print('GIG FILE NAME: %s' % (gigFileName))
    sys.stdout.write('Writing previous setlist...')
    archivePath = "GigArchive/" + tour + "/" + gigFileName
    gig = [gig]
    try:
        filehandle.put_list('text/previous', gig)
        filehandle.put_list(archivePath, gig)
    except IOError:
        raise IOError("Error writing setlists for set_previous")
    for entry in setlist:
        try:
            filehandle.list_append('text/previous', entry)
            filehandle.list_append(archivePath, entry)
        except IOError:
            raise IOError("Error appending song for set_previous")
    sys.stdout.write('\t[DONE]\nUpdating database...')
    database = filehandle.get_list('text/archive.db')
    # Hoisted: the lower-cased setlist was previously rebuilt for every
    # database entry (O(entries * songs)); compute it once.
    setlist_lower = [s.lower() for s in setlist]
    for entry in database:
        if entry.startswith('c!'):
            # Play-count rows look like 'c!<song>=<count>'.
            song = entry.split('!')[1].split('=')[0]
            if song in setlist_lower:
                count = int(entry.split('!')[1].split('=')[1])
                count = count + 1
                database[database.index(entry)] = 'c!%s=%d' % (song, count)
        elif entry.startswith('lp!'):
            # Last-played rows look like 'lp!<song>::<archive path>'.
            song = entry.split('!')[1].split('::')[0]
            if song in setlist_lower:
                database[database.index(entry)] = 'lp!%s::%s' % (song, archivePath)
    filehandle.put_list('text/archive.db', database)
    sys.stdout.write('\t[DONE]\nTweeting setlist...')
    twit.tweet_setlist()
    sys.stdout.write('\t[DONE]\n')
    return "Current setlist copied over as previous set."
# Takes 2 songs, finds the first song in the set, replaces it with the second.
# Raises IOError when the setlist cannot be read or written.
# Most other errors are returned as a message string to be sent through IRC.
def replace_song(argument):
    songs = argument.split(', ')
    if len(songs) != 2:
        print("Invalid replacement arguments")
        return "ERROR: Invalid number of arguments."
    try:
        setlist = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error getting setlist for replace_song")
    compareSong = acronym_replace(songs[0])
    replaceSong = acronym_replace(songs[1])
    sys.stdout.write('Replacing %s with %s' % (compareSong, replaceSong))
    try:
        # list.index raises ValueError when the song is not present; the
        # previous bare `except:` also swallowed unrelated errors.
        songIndex = setlist.index(compareSong)
    except ValueError:
        print('Replace Error: Song not found')
        return "ERROR: Could not find the song to replace."
    setlist[songIndex] = replaceSong
    try:
        filehandle.put_list('text/setlist', setlist)
    except IOError:
        raise IOError("Error writing set for replace_song")
    sys.stdout.write('\t[DONE]\n')
    return "Song has been replaced"
# Nearly identical to replace_song. I will fix that some day
# Takes two songs, finds if the first song is in the set
# and inserts the second song before it
def insert_song(argument):
compared = False
songs = argument.split(', ')
length = len(songs)
if length != 2:
print("Invalid replacement arguments")
return "ERROR: Invalid number of arguments."
else:
try:
setlist = filehandle.get_list('text/setlist')
except IOError:
raise IOError("Error getting setlist for insert_song")
insertSong = songs[0]
insertSong = acronym_replace(insertSong)
compareSong = songs[1]
compareSong = acronym_replace(compareSong)
sys.stdout.write('Inserting %s before %s' % (insertSong, compareSong))
try:
songIndex = setlist.index(compareSong)
setlist.insert(songIndex, insertSong)
compared = True
except:
print('Compare Error: Song not found')
return "ERROR: Could not find the song to insert before."
if compared:
try:
filehandle.put_list('text/setlist', setlist)
except IOError:
raise IOError("Error writing set for insert_song")
| create_set_string | identifier_name |
txtfunctions.py | = 'Hate This & I\'ll Love You'
elif entry == 'IBTY':
entry = 'I Belong To You'
elif entry == 'IS':
entry = 'The 2nd Law: Isolated System'
elif entry == 'KoC':
entry = 'Knights of Cydonia'
elif entry == 'MotP':
entry = 'Map of the Problematique'
elif entry == 'MM':
entry = 'Muscle Museum'
elif entry == 'NSC':
entry = 'Neutron Star Collision'
elif entry == 'PiB':
entry = 'Plug In Baby'
elif entry == 'RBS':
entry = 'Ruled By Secrecy'
elif (entry == 'SMBH' or entry == 'Supermassive'):
entry = 'Supermassive Black Hole'
elif entry == 'SS':
entry = 'Stockholm Syndrome'
elif entry == 'TaB':
entry = 'Take A Bow'
elif entry == 'TIRO':
entry = 'Time Is Running Out'
elif entry == 'TOADA':
entry = 'Thoughts of a Dying Atheist'
elif entry == 'UD':
entry = 'Undisclosed Desires'
return entry
# Adds a song, or a comma-separated string of songs, to the .setlist file.
# Each entry is expanded through acronym_replace before being appended.
# I/O errors are reported to stdout but otherwise ignored.
def add_song(argument):
    for raw_song in argument.split(', '):
        song = acronym_replace(raw_song)
        try:
            filehandle.list_append('text/setlist', song)
        except IOError:
            print("Error adding song to setlist")
# Removes the most recently entered song from the setlist file.
# Raises IOError when the setlist file cannot be read or rewritten.
# Raises IndexError when called on an empty setlist.
def song_pop():
    try:
        undoList = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error opening file for song_undo")
    if undoList:
        # list.pop() drops the last (most recently appended) entry;
        # clearer than the previous `del undoList[len(undoList) - 1]`.
        undoList.pop()
    else:
        raise IndexError("Empty setlist")
    try:
        filehandle.put_list('text/setlist', undoList)
    except IOError:
        raise IOError("Error rewriting list for song_undo")
    return
# Builds a single "song1, song2, ..." display string from a list of songs.
# Each entry is stripped of trailing newline characters via
# filehandle.remove_nr; an empty list yields an empty string.
# (str.join replaces the previous O(n^2) repeated-concatenation loop.)
def create_set_string(songlist):
    return ', '.join(filehandle.remove_nr(song) for song in songlist)
# Prints whichever setlist file is fed into it and returns a string suitable
# for messaging. The 'text/previous' file is special-cased so the stored gig
# name (its first line) prefixes the songs.
# Raises IOError when the file cannot be read.
def print_set(filename):
    try:
        fileList = filehandle.get_list(filename)
    except IOError:
        raise IOError("Error opening file to print")
    if not fileList:
        print('EMPTY SETLIST')
        return ''
    if filename == 'text/previous':
        print('Printing PREVIOUS')
        gig_name = filehandle.remove_nr(fileList[0])
        return "%s: %s" % (gig_name, create_set_string(fileList[1:]))
    print('Printing set')
    return create_set_string(fileList)
# Copies the gig and setlist to a previous setlist file
# Then copies the previous setlist to an archive with the gig as the file name
# Will update any song count and last played values in the archive database.
# Finally, will tweet the setlist to the associated Twitter account.
# Passes on I/O Exceptions
# Throws RuntimeError exception for gig formatting issues.
def set_previous():
setlist = filehandle.get_list('text/setlist')
gig = filehandle.get_list('text/gig')
tour = filehandle.get_list('text/tour')
print('GIG: %s' % (gig))
if len(gig) == 0:
print('SetPrevious Error: GIG not found')
outputString = 'ERROR: No gig set.'
return outputString
if len(setlist) == 0:
print('SetPrevious Error: Empty setlist')
outputString = 'ERROR: Setlist is empty.'
return outputString
gig = gig[0]
tour = tour[0].strip()
sys.stdout.write('FORMATTING GIG NAME...')
try:
gigParse = gig.split('/')
if len(gigParse) > 1:
i = 1
gigFileName = gigParse[0]
while i < len(gigParse):
gigFileName = '%s-%s' % (gigFileName, gigParse[i])
i += 1
else:
gigFileName = gigParse[0]
except:
raise RuntimeError("Error formatting gig")
sys.stdout.write('\t[DONE]\n')
gigFileName = gigFileName.split('\x0A')[0]
gigFileName = gigFileName.split('\r')[0]
print('GIG FILE NAME: %s' % (gigFileName))
sys.stdout.write('Writing previous setlist...')
archivePath = "GigArchive/" + tour + "/" + gigFileName
gig = [gig]
try:
filehandle.put_list('text/previous', gig)
filehandle.put_list(archivePath, gig)
except IOError:
raise IOError("Error writing setlists for set_previous")
for entry in setlist:
try:
filehandle.list_append('text/previous', entry)
filehandle.list_append(archivePath, entry)
except IOError:
raise IOError("Error appending song for set_previous")
sys.stdout.write('\t[DONE]\nUpdating database...')
database = filehandle.get_list('text/archive.db')
for entry in database:
|
filehandle.put_list('text/archive.db', database)
sys.stdout.write('\t[DONE]\nTweeting setlist...')
twit.tweet_setlist()
sys.stdout.write('\t[DONE]\n')
outputString = "Current setlist copied over as previous set."
return outputString
# Takes 2 songs, finds the first song in the set, replaces it with the second.
# Raises IOError when the setlist cannot be read or written.
# Most other errors are returned as a message string to be sent through IRC.
def replace_song(argument):
    songs = argument.split(', ')
    if len(songs) != 2:
        print("Invalid replacement arguments")
        return "ERROR: Invalid number of arguments."
    try:
        setlist = filehandle.get_list('text/setlist')
    except IOError:
        raise IOError("Error getting setlist for replace_song")
    compareSong = acronym_replace(songs[0])
    replaceSong = acronym_replace(songs[1])
    sys.stdout.write('Replacing %s with %s' % (compareSong, replaceSong))
    try:
        # list.index raises ValueError when the song is not present; the
        # previous bare `except:` also swallowed unrelated errors.
        songIndex = setlist.index(compareSong)
    except ValueError:
        print('Replace Error: Song not found')
        return "ERROR: Could not find the song to replace."
    setlist[songIndex] = replaceSong
    try:
        filehandle.put_list('text/setlist', setlist)
    except IOError:
        raise IOError("Error writing set for replace_song")
    sys.stdout.write('\t[DONE]\n')
    return "Song has been replaced"
# Nearly identical to replace_song. I will fix that some day
# Takes two songs, finds if the first song is in the set
# and inserts the second song before it
def insert_song(argument):
compared = False
songs = argument.split(', ')
length = len(songs)
if length != 2:
print("Invalid replacement arguments")
return "ERROR: Invalid number of arguments."
else:
try:
setlist = filehandle.get_list('text/setlist')
except IOError:
raise IOError("Error getting setlist for insert_song")
insertSong = songs[0]
insertSong = acronym_replace(insertSong)
compareSong = songs[1]
compareSong = acronym_replace(compareSong)
sys.stdout.write('Inserting %s before %s' % (insertSong, compareSong))
try:
songIndex = setlist.index(compareSong)
setlist.insert(songIndex, insertSong)
compared = True
except:
print('Compare Error: Song not found')
return "ERROR: Could not find the song to insert before."
if compared:
try:
filehandle.put_list('text/setlist', setlist)
except IOError:
raise IOError("Error writing set for insert_song")
| if entry.startswith('c!'):
song = entry.split('!')[1].split('=')[0]
if song in [s.lower() for s in setlist]:
count = int(entry.split('!')[1].split('=')[1])
count = count + 1
database[database.index(entry)] = 'c!%s=%d' % (song, count)
elif entry.startswith('lp!'):
song = entry.split('!')[1].split('::')[0]
if song in [s.lower() for s in setlist]:
database[database.index(entry)] = 'lp!%s::%s' % (song, archivePath) | conditional_block |
xfer-localToFile.go | jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer *pacer) {
// step 1: Get info from transfer.
info := jptm.Info()
u, _ := url.Parse(info.Destination)
fileURL := azfile.NewFileURL(*u, p)
fileSize := int64(info.SourceSize)
chunkSize := int64(info.BlockSize)
// If the given chunk Size for the Job is greater than maximum file chunk size i.e 4 MB
// then chunk size will be 4 MB.
if chunkSize > common.DefaultAzureFileChunkSize {
chunkSize = common.DefaultAzureFileChunkSize
if jptm.ShouldLog(pipeline.LogWarning) {
jptm.Log(pipeline.LogWarning,
fmt.Sprintf("Block size %d larger than maximum file chunk size, 4 MB chunk size used", info.BlockSize))
}
}
if jptm.ShouldLog(pipeline.LogInfo) {
jptm.LogTransferStart(info.Source, info.Destination, fmt.Sprintf("Chunk size %d", chunkSize))
}
// If the transfer was cancelled, then reporting transfer as done and increasing the bytestransferred by the size of the source.
if jptm.WasCanceled() {
jptm.ReportTransferDone()
return
}
// If the force Write flags is set to false
// then check the file exists or not.
// If it does, mark transfer as failed.
if !jptm.IsForceWriteTrue() {
_, err := fileURL.GetProperties(jptm.Context())
if err == nil {
// If the error is nil, then blob exists and it doesn't needs to be uploaded.
jptm.LogUploadError(info.Source, info.Destination, "File already exists ", 0)
// Mark the transfer as failed with FileAlreadyExistsFailure
jptm.SetStatus(common.ETransferStatus.FileAlreadyExistsFailure())
jptm.ReportTransferDone()
return
}
}
// step 2: Map file upload before transferring chunks and get info from map file.
srcFile, err := os.Open(info.Source)
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "File Open Error "+err.Error(), 0)
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
return
}
srcFileInfo, err := srcFile.Stat()
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "File Stat Error "+err.Error(), 0)
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
return
}
byteLength := common.Iffint64(srcFileInfo.Size() > 512, 512, srcFileInfo.Size())
byteBuffer := make([]byte, byteLength)
_, err = srcFile.Read(byteBuffer)
// Get http headers and meta data of file.
fileHTTPHeaders, metaData := jptm.FileDstData(byteBuffer)
// step 3: Create parent directories and file.
// 3a: Create the parent directories of the file. Note share must be existed, as the files are listed from share or directory.
err = createParentDirToRoot(jptm.Context(), fileURL, p)
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "Parent Directory Create Error "+err.Error(), 0)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
srcFile.Close()
return
}
// 3b: Create Azure file with the source size.
_, err = fileURL.Create(jptm.Context(), fileSize, fileHTTPHeaders, metaData)
if err != nil {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "File Create Error "+msg, status)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
jptm.ReportTransferDone()
srcFile.Close()
// If the status code was 403, it means there was an authentication error and we exit.
// User can resume the job if completely ordered with a new sas.
if status == http.StatusForbidden {
common.GetLifecycleMgr().Exit(fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()), 1)
}
return
}
// If the file size is 0, scheduling chunk msgs for UploadRange is not required
if info.SourceSize == 0 {
// mark the transfer as successful
jptm.SetStatus(common.ETransferStatus.Success())
jptm.ReportTransferDone()
return
}
numChunks := uint32(0)
if rem := fileSize % chunkSize; rem == 0 {
numChunks = uint32(fileSize / chunkSize)
} else {
numChunks = uint32(fileSize/chunkSize) + 1
}
jptm.SetNumberOfChunks(numChunks)
// step 4: Scheduling range update to the object created in Step 3
for startIndex := int64(0); startIndex < fileSize; startIndex += chunkSize {
adjustedChunkSize := chunkSize
// compute actual size of the chunk
if startIndex+chunkSize > fileSize {
adjustedChunkSize = fileSize - startIndex
}
// schedule the chunk job/msg
jptm.ScheduleChunks(fileUploadFunc(jptm, srcFile, fileURL, pacer, startIndex, adjustedChunkSize))
}
}
func fileUploadFunc(jptm IJobPartTransferMgr, srcFile *os.File, fileURL azfile.FileURL, pacer *pacer, startRange int64, rangeSize int64) chunkFunc {
info := jptm.Info()
return func(workerId int) {
// rangeDone is the function called after success / failure of each range.
// If the calling range is the last range of transfer, then it updates the transfer status,
// mark transfer done, unmap the source memory map and close the source file descriptor.
rangeDone := func() {
if lastRange, _ := jptm.ReportChunkDone(); lastRange {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug, "Finalizing transfer")
}
jptm.SetStatus(common.ETransferStatus.Success())
err := srcFile.Close()
if err != nil {
jptm.LogError(info.Source, "File Close Error ", err)
}
// If the transfer status is less than or equal to 0
// then transfer was either failed or cancelled
// the file created in share needs to be deleted
if jptm.TransferStatus() <= 0 {
_, err = fileURL.Delete(context.TODO())
if err != nil {
if jptm.ShouldLog(pipeline.LogError) {
jptm.Log(pipeline.LogInfo, fmt.Sprintf("error deleting the file %s. Failed with error %s", fileURL.String(), err.Error()))
}
}
}
jptm.ReportTransferDone()
}
}
srcMMF, err := common.NewMMF(srcFile, false, startRange, rangeSize)
if err != nil {
if err != nil {
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Failed to UploadRange from %d to %d, transfer was cancelled", startRange, startRange+rangeSize))
}
} else {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "Upload Range Error "+msg, status)
// cancelling the transfer
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
}
rangeDone()
return
}
}
defer srcMMF.Unmap()
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Range %d not picked, transfer is cancelled", startRange))
}
rangeDone()
} else {
// rangeBytes is the byte slice of Range for the given range.
rangeBytes := srcMMF.Slice()
allBytesZero := true
for index := 0; index < len(rangeBytes); index++ {
if rangeBytes[index] != 0 {
// If one byte is non 0, then we need to perform the PutRange.
allBytesZero = false
break
}
}
// If all the bytes in the rangeBytes is 0, then we do not need to perform the PutRange.
// Updating number of chunks done.
if allBytesZero {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Not uploading range from %d to %d, all bytes are zero", startRange, startRange+rangeSize | ocalToFile( | identifier_name | |
xfer-localToFile.go | )
chunkSize := int64(info.BlockSize)
// If the given chunk Size for the Job is greater than maximum file chunk size i.e 4 MB
// then chunk size will be 4 MB.
if chunkSize > common.DefaultAzureFileChunkSize {
chunkSize = common.DefaultAzureFileChunkSize
if jptm.ShouldLog(pipeline.LogWarning) {
jptm.Log(pipeline.LogWarning,
fmt.Sprintf("Block size %d larger than maximum file chunk size, 4 MB chunk size used", info.BlockSize))
}
}
if jptm.ShouldLog(pipeline.LogInfo) {
jptm.LogTransferStart(info.Source, info.Destination, fmt.Sprintf("Chunk size %d", chunkSize))
}
// If the transfer was cancelled, then reporting transfer as done and increasing the bytestransferred by the size of the source.
if jptm.WasCanceled() {
jptm.ReportTransferDone()
return
}
// If the force Write flags is set to false
// then check the file exists or not.
// If it does, mark transfer as failed.
if !jptm.IsForceWriteTrue() {
_, err := fileURL.GetProperties(jptm.Context())
if err == nil {
// If the error is nil, then blob exists and it doesn't needs to be uploaded.
jptm.LogUploadError(info.Source, info.Destination, "File already exists ", 0)
// Mark the transfer as failed with FileAlreadyExistsFailure
jptm.SetStatus(common.ETransferStatus.FileAlreadyExistsFailure())
jptm.ReportTransferDone()
return
}
}
// step 2: Map file upload before transferring chunks and get info from map file.
srcFile, err := os.Open(info.Source)
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "File Open Error "+err.Error(), 0)
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
return
}
srcFileInfo, err := srcFile.Stat()
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "File Stat Error "+err.Error(), 0)
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
return
}
byteLength := common.Iffint64(srcFileInfo.Size() > 512, 512, srcFileInfo.Size())
byteBuffer := make([]byte, byteLength)
_, err = srcFile.Read(byteBuffer)
// Get http headers and meta data of file.
fileHTTPHeaders, metaData := jptm.FileDstData(byteBuffer)
// step 3: Create parent directories and file.
// 3a: Create the parent directories of the file. Note share must be existed, as the files are listed from share or directory.
err = createParentDirToRoot(jptm.Context(), fileURL, p)
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "Parent Directory Create Error "+err.Error(), 0)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
srcFile.Close()
return
}
// 3b: Create Azure file with the source size.
_, err = fileURL.Create(jptm.Context(), fileSize, fileHTTPHeaders, metaData)
if err != nil {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "File Create Error "+msg, status)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
jptm.ReportTransferDone()
srcFile.Close()
// If the status code was 403, it means there was an authentication error and we exit.
// User can resume the job if completely ordered with a new sas.
if status == http.StatusForbidden {
common.GetLifecycleMgr().Exit(fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()), 1)
}
return
}
// If the file size is 0, scheduling chunk msgs for UploadRange is not required
if info.SourceSize == 0 {
// mark the transfer as successful
jptm.SetStatus(common.ETransferStatus.Success())
jptm.ReportTransferDone()
return
}
numChunks := uint32(0)
if rem := fileSize % chunkSize; rem == 0 {
numChunks = uint32(fileSize / chunkSize)
} else {
numChunks = uint32(fileSize/chunkSize) + 1
}
jptm.SetNumberOfChunks(numChunks)
// step 4: Scheduling range update to the object created in Step 3
for startIndex := int64(0); startIndex < fileSize; startIndex += chunkSize {
adjustedChunkSize := chunkSize
// compute actual size of the chunk
if startIndex+chunkSize > fileSize {
adjustedChunkSize = fileSize - startIndex
}
// schedule the chunk job/msg
jptm.ScheduleChunks(fileUploadFunc(jptm, srcFile, fileURL, pacer, startIndex, adjustedChunkSize))
}
}
func fileUploadFunc(jptm IJobPartTransferMgr, srcFile *os.File, fileURL azfile.FileURL, pacer *pacer, startRange int64, rangeSize int64) chunkFunc {
info := jptm.Info()
return func(workerId int) {
// rangeDone is the function called after success / failure of each range.
// If the calling range is the last range of transfer, then it updates the transfer status,
// mark transfer done, unmap the source memory map and close the source file descriptor.
rangeDone := func() {
if lastRange, _ := jptm.ReportChunkDone(); lastRange {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug, "Finalizing transfer")
}
jptm.SetStatus(common.ETransferStatus.Success())
err := srcFile.Close()
if err != nil {
jptm.LogError(info.Source, "File Close Error ", err)
}
// If the transfer status is less than or equal to 0
// then transfer was either failed or cancelled
// the file created in share needs to be deleted
if jptm.TransferStatus() <= 0 {
_, err = fileURL.Delete(context.TODO())
if err != nil {
if jptm.ShouldLog(pipeline.LogError) {
jptm.Log(pipeline.LogInfo, fmt.Sprintf("error deleting the file %s. Failed with error %s", fileURL.String(), err.Error()))
}
}
}
jptm.ReportTransferDone()
}
}
srcMMF, err := common.NewMMF(srcFile, false, startRange, rangeSize)
if err != nil {
if err != nil {
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Failed to UploadRange from %d to %d, transfer was cancelled", startRange, startRange+rangeSize))
}
} else {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "Upload Range Error "+msg, status)
// cancelling the transfer
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
}
rangeDone()
return
}
}
defer srcMMF.Unmap()
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Range %d not picked, transfer is cancelled", startRange))
}
rangeDone()
} else {
// rangeBytes is the byte slice of Range for the given range.
rangeBytes := srcMMF.Slice()
allBytesZero := true
for index := 0; index < len(rangeBytes); index++ {
if rangeBytes[index] != 0 {
// If one byte is non 0, then we need to perform the PutRange.
allBytesZero = false
break
}
}
// If all the bytes in the rangeBytes is 0, then we do not need to perform the PutRange.
// Updating number of chunks done.
if allBytesZero { |
body := newRequestBodyPacer(bytes.NewReader(rangeBytes), pacer, srcMMF)
_, err := fileURL.UploadRange(jptm.Context(), startRange, body)
if err != nil {
if jptm.WasCanceled() {
if jpt |
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Not uploading range from %d to %d, all bytes are zero", startRange, startRange+rangeSize))
}
rangeDone()
return
}
| conditional_block |
xfer-localToFile.go | File.Read(byteBuffer)
// Get http headers and meta data of file.
fileHTTPHeaders, metaData := jptm.FileDstData(byteBuffer)
// step 3: Create parent directories and file.
// 3a: Create the parent directories of the file. Note share must be existed, as the files are listed from share or directory.
err = createParentDirToRoot(jptm.Context(), fileURL, p)
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "Parent Directory Create Error "+err.Error(), 0)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
srcFile.Close()
return
}
// 3b: Create Azure file with the source size.
_, err = fileURL.Create(jptm.Context(), fileSize, fileHTTPHeaders, metaData)
if err != nil {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "File Create Error "+msg, status)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
jptm.ReportTransferDone()
srcFile.Close()
// If the status code was 403, it means there was an authentication error and we exit.
// User can resume the job if completely ordered with a new sas.
if status == http.StatusForbidden {
common.GetLifecycleMgr().Exit(fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()), 1)
}
return
}
// If the file size is 0, scheduling chunk msgs for UploadRange is not required
if info.SourceSize == 0 {
// mark the transfer as successful
jptm.SetStatus(common.ETransferStatus.Success())
jptm.ReportTransferDone()
return
}
numChunks := uint32(0)
if rem := fileSize % chunkSize; rem == 0 {
numChunks = uint32(fileSize / chunkSize)
} else {
numChunks = uint32(fileSize/chunkSize) + 1
}
jptm.SetNumberOfChunks(numChunks)
// step 4: Scheduling range update to the object created in Step 3
for startIndex := int64(0); startIndex < fileSize; startIndex += chunkSize {
adjustedChunkSize := chunkSize
// compute actual size of the chunk
if startIndex+chunkSize > fileSize {
adjustedChunkSize = fileSize - startIndex
}
// schedule the chunk job/msg
jptm.ScheduleChunks(fileUploadFunc(jptm, srcFile, fileURL, pacer, startIndex, adjustedChunkSize))
}
}
func fileUploadFunc(jptm IJobPartTransferMgr, srcFile *os.File, fileURL azfile.FileURL, pacer *pacer, startRange int64, rangeSize int64) chunkFunc {
info := jptm.Info()
return func(workerId int) {
// rangeDone is the function called after success / failure of each range.
// If the calling range is the last range of transfer, then it updates the transfer status,
// mark transfer done, unmap the source memory map and close the source file descriptor.
rangeDone := func() {
if lastRange, _ := jptm.ReportChunkDone(); lastRange {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug, "Finalizing transfer")
}
jptm.SetStatus(common.ETransferStatus.Success())
err := srcFile.Close()
if err != nil {
jptm.LogError(info.Source, "File Close Error ", err)
}
// If the transfer status is less than or equal to 0
// then transfer was either failed or cancelled
// the file created in share needs to be deleted
if jptm.TransferStatus() <= 0 {
_, err = fileURL.Delete(context.TODO())
if err != nil {
if jptm.ShouldLog(pipeline.LogError) {
jptm.Log(pipeline.LogInfo, fmt.Sprintf("error deleting the file %s. Failed with error %s", fileURL.String(), err.Error()))
}
}
}
jptm.ReportTransferDone()
}
}
srcMMF, err := common.NewMMF(srcFile, false, startRange, rangeSize)
if err != nil {
if err != nil {
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Failed to UploadRange from %d to %d, transfer was cancelled", startRange, startRange+rangeSize))
}
} else {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "Upload Range Error "+msg, status)
// cancelling the transfer
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
}
rangeDone()
return
}
}
defer srcMMF.Unmap()
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Range %d not picked, transfer is cancelled", startRange))
}
rangeDone()
} else {
// rangeBytes is the byte slice of Range for the given range.
rangeBytes := srcMMF.Slice()
allBytesZero := true
for index := 0; index < len(rangeBytes); index++ {
if rangeBytes[index] != 0 {
// If one byte is non 0, then we need to perform the PutRange.
allBytesZero = false
break
}
}
// If all the bytes in the rangeBytes is 0, then we do not need to perform the PutRange.
// Updating number of chunks done.
if allBytesZero {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Not uploading range from %d to %d, all bytes are zero", startRange, startRange+rangeSize))
}
rangeDone()
return
}
body := newRequestBodyPacer(bytes.NewReader(rangeBytes), pacer, srcMMF)
_, err := fileURL.UploadRange(jptm.Context(), startRange, body)
if err != nil {
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Failed to UploadRange from %d to %d, transfer was cancelled", startRange, startRange+rangeSize))
}
} else {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "Upload Range Error "+msg, status)
// cancelling the transfer
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
// If the status code was 403, it means there was an authentication error and we exit.
// User can resume the job if completely ordered with a new sas.
if status == http.StatusForbidden {
common.GetLifecycleMgr().Exit(fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()), 1)
}
}
rangeDone()
return
}
if jptm.ShouldLog(pipeline.LogInfo) {
jptm.Log(pipeline.LogInfo, "UPLOAD SUCCESSFUL")
}
rangeDone()
}
}
}
// getParentDirectoryURL gets parent directory URL of an Azure FileURL.
func getParentDirectoryURL(fileURL azfile.FileURL, p pipeline.Pipeline) azfile.DirectoryURL {
u := fileURL.URL()
u.Path = u.Path[:strings.LastIndex(u.Path, "/")]
return azfile.NewDirectoryURL(u, p)
}
// verifyAndHandleCreateErrors handles create errors, StatusConflict is ignored, as specific level directory could be existing.
// Report http.StatusForbidden, as user should at least have read and write permission of the destination,
// and there is no permission on directory level, i.e. create directory is a general permission for each level diretories for Azure file.
func verifyAndHandleCreateErrors(err error) error {
if err != nil {
sErr := err.(azfile.StorageError)
if sErr != nil && sErr.Response() != nil &&
(sErr.Response().StatusCode == http.StatusConflict) { // Note the ServiceCode actually be AuthenticationFailure when share failed to be created, if want to create share as well.
return nil
}
return err
}
return nil
}
// splitWithoutToken splits string with a given token, and returns splitted results without token.
func splitWithoutToken(str string, token rune) []string { |
return strings.FieldsFunc(str, func(c rune) bool {
return c == token
})
}
| identifier_body | |
xfer-localToFile.go | Size)
chunkSize := int64(info.BlockSize)
// If the given chunk Size for the Job is greater than maximum file chunk size i.e 4 MB
// then chunk size will be 4 MB.
if chunkSize > common.DefaultAzureFileChunkSize {
chunkSize = common.DefaultAzureFileChunkSize
if jptm.ShouldLog(pipeline.LogWarning) {
jptm.Log(pipeline.LogWarning,
fmt.Sprintf("Block size %d larger than maximum file chunk size, 4 MB chunk size used", info.BlockSize))
}
}
if jptm.ShouldLog(pipeline.LogInfo) {
jptm.LogTransferStart(info.Source, info.Destination, fmt.Sprintf("Chunk size %d", chunkSize))
}
// If the transfer was cancelled, then reporting transfer as done and increasing the bytestransferred by the size of the source.
if jptm.WasCanceled() {
jptm.ReportTransferDone()
return
}
// If the force Write flags is set to false
// then check the file exists or not.
// If it does, mark transfer as failed.
if !jptm.IsForceWriteTrue() {
_, err := fileURL.GetProperties(jptm.Context())
if err == nil {
// If the error is nil, then blob exists and it doesn't needs to be uploaded.
jptm.LogUploadError(info.Source, info.Destination, "File already exists ", 0)
// Mark the transfer as failed with FileAlreadyExistsFailure
jptm.SetStatus(common.ETransferStatus.FileAlreadyExistsFailure())
jptm.ReportTransferDone()
return
}
}
// step 2: Map file upload before transferring chunks and get info from map file.
srcFile, err := os.Open(info.Source)
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "File Open Error "+err.Error(), 0)
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
return
}
srcFileInfo, err := srcFile.Stat()
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "File Stat Error "+err.Error(), 0)
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
return
}
byteLength := common.Iffint64(srcFileInfo.Size() > 512, 512, srcFileInfo.Size())
byteBuffer := make([]byte, byteLength)
_, err = srcFile.Read(byteBuffer)
// Get http headers and meta data of file.
fileHTTPHeaders, metaData := jptm.FileDstData(byteBuffer)
// step 3: Create parent directories and file.
// 3a: Create the parent directories of the file. Note share must be existed, as the files are listed from share or directory.
err = createParentDirToRoot(jptm.Context(), fileURL, p)
if err != nil {
jptm.LogUploadError(info.Source, info.Destination, "Parent Directory Create Error "+err.Error(), 0)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.ReportTransferDone()
srcFile.Close()
return
}
// 3b: Create Azure file with the source size.
_, err = fileURL.Create(jptm.Context(), fileSize, fileHTTPHeaders, metaData)
if err != nil {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "File Create Error "+msg, status)
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
jptm.ReportTransferDone()
srcFile.Close()
// If the status code was 403, it means there was an authentication error and we exit.
// User can resume the job if completely ordered with a new sas.
if status == http.StatusForbidden {
common.GetLifecycleMgr().Exit(fmt.Sprintf("Authentication Failed. The SAS is not correct or expired or does not have the correct permission %s", err.Error()), 1)
}
return
}
// If the file size is 0, scheduling chunk msgs for UploadRange is not required
if info.SourceSize == 0 {
// mark the transfer as successful
jptm.SetStatus(common.ETransferStatus.Success())
jptm.ReportTransferDone()
return
}
numChunks := uint32(0)
if rem := fileSize % chunkSize; rem == 0 {
numChunks = uint32(fileSize / chunkSize)
} else {
numChunks = uint32(fileSize/chunkSize) + 1
}
jptm.SetNumberOfChunks(numChunks)
// step 4: Scheduling range update to the object created in Step 3
for startIndex := int64(0); startIndex < fileSize; startIndex += chunkSize {
adjustedChunkSize := chunkSize
// compute actual size of the chunk
if startIndex+chunkSize > fileSize {
adjustedChunkSize = fileSize - startIndex
}
// schedule the chunk job/msg
jptm.ScheduleChunks(fileUploadFunc(jptm, srcFile, fileURL, pacer, startIndex, adjustedChunkSize))
}
}
func fileUploadFunc(jptm IJobPartTransferMgr, srcFile *os.File, fileURL azfile.FileURL, pacer *pacer, startRange int64, rangeSize int64) chunkFunc {
info := jptm.Info()
return func(workerId int) {
// rangeDone is the function called after success / failure of each range.
// If the calling range is the last range of transfer, then it updates the transfer status,
// mark transfer done, unmap the source memory map and close the source file descriptor.
rangeDone := func() {
if lastRange, _ := jptm.ReportChunkDone(); lastRange {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug, "Finalizing transfer")
}
jptm.SetStatus(common.ETransferStatus.Success())
err := srcFile.Close()
if err != nil {
jptm.LogError(info.Source, "File Close Error ", err)
}
// If the transfer status is less than or equal to 0
// then transfer was either failed or cancelled
// the file created in share needs to be deleted
if jptm.TransferStatus() <= 0 {
_, err = fileURL.Delete(context.TODO())
if err != nil {
if jptm.ShouldLog(pipeline.LogError) {
jptm.Log(pipeline.LogInfo, fmt.Sprintf("error deleting the file %s. Failed with error %s", fileURL.String(), err.Error()))
}
}
}
jptm.ReportTransferDone()
}
}
srcMMF, err := common.NewMMF(srcFile, false, startRange, rangeSize)
if err != nil {
if err != nil {
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Failed to UploadRange from %d to %d, transfer was cancelled", startRange, startRange+rangeSize))
}
} else {
status, msg := ErrorEx{err}.ErrorCodeAndString()
jptm.LogUploadError(info.Source, info.Destination, "Upload Range Error "+msg, status)
// cancelling the transfer
jptm.Cancel()
jptm.SetStatus(common.ETransferStatus.Failed())
jptm.SetErrorCode(int32(status))
}
rangeDone()
return
}
}
defer srcMMF.Unmap()
if jptm.WasCanceled() {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Range %d not picked, transfer is cancelled", startRange)) | rangeDone()
} else {
// rangeBytes is the byte slice of Range for the given range.
rangeBytes := srcMMF.Slice()
allBytesZero := true
for index := 0; index < len(rangeBytes); index++ {
if rangeBytes[index] != 0 {
// If one byte is non 0, then we need to perform the PutRange.
allBytesZero = false
break
}
}
// If all the bytes in the rangeBytes is 0, then we do not need to perform the PutRange.
// Updating number of chunks done.
if allBytesZero {
if jptm.ShouldLog(pipeline.LogDebug) {
jptm.Log(pipeline.LogDebug,
fmt.Sprintf("Not uploading range from %d to %d, all bytes are zero", startRange, startRange+rangeSize))
}
rangeDone()
return
}
body := newRequestBodyPacer(bytes.NewReader(rangeBytes), pacer, srcMMF)
_, err := fileURL.UploadRange(jptm.Context(), startRange, body)
if err != nil {
if jptm.WasCanceled() {
if jptm | } | random_line_split |
event.go | ARD Code:%d", event.Code, event.Keyboard)
case DeviceMouse:
return fmt.Sprintf("Device: MOUSE Code:%d", event.Code, event.Mouse)
}
return "Device: Unkown"
}
// デバイスの種別を取り扱う列挙型です
type Device int32
// Device型の値
const (
DeviceUnkown Device = iota // 不明なデバイス
DeviceKeyboard // キーボード
DeviceMouse // マウス
DeviceJoypad // ジョイパッド
)
// 動作の種類の列挙型です
type EventCode int32
// EventType型の値
const (
NoEvent EventCode = iota // 何もなかった場合
Unknown // 不明のイベント
MouseLeftDown // 左ボタンを押した
MouseLeftUp // 左ボタン離した
MouseRightDown // 右ボタン押した
MouseRightUp // 右ボタン離した
MouseLeftDragging // 左ボタンを押したまま移動した
MouseRightDragging // 右ボタンを押したまま移動した
MouseLeftDrop // 左ボタンを押したまま移動して離した
MouseRightDrop // 右ボタンを押したたまま移動して離した
MouseMove // ボタンを押さずに移動した
MouseWheelUp // ホイールを上に動かした
MouseWheelDown // ホイールを下に動かした
KeyPressOff // 離した時
KeyPressOn // 押した時
KeyPressRepeat // キーを押し続けている時
)
// キーボードからの入力情報です。
type Keyboard struct {
Keycode Scancode // SDLスキャンコード(物理キーコード)
Repeat uint8 // キーが押しっぱなしなら1(Event.code>0の場合のみ)
}
// スキャンコード(キーのID)
type Scancode sdl.Scancode
// Scancode型の値
const (
K_UNKNOWN sdl.Scancode = sdl.K_UNKNOWN // "" (no name, empty string)
K_RETURN = sdl.K_RETURN // "Return" (the Enter key (main keyboard))
K_ESCAPE = sdl.K_ESCAPE // "Escape" (the Esc key)
K_BACKSPACE = sdl.K_BACKSPACE // "Backspace"
K_TAB = sdl.K_TAB // "Tab" (the Tab key)
K_SPACE = sdl.K_SPACE // "Space" (the Space Bar key(s))
K_EXCLAIM = sdl.K_EXCLAIM // "!"
K_QUOTEDBL = sdl.K_QUOTEDBL // """
K_HASH = sdl.K_HASH // "#"
K_PERCENT = sdl.K_PERCENT // "%"
K_DOLLAR = sdl.K_DOLLAR // "$"
K_AMPERSAND = sdl.K_AMPERSAND // "&"
K_QUOTE = sdl.K_QUOTE // "'"
K_LEFTPAREN = sdl.K_LEFTPAREN // "("
K_RIGHTPAREN = sdl.K_RIGHTPAREN // ")"
K_ASTERISK = sdl.K_ASTERISK // "*"
K_PLUS = sdl.K_PLUS // "+"
K_COMMA = sdl.K_COMMA // ","
K_MINUS = sdl.K_MINUS // "-"
K_PERIOD = sdl.K_PERIOD // "."
K_SLASH = sdl.K_SLASH // "/"
K_0 = sdl.K_0 // "0"
K_1 = sdl.K_1 // "1"
K_2 = sdl.K_2 // "2"
K_3 = sdl.K_3 // "3"
K_4 = sdl.K_4 // "4"
K_5 = sdl.K_5 // "5"
K_6 = sdl.K_6 // "6"
K_7 = sdl.K_7 // "7"
K_8 = sdl.K_8 // "8"
K_9 = sdl.K_9 // "9"
K_COLON = sdl.K_COLON // ":"
K_SEMICOLON = sdl.K_SEMICOLON // ";"
K_LESS = sdl.K_LESS // "<"
K_EQUALS = sdl.K_EQUALS // "="
K_GREATER = sdl.K_GREATER // ">"
K_QUESTION = sdl.K_QUESTION // "?"
K_AT = sdl.K_AT // "@"
/*
Skip uppercase letters
*/
K_LEFTBRACKET = sdl.K_LEFTBRACKET // "["
K_BACKSLASH = sdl.K_BACKSLASH // "\"
K_RIGHTBRACKET = sdl.K_RIGHTBRACKET // "]"
K_CARET = sdl.K_CARET // "^"
K_UNDERSCORE = sdl.K_UNDERSCORE // "_"
K_BACKQUOTE = sdl.K_BACKQUOTE // "`"
K_a = sdl.K_a // "A"
K_b = sdl.K_b // "B"
K_c = sdl.K_c // "C"
K_d = sdl.K_d // "D"
K_e = sdl.K_e // "E"
K_f = sdl.K_f // "F"
K_g = sdl.K_g // "G"
K_h = sdl.K_h // "H"
K_i = sdl.K_i // "I"
K_j = sdl.K_j // "J"
K_k = sdl.K_k // "K"
K_l = sdl.K_l // "L"
K_m = sdl.K_m // "M"
K_n = sdl.K_n // "N"
K_o = sdl.K_o // "O"
K_p = sdl.K_p // "P"
K_q = sdl.K_q // "Q"
K_r = sdl.K_r // "R"
K_s = sdl.K_s // "S"
K_t = sdl.K_t // "T"
K_u = sdl.K_u // "U"
K_v = sdl.K_v // "V"
K_w = sdl.K_w // "W"
K_x = sdl.K_x // "X"
K_y = sdl.K_y // "Y"
K_z = sdl.K_z // "Z"
K_CAPSLOCK = sdl.K_CAPSLOCK // "CapsLock"
K_F1 = sdl.K_F1 // "F1"
K_F2 = sdl.K_F2 // "F2"
K_F3 = sdl.K_F3 // "F3"
K_F4 = sdl.K_F4 // "F4"
K_F5 = sdl.K_F5 // "F5"
K_F6 = sdl.K_F6 // "F6"
K_F7 = sdl.K_F7 // "F7"
K_F8 = sdl.K_F8 // "F8"
K_F9 = sdl.K_F9 // "F9"
K_F10 = sdl.K_F10 // "F10"
K_F11 = sdl.K_F11 // "F11"
K_F12 = sdl.K_F12 // "F12"
K_PRINTSCREEN = sdl.K_PRINTSCREEN // "PrintScreen"
K_SCROLLLOCK = sdl.K_SCROLLLOCK // "ScrollLock"
K_PAUSE = sdl.K_PAUSE // "Pause" (the Pause / Break key)
K_INSERT = sdl.K_INSERT // "Insert" (insert on PC, help on some Mac keyboards (but does send code 73, not 117))
K_HOME = sdl.K_HOME // "Home"
K_PAGEUP = sdl.K_PAGEUP // "PageUp"
K_DELETE = sdl.K_DELETE // "Delete"
K_END = sdl.K_END // "End"
K_PAGEDOWN = sdl.K_PAGEDOWN // "PageDown"
K_RIGHT = sdl.K_RIGHT // "Right" (the Right arrow key (navigation keypad))
K_LEFT = sdl.K_LEFT // "Left" (the Left arrow key (navigation keypad))
K_DOWN = sdl.K_DOWN // "Down" (the Down arrow key (navigation keypad))
K_UP = sdl.K_UP // "Up" (the Up arrow key (navigation keypad))
K_NUMLOCKCLEAR = sdl.K_NUMLOCKCLEAR // "Numlock" (the Num Lock key (PC) / the Clear key (Mac))
K_KP_DIVIDE = sdl.K_KP_DIVIDE // "Keypad /" (the / key (numeric keypad))
K_KP_MULTIPLY = sdl.K_KP_MULTIPLY // "Keypad *" (the * key (numeric keypad))
K_KP_MINUS = sdl.K_KP_MINUS // "Keypad -" (the - key (numeric keypad))
K_KP_PLUS = sdl.K_KP_PLUS // "Keypad +" (the + key (numeric keypad))
K_KP_ENTER = sdl.K_KP_ENTER // "Keypad Enter" (the Enter key (numeric keypad))
K_KP_1 = sdl.K_KP_1 // "Keypad 1" (the 1 key (numeric keypad))
K_KP_2 = sdl.K_KP_2 // "Ke | KEYBO | identifier_name | |
event.go | // 不明なデバイス
DeviceKeyboard // キーボード
DeviceMouse // マウス
DeviceJoypad // ジョイパッド
)
// 動作の種類の列挙型です
type EventCode int32
// EventType型の値
const (
NoEvent EventCode = iota // 何もなかった場合
Unknown // 不明のイベント
MouseLeftDown // 左ボタンを押した
MouseLeftUp // 左ボタン離した
MouseRightDown // 右ボタン押した
MouseRightUp // 右ボタン離した
MouseLeftDragging // 左ボタンを押したまま移動した
MouseRightDragging // 右ボタンを押したまま移動した
MouseLeftDrop // 左ボタンを押したまま移動して離した
MouseRightDrop // 右ボタンを押したたまま移動して離した
MouseMove // ボタンを押さずに移動した
MouseWheelUp // ホイールを上に動かした
MouseWheelDown // ホイールを下に動かした
KeyPressOff // 離した時
KeyPressOn // 押した時
KeyPressRepeat // キーを押し続けている時
)
// キーボードからの入力情報です。
type Keyboard struct {
Keycode Scancode // SDLスキャンコード(物理キーコード)
Repeat uint8 // キーが押しっぱなしなら1(Event.code>0の場合のみ)
}
// スキャンコード(キーのID)
type Scancode sdl.Scancode
// Scancode型の値
const (
K_UNKNOWN sdl.Scancode = sdl.K_UNKNOWN // "" (no name, empty string)
K_RETURN = sdl.K_RETURN // "Return" (the Enter key (main keyboard))
K_ESCAPE = sdl.K_ESCAPE // "Escape" (the Esc key)
K_BACKSPACE = sdl.K_BACKSPACE // "Backspace"
K_TAB = sdl.K_TAB // "Tab" (the Tab key)
K_SPACE = sdl.K_SPACE // "Space" (the Space Bar key(s))
K_EXCLAIM = sdl.K_EXCLAIM // "!"
K_QUOTEDBL = sdl.K_QUOTEDBL // """
K_HASH = sdl.K_HASH // "#"
K_PERCENT = sdl.K_PERCENT // "%"
K_DOLLAR = sdl.K_DOLLAR // "$"
K_AMPERSAND = sdl.K_AMPERSAND // "&"
K_QUOTE = sdl.K_QUOTE // "'"
K_LEFTPAREN = sdl.K_LEFTPAREN // "("
K_RIGHTPAREN = sdl.K_RIGHTPAREN // ")"
K_ASTERISK = sdl.K_ASTERISK // "*"
K_PLUS = sdl.K_PLUS // "+"
K_COMMA = sdl.K_COMMA // ","
K_MINUS = sdl.K_MINUS // "-"
K_PERIOD = sdl.K_PERIOD // "."
K_SLASH = sdl.K_SLASH // "/"
K_0 = sdl.K_0 // "0"
K_1 = sdl.K_1 // "1"
K_2 = sdl.K_2 // "2"
K_3 = sdl.K_3 // "3"
K_4 = sdl.K_4 // "4"
K_5 = sdl.K_5 // "5"
K_6 = sdl.K_6 // "6"
K_7 = sdl.K_7 // "7"
K_8 = sdl.K_8 // "8"
K_9 = sdl.K_9 // "9"
K_COLON = sdl.K_COLON // ":"
K_SEMICOLON = sdl.K_SEMICOLON // ";"
K_LESS = sdl.K_LESS // "<"
K_EQUALS = sdl.K_EQUALS // "="
K_GREATER = sdl.K_GREATER // ">"
K_QUESTION = sdl.K_QUESTION // "?"
K_AT = sdl.K_AT // "@"
/*
Skip uppercase letters
*/
K_LEFTBRACKET = sdl.K_LEFTBRACKET // "["
K_BACKSLASH = sdl.K_BACKSLASH // "\"
K_RIGHTBRACKET = sdl.K_RIGHTBRACKET // "]"
K_CARET = sdl.K_CARET // "^"
K_UNDERSCORE = sdl.K_UNDERSCORE // "_"
K_BACKQUOTE = sdl.K_BACKQUOTE // "`"
K_a = sdl.K_a // "A"
K_b = sdl.K_b // "B"
K_c = sdl.K_c // "C"
K_d = sdl.K_d // "D"
K_e = sdl.K_e // "E"
K_f = sdl.K_f // "F"
K_g = sdl.K_g // "G"
K_h = sdl.K_h // "H"
K_i = sdl.K_i // "I"
K_j = sdl.K_j // "J"
K_k = sdl.K_k // "K"
K_l = sdl.K_l // "L"
K_m = sdl.K_m // "M"
K_n = sdl.K_n // "N"
K_o = sdl.K_o // "O"
K_p = sdl.K_p // "P"
K_q = sdl.K_q // "Q"
K_r = sdl.K_r // "R"
K_s = sdl.K_s // "S"
K_t = sdl.K_t // "T"
K_u = sdl.K_u // "U"
K_v = sdl.K_v // "V"
K_w = sdl.K_w // "W"
K_x = sdl.K_x // "X"
K_y = sdl.K_y // "Y"
K_z = sdl.K_z // "Z"
K_CAPSLOCK = sdl.K_CAPSLOCK // "CapsLock"
K_F1 = sdl.K_F1 // "F1"
K_F2 = sdl.K_F2 // "F2"
K_F3 = sdl.K_F3 // "F3"
K_F4 = sdl.K_F4 // "F4"
K_F5 = sdl.K_F5 // "F5"
K_F6 = sdl.K_F6 // "F6"
K_F7 = sdl.K_F7 // "F7"
K_F8 = sdl.K_F8 // "F8"
K_F9 = sdl.K_F9 // "F9"
K_F10 = sdl.K_F10 // "F10"
K_F11 = sdl.K_F11 // "F11"
K_F12 = sdl.K_F12 // "F12"
K_PRINTSCREEN = sdl.K_PRINTSCREEN // "PrintScreen"
K_SCROLLLOCK = sdl.K_SCROLLLOCK // "ScrollLock"
K_PAUSE = sdl.K_PAUSE // "Pause" (the Pause / Break key)
K_INSERT = sdl.K_INSERT // "Insert" (insert on PC, help on some Mac keyboards (but does send code 73, not 117))
K_HOME = sdl.K_HOME // "Home"
K_PAGEUP = sdl.K_PAGEUP // "PageUp"
K_DELETE = sdl.K_DELETE // "Delete"
K_END = sdl.K_END // "End"
K_PAGEDOWN = sdl.K_PAGEDOWN // "PageDown"
K_RIGHT = sdl.K_RIGHT // "Right" (the Right arrow key (navigation keypad))
K_LEFT = sdl.K_LEFT // "Left" (the Left arrow key (navigation keypad))
K_DOWN = sdl.K_DOWN // "Down" (the Down arrow key (navigation keypad))
K_UP = sdl.K_UP // "Up" (the Up arrow key (navigation keypad))
K_NUMLOCKCLEAR = sdl.K_NUMLOCKCLEAR // "Numlock" (the Num Lock key (PC) / the Clear key (Mac))
K_KP_DIVIDE = sdl.K_KP_DIVIDE // "Keypad /" (the / key (numeric keypad))
K_KP_MULTIPLY = sdl.K_KP_MULTIPLY // "Keypad *" (the * key (numeric keypad))
K_KP_MINUS = sdl.K_KP_MINUS // "Keypad -" (the - key (numeric keypad))
K_KP_PLUS = sdl.K_KP_PLUS // "Keypad +" (the + key (numeric keypad))
K_KP_ENTER = sdl.K_KP_ENTER // "Keypad Enter" (the Enter key (numeric keypad))
K_KP_1 = sdl.K_KP_1 // "Keypad 1" (the 1 key (numeric keypad))
K_KP_2 = sdl.K_KP_2 // "Keypad 2" ( | d", event.Code, event.Keyboard)
case DeviceMouse:
return fmt.Sprintf("Device: MOUSE Code:%d", event.Code, event.Mouse)
}
return "Device: Unkown"
}
// デバイスの種別を取り扱う列挙型です
type Device int32
// Device型の値
const (
DeviceUnkown Device = iota | identifier_body | |
event.go | numeric keypad))
K_APPLICATION = sdl.K_APPLICATION // "Application" (the Application / Compose / Context Menu (Windows) key)
K_POWER = sdl.K_POWER // "Power" (The USB document says this is a status flag, not a physical key - but some Mac keyboards do have a power key.)
K_KP_EQUALS = sdl.K_KP_EQUALS // "Keypad =" (the = key (numeric keypad))
K_F13 = sdl.K_F13 // "F13"
K_F14 = sdl.K_F14 // "F14"
K_F15 = sdl.K_F15 // "F15"
K_F16 = sdl.K_F16 // "F16"
K_F17 = sdl.K_F17 // "F17"
K_F18 = sdl.K_F18 // "F18"
K_F19 = sdl.K_F19 // "F19"
K_F20 = sdl.K_F20 // "F20"
K_F21 = sdl.K_F21 // "F21"
K_F22 = sdl.K_F22 // "F22"
K_F23 = sdl.K_F23 // "F23"
K_F24 = sdl.K_F24 // "F24"
K_EXECUTE = sdl.K_EXECUTE // "Execute"
K_HELP = sdl.K_HELP // "Help"
K_MENU = sdl.K_MENU // "Menu"
K_SELECT = sdl.K_SELECT // "Select"
K_STOP = sdl.K_STOP // "Stop"
K_AGAIN = sdl.K_AGAIN // "Again" (the Again key (Redo))
K_UNDO = sdl.K_UNDO // "Undo"
K_CUT = sdl.K_CUT // "Cut"
K_COPY = sdl.K_COPY // "Copy"
K_PASTE = sdl.K_PASTE // "Paste"
K_FIND = sdl.K_FIND // "Find"
K_MUTE = sdl.K_MUTE // "Mute"
K_VOLUMEUP = sdl.K_VOLUMEUP // "VolumeUp"
K_VOLUMEDOWN = sdl.K_VOLUMEDOWN // "VolumeDown"
K_KP_COMMA = sdl.K_KP_COMMA // "Keypad ," (the Comma key (numeric keypad))
K_KP_EQUALSAS400 = sdl.K_KP_EQUALSAS400 // "Keypad = (AS400)" (the Equals AS400 key (numeric keypad))
K_ALTERASE = sdl.K_ALTERASE // "AltErase" (Erase-Eaze)
K_SYSREQ = sdl.K_SYSREQ // "SysReq" (the SysReq key)
K_CANCEL = sdl.K_CANCEL // "Cancel"
K_CLEAR = sdl.K_CLEAR // "Clear"
K_PRIOR = sdl.K_PRIOR // "Prior"
K_RETURN2 = sdl.K_RETURN2 // "Return"
K_SEPARATOR = sdl.K_SEPARATOR // "Separator"
K_OUT = sdl.K_OUT // "Out"
K_OPER = sdl.K_OPER // "Oper"
K_CLEARAGAIN = sdl.K_CLEARAGAIN // "Clear / Again"
K_CRSEL = sdl.K_CRSEL // "CrSel"
K_EXSEL = sdl.K_EXSEL // "ExSel"
K_KP_00 = sdl.K_KP_00 // "Keypad 00" (the 00 key (numeric keypad))
K_KP_000 = sdl.K_KP_000 // "Keypad 000" (the 000 key (numeric keypad))
K_THOUSANDSSEPARATOR = sdl.K_THOUSANDSSEPARATOR // "ThousandsSeparator" (the Thousands Separator key)
K_DECIMALSEPARATOR = sdl.K_DECIMALSEPARATOR // "DecimalSeparator" (the Decimal Separator key)
K_CURRENCYUNIT = sdl.K_CURRENCYUNIT // "CurrencyUnit" (the Currency Unit key)
K_CURRENCYSUBUNIT = sdl.K_CURRENCYSUBUNIT // "CurrencySubUnit" (the Currency Subunit key)
K_KP_LEFTPAREN = sdl.K_KP_LEFTPAREN // "Keypad (" (the Left Parenthesis key (numeric keypad))
K_KP_RIGHTPAREN = sdl.K_KP_RIGHTPAREN // "Keypad )" (the Right Parenthesis key (numeric keypad))
K_KP_LEFTBRACE = sdl.K_KP_LEFTBRACE // "Keypad {" (the Left Brace key (numeric keypad))
K_KP_RIGHTBRACE = sdl.K_KP_RIGHTBRACE // "Keypad }" (the Right Brace key (numeric keypad))
K_KP_TAB = sdl.K_KP_TAB // "Keypad Tab" (the Tab key (numeric keypad))
K_KP_BACKSPACE = sdl.K_KP_BACKSPACE // "Keypad Backspace" (the Backspace key (numeric keypad))
K_KP_A = sdl.K_KP_A // "Keypad A" (the A key (numeric keypad))
K_KP_B = sdl.K_KP_B // "Keypad B" (the B key (numeric keypad))
K_KP_C = sdl.K_KP_C // "Keypad C" (the C key (numeric keypad))
K_KP_D = sdl.K_KP_D // "Keypad D" (the D key (numeric keypad))
K_KP_E = sdl.K_KP_E // "Keypad E" (the E key (numeric keypad))
K_KP_F = sdl.K_KP_F // "Keypad F" (the F key (numeric keypad))
K_KP_XOR = sdl.K_KP_XOR // "Keypad XOR" (the XOR key (numeric keypad))
K_KP_POWER = sdl.K_KP_POWER // "Keypad ^" (the Power key (numeric keypad))
K_KP_PERCENT = sdl.K_KP_PERCENT // "Keypad %" (the Percent key (numeric keypad))
K_KP_LESS = sdl.K_KP_LESS // "Keypad <" (the Less key (numeric keypad))
K_KP_GREATER = sdl.K_KP_GREATER // "Keypad >" (the Greater key (numeric keypad))
K_KP_AMPERSAND = sdl.K_KP_AMPERSAND // "Keypad &" (the & key (numeric keypad))
K_KP_DBLAMPERSAND = sdl.K_KP_DBLAMPERSAND // "Keypad &&" (the && key (numeric keypad))
K_KP_VERTICALBAR = sdl.K_KP_VERTICALBAR // "Keypad |" (the | key (numeric keypad))
K_KP_DBLVERTICALBAR = sdl.K_KP_DBLVERTICALBAR // "Keypad ||" (the || key (numeric keypad))
K_KP_COLON = sdl.K_KP_COLON // "Keypad :" (the : key (numeric keypad))
K_KP_HASH = sdl.K_KP_HASH // "Keypad #" (the # key (numeric keypad))
K_KP_SPACE = sdl.K_KP_SPACE // "Keypad Space" (the Space key (numeric keypad))
K_KP_AT = sdl.K_KP_AT // "Keypad @" (the @ key (numeric keypad))
K_KP_EXCLAM = sdl.K_KP_EXCLAM // "Keypad !" (the ! key (numeric keypad))
K_KP_MEMSTORE = sdl.K_KP_MEMSTORE // "Keypad MemStore" (the Mem Store key (numeric keypad))
K_KP_MEMRECALL = sdl.K_KP_MEMRECALL // "Keypad MemRecall" (the Mem Recall key (numeric keypad))
K_KP_MEMCLEAR = sdl.K_KP_MEMCLEAR // "Keypad MemClear" (the Mem Clear key (numeric keypad))
K_KP_MEMADD = sdl.K_KP_MEMADD // "Keypad MemAdd" (the Mem Add key (numeric keypad))
K_KP_MEMSUBTRACT = sdl.K_KP_MEMSUBTRACT // "Keypad MemSubtract" (the Mem Subtract key (numeric keypad))
K_KP_MEMMULTIPLY = sdl.K_KP_MEMMULTIPLY // "Keypad MemMultiply" (the Mem Multiply key (numeric keypad))
K_KP_MEMDIVIDE = sdl.K_KP_MEMDIVIDE // "Keypad MemDivide" (the Mem Divide key (numeric keypad))
K_KP_PLUSMINUS = sdl.K_KP_PLUSMINUS // "Keypad +/-" (the +/- key (numeric keypad))
K_KP_CLEAR = sdl.K_KP_CLEAR // "Keypad Clear" (the Clear key (numeric keypad))
K_KP_CLEARENTRY = sdl.K_KP_CLEARENTRY // "Keypad ClearEntry" (the Clear Entry key (numeric keypad))
K_KP_BINARY = sdl.K_KP_BINARY // "Keypad Binary" (the Binary key (numeric keypad))
K_KP_OCTAL = sdl.K_KP_OCTAL // "Keypad Octal" (the Octal key (numeric keypad))
K_KP_DECIMAL = sdl.K_KP_DECIMAL // "Keypad Decimal" (the Decimal key (numeric keypad))
K_KP_HEXADECIMAL = sdl.K_KP_HEXADECIMAL // "Keypad Hexadecimal" (the Hexadecimal key (numeric keypad))
K_LCTRL = sdl.K_LCTRL // "Left Ctrl"
K_LSHIFT = sdl.K_LSHIFT // "Left Shift" | K_LALT = sdl.K_LALT // "Left Alt" (alt, option) | random_line_split | |
sort.go | multiWayMerge *multiWayMerge
// spillAction save the Action for spill disk.
spillAction *chunk.SortAndSpillDiskAction
}
// Close implements the Executor Close interface.
func (e *SortExec) Close() error {
for _, container := range e.partitionList {
err := container.Close()
if err != nil {
return err
}
}
e.partitionList = e.partitionList[:0]
if e.rowChunks != nil {
e.memTracker.Consume(-e.rowChunks.GetMemTracker().BytesConsumed())
e.rowChunks = nil
}
e.memTracker = nil
e.diskTracker = nil
e.multiWayMerge = nil
if e.spillAction != nil {
e.spillAction.SetFinished()
}
e.spillAction = nil
return e.Children(0).Close()
}
// Open implements the Executor Open interface.
func (e *SortExec) Open(ctx context.Context) error {
e.fetched = false
e.Idx = 0
// To avoid duplicated initialization for TopNExec.
if e.memTracker == nil {
e.memTracker = memory.NewTracker(e.ID(), -1)
e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
e.diskTracker = memory.NewTracker(e.ID(), -1)
e.diskTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.DiskTracker)
}
e.partitionList = e.partitionList[:0]
return e.Children(0).Open(ctx)
}
// Next implements the Executor Next interface.
// Sort constructs the result following these step:
// 1. Read as mush as rows into memory.
// 2. If memory quota is triggered, sort these rows in memory and put them into disk as partition 1, then reset
// the memory quota trigger and return to step 1
// 3. If memory quota is not triggered and child is consumed, sort these rows in memory as partition N.
// 4. Merge sort if the count of partitions is larger than 1. If there is only one partition in step 4, it works
// just like in-memory sort before.
func (e *SortExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if !e.fetched {
e.initCompareFuncs()
e.buildKeyColumns()
err := e.fetchRowChunks(ctx)
if err != nil {
return err
}
e.fetched = true
}
if len(e.partitionList) == 0 {
return nil
}
if len(e.partitionList) > 1 {
if err := e.externalSorting(req); err != nil {
return err
}
} else {
for !req.IsFull() && e.Idx < e.partitionList[0].NumRow() {
row, err := e.partitionList[0].GetSortedRow(e.Idx)
if err != nil {
return err
}
req.AppendRow(row)
e.Idx++
}
}
return nil
}
func (e *SortExec) externalSorting(req *chunk.Chunk) (err error) {
if e.multiWayMerge == nil {
e.multiWayMerge = &multiWayMerge{e.lessRow, e.compressRow, make([]partitionPointer, 0, len(e.partitionList))}
for i := 0; i < len(e.partitionList); i++ {
row, err := e.partitionList[i].GetSortedRow(0)
if err != nil {
return err
}
e.multiWayMerge.elements = append(e.multiWayMerge.elements, partitionPointer{row: row, partitionID: i, consumed: 0})
}
heap.Init(e.multiWayMerge)
}
for !req.IsFull() && e.multiWayMerge.Len() > 0 {
partitionPtr := e.multiWayMerge.elements[0]
req.AppendRow(partitionPtr.row)
partitionPtr.consumed++
if partitionPtr.consumed >= e.partitionList[partitionPtr.partitionID].NumRow() {
heap.Remove(e.multiWayMerge, 0)
continue
}
partitionPtr.row, err = e.partitionList[partitionPtr.partitionID].
GetSortedRow(partitionPtr.consumed)
if err != nil {
return err
}
e.multiWayMerge.elements[0] = partitionPtr
heap.Fix(e.multiWayMerge, 0)
}
return nil
}
func (e *SortExec) fetchRowChunks(ctx context.Context) error {
fields := retTypes(e)
byItemsDesc := make([]bool, len(e.ByItems))
for i, byItem := range e.ByItems {
byItemsDesc[i] = byItem.Desc
}
e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
if variable.EnableTmpStorageOnOOM.Load() {
e.spillAction = e.rowChunks.ActionSpill()
failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
if val.(bool) {
e.spillAction = e.rowChunks.ActionSpillForTest()
defer e.spillAction.WaitForTest()
}
})
e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
}
for {
chk := tryNewCacheChunk(e.Children(0))
err := Next(ctx, e.Children(0), chk)
if err != nil {
return err
}
rowCount := chk.NumRows()
if rowCount == 0 {
break
}
if err := e.rowChunks.Add(chk); err != nil {
if errors.Is(err, chunk.ErrCannotAddBecauseSorted) {
e.partitionList = append(e.partitionList, e.rowChunks)
e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
e.spillAction = e.rowChunks.ActionSpill()
failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
if val.(bool) {
e.spillAction = e.rowChunks.ActionSpillForTest()
defer e.spillAction.WaitForTest()
}
})
e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
err = e.rowChunks.Add(chk)
}
if err != nil {
return err
}
}
}
failpoint.Inject("SignalCheckpointForSort", func(val failpoint.Value) {
if val.(bool) {
if e.Ctx().GetSessionVars().ConnectionID == 123456 {
e.Ctx().GetSessionVars().MemTracker.NeedKill.Store(true)
}
}
})
if e.rowChunks.NumRow() > 0 {
e.rowChunks.Sort()
e.partitionList = append(e.partitionList, e.rowChunks)
}
return nil
}
func (e *SortExec) initCompareFuncs() {
e.keyCmpFuncs = make([]chunk.CompareFunc, len(e.ByItems))
for i := range e.ByItems {
keyType := e.ByItems[i].Expr.GetType()
e.keyCmpFuncs[i] = chunk.GetCompareFunc(keyType)
}
}
func (e *SortExec) buildKeyColumns() {
e.keyColumns = make([]int, 0, len(e.ByItems))
for _, by := range e.ByItems {
col := by.Expr.(*expression.Column)
e.keyColumns = append(e.keyColumns, col.Index)
}
}
func (e *SortExec) lessRow(rowI, rowJ chunk.Row) bool {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp < 0 {
return true
} else if cmp > 0 {
return false
}
}
return false
}
func (e *SortExec) compressRow(rowI, rowJ chunk.Row) int {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp != 0 {
return cmp
}
}
return 0
}
type partitionPointer struct {
row chunk.Row
partitionID int
| partitionList []*chunk.SortedRowContainer
// multiWayMerge uses multi-way merge for spill disk.
// The multi-way merge algorithm can refer to https://en.wikipedia.org/wiki/K-way_merge_algorithm | random_line_split | |
sort.go | unk) (err error) {
if e.multiWayMerge == nil {
e.multiWayMerge = &multiWayMerge{e.lessRow, e.compressRow, make([]partitionPointer, 0, len(e.partitionList))}
for i := 0; i < len(e.partitionList); i++ {
row, err := e.partitionList[i].GetSortedRow(0)
if err != nil {
return err
}
e.multiWayMerge.elements = append(e.multiWayMerge.elements, partitionPointer{row: row, partitionID: i, consumed: 0})
}
heap.Init(e.multiWayMerge)
}
for !req.IsFull() && e.multiWayMerge.Len() > 0 {
partitionPtr := e.multiWayMerge.elements[0]
req.AppendRow(partitionPtr.row)
partitionPtr.consumed++
if partitionPtr.consumed >= e.partitionList[partitionPtr.partitionID].NumRow() {
heap.Remove(e.multiWayMerge, 0)
continue
}
partitionPtr.row, err = e.partitionList[partitionPtr.partitionID].
GetSortedRow(partitionPtr.consumed)
if err != nil {
return err
}
e.multiWayMerge.elements[0] = partitionPtr
heap.Fix(e.multiWayMerge, 0)
}
return nil
}
func (e *SortExec) fetchRowChunks(ctx context.Context) error {
fields := retTypes(e)
byItemsDesc := make([]bool, len(e.ByItems))
for i, byItem := range e.ByItems {
byItemsDesc[i] = byItem.Desc
}
e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
if variable.EnableTmpStorageOnOOM.Load() {
e.spillAction = e.rowChunks.ActionSpill()
failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
if val.(bool) {
e.spillAction = e.rowChunks.ActionSpillForTest()
defer e.spillAction.WaitForTest()
}
})
e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
}
for {
chk := tryNewCacheChunk(e.Children(0))
err := Next(ctx, e.Children(0), chk)
if err != nil {
return err
}
rowCount := chk.NumRows()
if rowCount == 0 {
break
}
if err := e.rowChunks.Add(chk); err != nil {
if errors.Is(err, chunk.ErrCannotAddBecauseSorted) {
e.partitionList = append(e.partitionList, e.rowChunks)
e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
e.spillAction = e.rowChunks.ActionSpill()
failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
if val.(bool) {
e.spillAction = e.rowChunks.ActionSpillForTest()
defer e.spillAction.WaitForTest()
}
})
e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
err = e.rowChunks.Add(chk)
}
if err != nil {
return err
}
}
}
failpoint.Inject("SignalCheckpointForSort", func(val failpoint.Value) {
if val.(bool) {
if e.Ctx().GetSessionVars().ConnectionID == 123456 {
e.Ctx().GetSessionVars().MemTracker.NeedKill.Store(true)
}
}
})
if e.rowChunks.NumRow() > 0 {
e.rowChunks.Sort()
e.partitionList = append(e.partitionList, e.rowChunks)
}
return nil
}
func (e *SortExec) initCompareFuncs() {
e.keyCmpFuncs = make([]chunk.CompareFunc, len(e.ByItems))
for i := range e.ByItems {
keyType := e.ByItems[i].Expr.GetType()
e.keyCmpFuncs[i] = chunk.GetCompareFunc(keyType)
}
}
func (e *SortExec) buildKeyColumns() {
e.keyColumns = make([]int, 0, len(e.ByItems))
for _, by := range e.ByItems {
col := by.Expr.(*expression.Column)
e.keyColumns = append(e.keyColumns, col.Index)
}
}
func (e *SortExec) lessRow(rowI, rowJ chunk.Row) bool {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp < 0 {
return true
} else if cmp > 0 {
return false
}
}
return false
}
func (e *SortExec) compressRow(rowI, rowJ chunk.Row) int {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp != 0 {
return cmp
}
}
return 0
}
type partitionPointer struct {
row chunk.Row
partitionID int
consumed int
}
type multiWayMerge struct {
lessRowFunction func(rowI chunk.Row, rowJ chunk.Row) bool
compressRowFunction func(rowI chunk.Row, rowJ chunk.Row) int
elements []partitionPointer
}
func (h *multiWayMerge) Less(i, j int) bool {
rowI := h.elements[i].row
rowJ := h.elements[j].row
return h.lessRowFunction(rowI, rowJ)
}
func (h *multiWayMerge) Len() int {
return len(h.elements)
}
func (*multiWayMerge) Push(interface{}) {
// Should never be called.
}
func (h *multiWayMerge) Pop() interface{} {
h.elements = h.elements[:len(h.elements)-1]
return nil
}
func (h *multiWayMerge) Swap(i, j int) {
h.elements[i], h.elements[j] = h.elements[j], h.elements[i]
}
// TopNExec implements a Top-N algorithm and it is built from a SELECT statement with ORDER BY and LIMIT.
// Instead of sorting all the rows fetched from the table, it keeps the Top-N elements only in a heap to reduce memory usage.
type TopNExec struct {
SortExec
limit *plannercore.PhysicalLimit
totalLimit uint64
// rowChunks is the chunks to store row values.
rowChunks *chunk.List
// rowPointer store the chunk index and row index for each row.
rowPtrs []chunk.RowPtr
chkHeap *topNChunkHeap
}
// topNChunkHeap implements heap.Interface.
type topNChunkHeap struct {
*TopNExec
}
// Less implement heap.Interface, but since we mantains a max heap,
// this function returns true if row i is greater than row j.
func (h *topNChunkHeap) Less(i, j int) bool {
rowI := h.rowChunks.GetRow(h.rowPtrs[i])
rowJ := h.rowChunks.GetRow(h.rowPtrs[j])
return h.greaterRow(rowI, rowJ)
}
func (h *topNChunkHeap) greaterRow(rowI, rowJ chunk.Row) bool {
for i, colIdx := range h.keyColumns {
cmpFunc := h.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if h.ByItems[i].Desc {
cmp = -cmp
}
if cmp > 0 {
return true
} else if cmp < 0 {
return false
}
}
return false
}
func (h *topNChunkHeap) Len() int {
return len(h.rowPtrs)
}
func (*topNChunkHeap) Push(interface{}) {
// Should never be called.
}
func (h *topNChunkHeap) Pop() interface{} {
h.rowPtrs = h.rowPtrs[:len(h.rowPtrs)-1]
// We don't need the popped value, return nil to avoid memory allocation.
return nil
}
func (h *topNChunkHeap) Swap(i, j int) {
h.rowPtrs[i], h.rowPtrs[j] = h.rowPtrs[j], h.rowPtrs[i]
}
// keyColumnsLess is the less function for key columns.
func (e *TopNExec) | keyColumnsLess | identifier_name | |
sort.go | Exec) initCompareFuncs() {
e.keyCmpFuncs = make([]chunk.CompareFunc, len(e.ByItems))
for i := range e.ByItems {
keyType := e.ByItems[i].Expr.GetType()
e.keyCmpFuncs[i] = chunk.GetCompareFunc(keyType)
}
}
func (e *SortExec) buildKeyColumns() {
e.keyColumns = make([]int, 0, len(e.ByItems))
for _, by := range e.ByItems {
col := by.Expr.(*expression.Column)
e.keyColumns = append(e.keyColumns, col.Index)
}
}
func (e *SortExec) lessRow(rowI, rowJ chunk.Row) bool {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp < 0 {
return true
} else if cmp > 0 {
return false
}
}
return false
}
func (e *SortExec) compressRow(rowI, rowJ chunk.Row) int {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp != 0 {
return cmp
}
}
return 0
}
type partitionPointer struct {
row chunk.Row
partitionID int
consumed int
}
type multiWayMerge struct {
lessRowFunction func(rowI chunk.Row, rowJ chunk.Row) bool
compressRowFunction func(rowI chunk.Row, rowJ chunk.Row) int
elements []partitionPointer
}
func (h *multiWayMerge) Less(i, j int) bool {
rowI := h.elements[i].row
rowJ := h.elements[j].row
return h.lessRowFunction(rowI, rowJ)
}
func (h *multiWayMerge) Len() int {
return len(h.elements)
}
func (*multiWayMerge) Push(interface{}) {
// Should never be called.
}
func (h *multiWayMerge) Pop() interface{} {
h.elements = h.elements[:len(h.elements)-1]
return nil
}
func (h *multiWayMerge) Swap(i, j int) {
h.elements[i], h.elements[j] = h.elements[j], h.elements[i]
}
// TopNExec implements a Top-N algorithm and it is built from a SELECT statement with ORDER BY and LIMIT.
// Instead of sorting all the rows fetched from the table, it keeps the Top-N elements only in a heap to reduce memory usage.
type TopNExec struct {
SortExec
limit *plannercore.PhysicalLimit
totalLimit uint64
// rowChunks is the chunks to store row values.
rowChunks *chunk.List
// rowPointer store the chunk index and row index for each row.
rowPtrs []chunk.RowPtr
chkHeap *topNChunkHeap
}
// topNChunkHeap implements heap.Interface.
type topNChunkHeap struct {
*TopNExec
}
// Less implement heap.Interface, but since we mantains a max heap,
// this function returns true if row i is greater than row j.
func (h *topNChunkHeap) Less(i, j int) bool {
rowI := h.rowChunks.GetRow(h.rowPtrs[i])
rowJ := h.rowChunks.GetRow(h.rowPtrs[j])
return h.greaterRow(rowI, rowJ)
}
func (h *topNChunkHeap) greaterRow(rowI, rowJ chunk.Row) bool {
for i, colIdx := range h.keyColumns {
cmpFunc := h.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if h.ByItems[i].Desc {
cmp = -cmp
}
if cmp > 0 {
return true
} else if cmp < 0 {
return false
}
}
return false
}
func (h *topNChunkHeap) Len() int {
return len(h.rowPtrs)
}
func (*topNChunkHeap) Push(interface{}) {
// Should never be called.
}
func (h *topNChunkHeap) Pop() interface{} {
h.rowPtrs = h.rowPtrs[:len(h.rowPtrs)-1]
// We don't need the popped value, return nil to avoid memory allocation.
return nil
}
func (h *topNChunkHeap) Swap(i, j int) {
h.rowPtrs[i], h.rowPtrs[j] = h.rowPtrs[j], h.rowPtrs[i]
}
// keyColumnsLess is the less function for key columns.
func (e *TopNExec) keyColumnsLess(i, j chunk.RowPtr) bool {
rowI := e.rowChunks.GetRow(i)
rowJ := e.rowChunks.GetRow(j)
return e.lessRow(rowI, rowJ)
}
func (e *TopNExec) keyColumnsCompare(i, j chunk.RowPtr) int {
rowI := e.rowChunks.GetRow(i)
rowJ := e.rowChunks.GetRow(j)
return e.compressRow(rowI, rowJ)
}
func (e *TopNExec) initPointers() {
e.rowPtrs = make([]chunk.RowPtr, 0, e.rowChunks.Len())
e.memTracker.Consume(int64(8 * e.rowChunks.Len()))
for chkIdx := 0; chkIdx < e.rowChunks.NumChunks(); chkIdx++ {
rowChk := e.rowChunks.GetChunk(chkIdx)
for rowIdx := 0; rowIdx < rowChk.NumRows(); rowIdx++ {
e.rowPtrs = append(e.rowPtrs, chunk.RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)})
}
}
}
// Open implements the Executor Open interface.
func (e *TopNExec) Open(ctx context.Context) error {
e.memTracker = memory.NewTracker(e.ID(), -1)
e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
e.fetched = false
e.Idx = 0
return e.Children(0).Open(ctx)
}
// Next implements the Executor Next interface.
func (e *TopNExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if !e.fetched {
e.totalLimit = e.limit.Offset + e.limit.Count
e.Idx = int(e.limit.Offset)
err := e.loadChunksUntilTotalLimit(ctx)
if err != nil {
return err
}
err = e.executeTopN(ctx)
if err != nil {
return err
}
e.fetched = true
}
if e.Idx >= len(e.rowPtrs) {
return nil
}
if !req.IsFull() {
numToAppend := mathutil.Min(len(e.rowPtrs)-e.Idx, req.RequiredRows()-req.NumRows())
rows := make([]chunk.Row, numToAppend)
for index := 0; index < numToAppend; index++ {
rows[index] = e.rowChunks.GetRow(e.rowPtrs[e.Idx])
e.Idx++
}
req.AppendRows(rows)
}
return nil
}
func (e *TopNExec) loadChunksUntilTotalLimit(ctx context.Context) error {
e.chkHeap = &topNChunkHeap{e}
e.rowChunks = chunk.NewList(retTypes(e), e.InitCap(), e.MaxChunkSize())
e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
for uint64(e.rowChunks.Len()) < e.totalLimit {
srcChk := tryNewCacheChunk(e.Children(0))
// adjust required rows by total limit
srcChk.SetRequiredRows(int(e.totalLimit-uint64(e.rowChunks.Len())), e.MaxChunkSize())
err := Next(ctx, e.Children(0), srcChk)
if err != nil {
return err
}
if srcChk.NumRows() == 0 {
break
}
e.rowChunks.Add(srcChk)
}
e.initPointers()
e.initCompareFuncs()
e.buildKeyColumns()
return nil
}
const topNCompactionFactor = 4
func (e *TopNExec) executeTopN(ctx context.Context) error {
heap.Init(e.chkHeap)
for uint64(len(e.rowPtrs)) > e.totalLimit {
// The number of rows we loaded may exceeds total limit, remove greatest rows by Pop.
heap.Pop(e.chkHeap)
}
childRowChk := tryNewCacheChunk(e.Children(0))
for {
err := Next(ctx, e.Children(0), childRowChk)
if err != nil {
return err
}
if childRowChk.NumRows() == 0 {
break
}
err = e.processChildChk(childRowChk)
if err != nil {
return err
}
if e.rowChunks.Len() > len(e.rowPtrs)*topNCompactionFactor | {
err = e.doCompaction()
if err != nil {
return err
}
} | conditional_block | |
sort.go | return e.Children(0).Close()
}
// Open implements the Executor Open interface.
func (e *SortExec) Open(ctx context.Context) error {
e.fetched = false
e.Idx = 0
// To avoid duplicated initialization for TopNExec.
if e.memTracker == nil {
e.memTracker = memory.NewTracker(e.ID(), -1)
e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
e.diskTracker = memory.NewTracker(e.ID(), -1)
e.diskTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.DiskTracker)
}
e.partitionList = e.partitionList[:0]
return e.Children(0).Open(ctx)
}
// Next implements the Executor Next interface.
// Sort constructs the result following these step:
// 1. Read as mush as rows into memory.
// 2. If memory quota is triggered, sort these rows in memory and put them into disk as partition 1, then reset
// the memory quota trigger and return to step 1
// 3. If memory quota is not triggered and child is consumed, sort these rows in memory as partition N.
// 4. Merge sort if the count of partitions is larger than 1. If there is only one partition in step 4, it works
// just like in-memory sort before.
func (e *SortExec) Next(ctx context.Context, req *chunk.Chunk) error {
req.Reset()
if !e.fetched {
e.initCompareFuncs()
e.buildKeyColumns()
err := e.fetchRowChunks(ctx)
if err != nil {
return err
}
e.fetched = true
}
if len(e.partitionList) == 0 {
return nil
}
if len(e.partitionList) > 1 {
if err := e.externalSorting(req); err != nil {
return err
}
} else {
for !req.IsFull() && e.Idx < e.partitionList[0].NumRow() {
row, err := e.partitionList[0].GetSortedRow(e.Idx)
if err != nil {
return err
}
req.AppendRow(row)
e.Idx++
}
}
return nil
}
func (e *SortExec) externalSorting(req *chunk.Chunk) (err error) {
if e.multiWayMerge == nil {
e.multiWayMerge = &multiWayMerge{e.lessRow, e.compressRow, make([]partitionPointer, 0, len(e.partitionList))}
for i := 0; i < len(e.partitionList); i++ {
row, err := e.partitionList[i].GetSortedRow(0)
if err != nil {
return err
}
e.multiWayMerge.elements = append(e.multiWayMerge.elements, partitionPointer{row: row, partitionID: i, consumed: 0})
}
heap.Init(e.multiWayMerge)
}
for !req.IsFull() && e.multiWayMerge.Len() > 0 {
partitionPtr := e.multiWayMerge.elements[0]
req.AppendRow(partitionPtr.row)
partitionPtr.consumed++
if partitionPtr.consumed >= e.partitionList[partitionPtr.partitionID].NumRow() {
heap.Remove(e.multiWayMerge, 0)
continue
}
partitionPtr.row, err = e.partitionList[partitionPtr.partitionID].
GetSortedRow(partitionPtr.consumed)
if err != nil {
return err
}
e.multiWayMerge.elements[0] = partitionPtr
heap.Fix(e.multiWayMerge, 0)
}
return nil
}
func (e *SortExec) fetchRowChunks(ctx context.Context) error {
fields := retTypes(e)
byItemsDesc := make([]bool, len(e.ByItems))
for i, byItem := range e.ByItems {
byItemsDesc[i] = byItem.Desc
}
e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
if variable.EnableTmpStorageOnOOM.Load() {
e.spillAction = e.rowChunks.ActionSpill()
failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
if val.(bool) {
e.spillAction = e.rowChunks.ActionSpillForTest()
defer e.spillAction.WaitForTest()
}
})
e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
}
for {
chk := tryNewCacheChunk(e.Children(0))
err := Next(ctx, e.Children(0), chk)
if err != nil {
return err
}
rowCount := chk.NumRows()
if rowCount == 0 {
break
}
if err := e.rowChunks.Add(chk); err != nil {
if errors.Is(err, chunk.ErrCannotAddBecauseSorted) {
e.partitionList = append(e.partitionList, e.rowChunks)
e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
e.spillAction = e.rowChunks.ActionSpill()
failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
if val.(bool) {
e.spillAction = e.rowChunks.ActionSpillForTest()
defer e.spillAction.WaitForTest()
}
})
e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
err = e.rowChunks.Add(chk)
}
if err != nil {
return err
}
}
}
failpoint.Inject("SignalCheckpointForSort", func(val failpoint.Value) {
if val.(bool) {
if e.Ctx().GetSessionVars().ConnectionID == 123456 {
e.Ctx().GetSessionVars().MemTracker.NeedKill.Store(true)
}
}
})
if e.rowChunks.NumRow() > 0 {
e.rowChunks.Sort()
e.partitionList = append(e.partitionList, e.rowChunks)
}
return nil
}
func (e *SortExec) initCompareFuncs() {
e.keyCmpFuncs = make([]chunk.CompareFunc, len(e.ByItems))
for i := range e.ByItems {
keyType := e.ByItems[i].Expr.GetType()
e.keyCmpFuncs[i] = chunk.GetCompareFunc(keyType)
}
}
func (e *SortExec) buildKeyColumns() {
e.keyColumns = make([]int, 0, len(e.ByItems))
for _, by := range e.ByItems {
col := by.Expr.(*expression.Column)
e.keyColumns = append(e.keyColumns, col.Index)
}
}
func (e *SortExec) lessRow(rowI, rowJ chunk.Row) bool {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp < 0 {
return true
} else if cmp > 0 {
return false
}
}
return false
}
func (e *SortExec) compressRow(rowI, rowJ chunk.Row) int {
for i, colIdx := range e.keyColumns {
cmpFunc := e.keyCmpFuncs[i]
cmp := cmpFunc(rowI, colIdx, rowJ, colIdx)
if e.ByItems[i].Desc {
cmp = -cmp
}
if cmp != 0 {
return cmp
}
}
return 0
}
type partitionPointer struct {
row chunk.Row
partitionID int
consumed int
}
type multiWayMerge struct {
lessRowFunction func(rowI chunk.Row, rowJ chunk.Row) bool
compressRowFunction func(rowI chunk.Row, rowJ chunk.Row) int
elements []partitionPointer
}
func (h *multiWayMerge) Less(i, j int) bool {
rowI := h.elements[i].row
rowJ := h.elements[j].row
return h.lessRowFunction | {
for _, container := range e.partitionList {
err := container.Close()
if err != nil {
return err
}
}
e.partitionList = e.partitionList[:0]
if e.rowChunks != nil {
e.memTracker.Consume(-e.rowChunks.GetMemTracker().BytesConsumed())
e.rowChunks = nil
}
e.memTracker = nil
e.diskTracker = nil
e.multiWayMerge = nil
if e.spillAction != nil {
e.spillAction.SetFinished()
}
e.spillAction = nil | identifier_body | |
eth_pubsub.rs | RichHeader, Log};
use sync::{SyncState, Notification};
use client_traits::{BlockChainClient, ChainNotify};
use ethereum_types::H256;
use light::cache::Cache;
use light::client::{LightChainClient, LightChainNotify};
use light::on_demand::OnDemandRequester;
use parity_runtime::Executor;
use parking_lot::{RwLock, Mutex};
use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork};
use types::{
chain_notify::{NewBlocks, ChainRouteType},
ids::BlockId,
encoded,
filter::Filter as EthFilter,
};
type Client = Sink<pubsub::Result>;
/// Eth PubSub implementation.
pub struct EthPubSubClient<C> {
handler: Arc<ChainNotificationHandler<C>>,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync
{
/// adds a sync notification channel to the pubsub client
pub fn add_sync_notifier<F>(&mut self, receiver: Notification<SyncState>, f: F)
where
F: 'static + Fn(SyncState) -> Option<pubsub::PubSubSyncStatus> + Send
{
let weak_handler = Arc::downgrade(&self.handler);
self.handler.executor.spawn(
receiver.for_each(move |state| {
if let Some(status) = f(state) {
if let Some(handler) = weak_handler.upgrade() {
handler.notify_syncing(status);
return Ok(())
}
}
Err(())
})
)
}
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync {
/// Creates new `EthPubSubClient`.
pub fn new(client: Arc<C>, executor: Executor, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>) -> Self {
let heads_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let logs_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let sync_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let handler = Arc::new(ChainNotificationHandler {
client,
executor,
heads_subscribers: heads_subscribers.clone(),
logs_subscribers: logs_subscribers.clone(),
transactions_subscribers: transactions_subscribers.clone(),
sync_subscribers: sync_subscribers.clone(),
});
let handler2 = Arc::downgrade(&handler);
handler.executor.spawn(pool_receiver
.for_each(move |hashes| {
if let Some(handler2) = handler2.upgrade() {
handler2.notify_new_transactions(&hashes.to_vec());
return Ok(())
}
Err(())
})
);
EthPubSubClient {
handler,
sync_subscribers,
heads_subscribers,
logs_subscribers,
transactions_subscribers,
}
}
/// Returns a chain notification handler.
pub fn handler(&self) -> Weak<ChainNotificationHandler<C>> {
Arc::downgrade(&self.handler)
}
}
impl<S, OD> EthPubSubClient<LightFetch<S, OD>>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
/// Creates a new `EthPubSubClient` for `LightClient`.
pub fn light(
client: Arc<dyn LightChainClient>,
on_demand: Arc<OD>,
sync: Arc<S>,
cache: Arc<Mutex<Cache>>,
executor: Executor,
gas_price_percentile: usize,
pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>
) -> Self {
let fetch = LightFetch {
client,
on_demand,
sync,
cache,
gas_price_percentile,
};
EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver)
}
}
/// PubSub Notification handler.
pub struct ChainNotificationHandler<C> {
client: Arc<C>,
executor: Executor,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> ChainNotificationHandler<C> {
fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) {
executor.spawn(subscriber
.notify(Ok(result))
.map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))
);
}
fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) {
for subscriber in self.heads_subscribers.read().values() {
for &(ref header, ref extra_info) in headers {
Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader {
inner: header.into(),
extra_info: extra_info.clone(),
})));
}
}
}
fn notify_syncing(&self, sync_status: pubsub::PubSubSyncStatus) {
for subscriber in self.sync_subscribers.read().values() {
Self::notify(&self.executor, subscriber, pubsub::Result::SyncState(sync_status.clone()));
}
}
fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where
F: Fn(EthFilter, &Ex) -> T,
Ex: Send,
T: IntoFuture<Item = Vec<Log>, Error = Error>,
T::Future: Send + 'static,
{
for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() {
let logs = futures::future::join_all(enacted
.iter() | logs(filter, ex).into_future()
})
.collect::<Vec<_>>()
);
let limit = filter.limit;
let executor = self.executor.clone();
let subscriber = subscriber.clone();
self.executor.spawn(logs
.map(move |logs| {
let logs = logs.into_iter().flat_map(|log| log).collect();
for log in limit_logs(logs, limit) {
Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log)))
}
})
.map_err(|e| warn!("Unable to fetch latest logs: {:?}", e))
);
}
}
/// Notify all subscribers about new transaction hashes.
fn notify_new_transactions(&self, hashes: &[H256]) {
for subscriber in self.transactions_subscribers.read().values() {
for hash in hashes {
Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash));
}
}
}
}
/// A light client wrapper struct.
pub trait LightClient: Send + Sync {
/// Get a recent block header.
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;
/// Fetch logs.
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>;
}
impl<S, OD> LightClient for LightFetch<S, OD>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
self.client.block_header(id)
}
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> {
Box::new(LightFetch::logs(self, filter)) as BoxFuture<_>
}
}
impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> {
fn new_headers(&self, enacted: &[H256]) {
let headers = enacted
.iter()
.filter_map(|hash| self.client.block_header(BlockId::Hash(*hash)))
.map(|header| (header, Default::default()))
.collect::<Vec<_>>();
self.notify_heads(&headers);
self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter))
}
}
impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
fn new_blocks(&self, new_blocks: NewBlocks) {
if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return }
const EXTRA_INFO_PROOF: &str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed";
let headers = new_blocks.route.route()
.iter()
.filter_map(|&(hash, ref typ)| {
match typ {
| .map(|&(hash, ref ex)| {
let mut filter = filter.clone();
filter.from_block = BlockId::Hash(hash);
filter.to_block = filter.from_block; | random_line_split |
eth_pubsub.rs | RichHeader, Log};
use sync::{SyncState, Notification};
use client_traits::{BlockChainClient, ChainNotify};
use ethereum_types::H256;
use light::cache::Cache;
use light::client::{LightChainClient, LightChainNotify};
use light::on_demand::OnDemandRequester;
use parity_runtime::Executor;
use parking_lot::{RwLock, Mutex};
use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork};
use types::{
chain_notify::{NewBlocks, ChainRouteType},
ids::BlockId,
encoded,
filter::Filter as EthFilter,
};
type Client = Sink<pubsub::Result>;
/// Eth PubSub implementation.
pub struct EthPubSubClient<C> {
handler: Arc<ChainNotificationHandler<C>>,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync
{
/// adds a sync notification channel to the pubsub client
pub fn add_sync_notifier<F>(&mut self, receiver: Notification<SyncState>, f: F)
where
F: 'static + Fn(SyncState) -> Option<pubsub::PubSubSyncStatus> + Send
{
let weak_handler = Arc::downgrade(&self.handler);
self.handler.executor.spawn(
receiver.for_each(move |state| {
if let Some(status) = f(state) |
Err(())
})
)
}
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync {
/// Creates new `EthPubSubClient`.
pub fn new(client: Arc<C>, executor: Executor, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>) -> Self {
let heads_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let logs_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let sync_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let handler = Arc::new(ChainNotificationHandler {
client,
executor,
heads_subscribers: heads_subscribers.clone(),
logs_subscribers: logs_subscribers.clone(),
transactions_subscribers: transactions_subscribers.clone(),
sync_subscribers: sync_subscribers.clone(),
});
let handler2 = Arc::downgrade(&handler);
handler.executor.spawn(pool_receiver
.for_each(move |hashes| {
if let Some(handler2) = handler2.upgrade() {
handler2.notify_new_transactions(&hashes.to_vec());
return Ok(())
}
Err(())
})
);
EthPubSubClient {
handler,
sync_subscribers,
heads_subscribers,
logs_subscribers,
transactions_subscribers,
}
}
/// Returns a chain notification handler.
pub fn handler(&self) -> Weak<ChainNotificationHandler<C>> {
Arc::downgrade(&self.handler)
}
}
impl<S, OD> EthPubSubClient<LightFetch<S, OD>>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
/// Creates a new `EthPubSubClient` for `LightClient`.
pub fn light(
client: Arc<dyn LightChainClient>,
on_demand: Arc<OD>,
sync: Arc<S>,
cache: Arc<Mutex<Cache>>,
executor: Executor,
gas_price_percentile: usize,
pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>
) -> Self {
let fetch = LightFetch {
client,
on_demand,
sync,
cache,
gas_price_percentile,
};
EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver)
}
}
/// PubSub Notification handler.
pub struct ChainNotificationHandler<C> {
client: Arc<C>,
executor: Executor,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> ChainNotificationHandler<C> {
fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) {
executor.spawn(subscriber
.notify(Ok(result))
.map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))
);
}
fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) {
for subscriber in self.heads_subscribers.read().values() {
for &(ref header, ref extra_info) in headers {
Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader {
inner: header.into(),
extra_info: extra_info.clone(),
})));
}
}
}
fn notify_syncing(&self, sync_status: pubsub::PubSubSyncStatus) {
for subscriber in self.sync_subscribers.read().values() {
Self::notify(&self.executor, subscriber, pubsub::Result::SyncState(sync_status.clone()));
}
}
fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where
F: Fn(EthFilter, &Ex) -> T,
Ex: Send,
T: IntoFuture<Item = Vec<Log>, Error = Error>,
T::Future: Send + 'static,
{
for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() {
let logs = futures::future::join_all(enacted
.iter()
.map(|&(hash, ref ex)| {
let mut filter = filter.clone();
filter.from_block = BlockId::Hash(hash);
filter.to_block = filter.from_block;
logs(filter, ex).into_future()
})
.collect::<Vec<_>>()
);
let limit = filter.limit;
let executor = self.executor.clone();
let subscriber = subscriber.clone();
self.executor.spawn(logs
.map(move |logs| {
let logs = logs.into_iter().flat_map(|log| log).collect();
for log in limit_logs(logs, limit) {
Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log)))
}
})
.map_err(|e| warn!("Unable to fetch latest logs: {:?}", e))
);
}
}
/// Notify all subscribers about new transaction hashes.
fn notify_new_transactions(&self, hashes: &[H256]) {
for subscriber in self.transactions_subscribers.read().values() {
for hash in hashes {
Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash));
}
}
}
}
/// A light client wrapper struct.
pub trait LightClient: Send + Sync {
/// Get a recent block header.
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;
/// Fetch logs.
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>;
}
impl<S, OD> LightClient for LightFetch<S, OD>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
self.client.block_header(id)
}
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> {
Box::new(LightFetch::logs(self, filter)) as BoxFuture<_>
}
}
impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> {
fn new_headers(&self, enacted: &[H256]) {
let headers = enacted
.iter()
.filter_map(|hash| self.client.block_header(BlockId::Hash(*hash)))
.map(|header| (header, Default::default()))
.collect::<Vec<_>>();
self.notify_heads(&headers);
self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter))
}
}
impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
fn new_blocks(&self, new_blocks: NewBlocks) {
if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return }
const EXTRA_INFO_PROOF: &str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed";
let headers = new_blocks.route.route()
.iter()
.filter_map(|&(hash, ref typ)| {
match | {
if let Some(handler) = weak_handler.upgrade() {
handler.notify_syncing(status);
return Ok(())
}
} | conditional_block |
eth_pubsub.rs | RichHeader, Log};
use sync::{SyncState, Notification};
use client_traits::{BlockChainClient, ChainNotify};
use ethereum_types::H256;
use light::cache::Cache;
use light::client::{LightChainClient, LightChainNotify};
use light::on_demand::OnDemandRequester;
use parity_runtime::Executor;
use parking_lot::{RwLock, Mutex};
use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork};
use types::{
chain_notify::{NewBlocks, ChainRouteType},
ids::BlockId,
encoded,
filter::Filter as EthFilter,
};
type Client = Sink<pubsub::Result>;
/// Eth PubSub implementation.
pub struct EthPubSubClient<C> {
handler: Arc<ChainNotificationHandler<C>>,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync
{
/// adds a sync notification channel to the pubsub client
pub fn add_sync_notifier<F>(&mut self, receiver: Notification<SyncState>, f: F)
where
F: 'static + Fn(SyncState) -> Option<pubsub::PubSubSyncStatus> + Send
{
let weak_handler = Arc::downgrade(&self.handler);
self.handler.executor.spawn(
receiver.for_each(move |state| {
if let Some(status) = f(state) {
if let Some(handler) = weak_handler.upgrade() {
handler.notify_syncing(status);
return Ok(())
}
}
Err(())
})
)
}
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync {
/// Creates new `EthPubSubClient`.
pub fn new(client: Arc<C>, executor: Executor, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>) -> Self {
let heads_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let logs_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let sync_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let handler = Arc::new(ChainNotificationHandler {
client,
executor,
heads_subscribers: heads_subscribers.clone(),
logs_subscribers: logs_subscribers.clone(),
transactions_subscribers: transactions_subscribers.clone(),
sync_subscribers: sync_subscribers.clone(),
});
let handler2 = Arc::downgrade(&handler);
handler.executor.spawn(pool_receiver
.for_each(move |hashes| {
if let Some(handler2) = handler2.upgrade() {
handler2.notify_new_transactions(&hashes.to_vec());
return Ok(())
}
Err(())
})
);
EthPubSubClient {
handler,
sync_subscribers,
heads_subscribers,
logs_subscribers,
transactions_subscribers,
}
}
/// Returns a chain notification handler.
pub fn handler(&self) -> Weak<ChainNotificationHandler<C>> {
Arc::downgrade(&self.handler)
}
}
impl<S, OD> EthPubSubClient<LightFetch<S, OD>>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
/// Creates a new `EthPubSubClient` for `LightClient`.
pub fn light(
client: Arc<dyn LightChainClient>,
on_demand: Arc<OD>,
sync: Arc<S>,
cache: Arc<Mutex<Cache>>,
executor: Executor,
gas_price_percentile: usize,
pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>
) -> Self {
let fetch = LightFetch {
client,
on_demand,
sync,
cache,
gas_price_percentile,
};
EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver)
}
}
/// PubSub Notification handler.
pub struct ChainNotificationHandler<C> {
client: Arc<C>,
executor: Executor,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> ChainNotificationHandler<C> {
fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) {
executor.spawn(subscriber
.notify(Ok(result))
.map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))
);
}
fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) {
for subscriber in self.heads_subscribers.read().values() {
for &(ref header, ref extra_info) in headers {
Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader {
inner: header.into(),
extra_info: extra_info.clone(),
})));
}
}
}
fn notify_syncing(&self, sync_status: pubsub::PubSubSyncStatus) {
for subscriber in self.sync_subscribers.read().values() {
Self::notify(&self.executor, subscriber, pubsub::Result::SyncState(sync_status.clone()));
}
}
fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where
F: Fn(EthFilter, &Ex) -> T,
Ex: Send,
T: IntoFuture<Item = Vec<Log>, Error = Error>,
T::Future: Send + 'static,
{
for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() {
let logs = futures::future::join_all(enacted
.iter()
.map(|&(hash, ref ex)| {
let mut filter = filter.clone();
filter.from_block = BlockId::Hash(hash);
filter.to_block = filter.from_block;
logs(filter, ex).into_future()
})
.collect::<Vec<_>>()
);
let limit = filter.limit;
let executor = self.executor.clone();
let subscriber = subscriber.clone();
self.executor.spawn(logs
.map(move |logs| {
let logs = logs.into_iter().flat_map(|log| log).collect();
for log in limit_logs(logs, limit) {
Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log)))
}
})
.map_err(|e| warn!("Unable to fetch latest logs: {:?}", e))
);
}
}
/// Notify all subscribers about new transaction hashes.
fn notify_new_transactions(&self, hashes: &[H256]) {
for subscriber in self.transactions_subscribers.read().values() {
for hash in hashes {
Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash));
}
}
}
}
/// A light client wrapper struct.
pub trait LightClient: Send + Sync {
/// Get a recent block header.
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;
/// Fetch logs.
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>;
}
impl<S, OD> LightClient for LightFetch<S, OD>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
fn | (&self, id: BlockId) -> Option<encoded::Header> {
self.client.block_header(id)
}
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> {
Box::new(LightFetch::logs(self, filter)) as BoxFuture<_>
}
}
impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> {
fn new_headers(&self, enacted: &[H256]) {
let headers = enacted
.iter()
.filter_map(|hash| self.client.block_header(BlockId::Hash(*hash)))
.map(|header| (header, Default::default()))
.collect::<Vec<_>>();
self.notify_heads(&headers);
self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter))
}
}
impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
fn new_blocks(&self, new_blocks: NewBlocks) {
if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return }
const EXTRA_INFO_PROOF: &str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed";
let headers = new_blocks.route.route()
.iter()
.filter_map(|&(hash, ref typ)| {
match typ | block_header | identifier_name |
eth_pubsub.rs | RichHeader, Log};
use sync::{SyncState, Notification};
use client_traits::{BlockChainClient, ChainNotify};
use ethereum_types::H256;
use light::cache::Cache;
use light::client::{LightChainClient, LightChainNotify};
use light::on_demand::OnDemandRequester;
use parity_runtime::Executor;
use parking_lot::{RwLock, Mutex};
use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork};
use types::{
chain_notify::{NewBlocks, ChainRouteType},
ids::BlockId,
encoded,
filter::Filter as EthFilter,
};
type Client = Sink<pubsub::Result>;
/// Eth PubSub implementation.
pub struct EthPubSubClient<C> {
handler: Arc<ChainNotificationHandler<C>>,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync
{
/// adds a sync notification channel to the pubsub client
pub fn add_sync_notifier<F>(&mut self, receiver: Notification<SyncState>, f: F)
where
F: 'static + Fn(SyncState) -> Option<pubsub::PubSubSyncStatus> + Send
{
let weak_handler = Arc::downgrade(&self.handler);
self.handler.executor.spawn(
receiver.for_each(move |state| {
if let Some(status) = f(state) {
if let Some(handler) = weak_handler.upgrade() {
handler.notify_syncing(status);
return Ok(())
}
}
Err(())
})
)
}
}
impl<C> EthPubSubClient<C>
where
C: 'static + Send + Sync {
/// Creates new `EthPubSubClient`.
pub fn new(client: Arc<C>, executor: Executor, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>) -> Self {
let heads_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let logs_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let sync_subscribers = Arc::new(RwLock::new(Subscribers::default()));
let handler = Arc::new(ChainNotificationHandler {
client,
executor,
heads_subscribers: heads_subscribers.clone(),
logs_subscribers: logs_subscribers.clone(),
transactions_subscribers: transactions_subscribers.clone(),
sync_subscribers: sync_subscribers.clone(),
});
let handler2 = Arc::downgrade(&handler);
handler.executor.spawn(pool_receiver
.for_each(move |hashes| {
if let Some(handler2) = handler2.upgrade() {
handler2.notify_new_transactions(&hashes.to_vec());
return Ok(())
}
Err(())
})
);
EthPubSubClient {
handler,
sync_subscribers,
heads_subscribers,
logs_subscribers,
transactions_subscribers,
}
}
/// Returns a chain notification handler.
pub fn handler(&self) -> Weak<ChainNotificationHandler<C>> {
Arc::downgrade(&self.handler)
}
}
impl<S, OD> EthPubSubClient<LightFetch<S, OD>>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
/// Creates a new `EthPubSubClient` for `LightClient`.
pub fn light(
client: Arc<dyn LightChainClient>,
on_demand: Arc<OD>,
sync: Arc<S>,
cache: Arc<Mutex<Cache>>,
executor: Executor,
gas_price_percentile: usize,
pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>
) -> Self {
let fetch = LightFetch {
client,
on_demand,
sync,
cache,
gas_price_percentile,
};
EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver)
}
}
/// PubSub Notification handler.
pub struct ChainNotificationHandler<C> {
client: Arc<C>,
executor: Executor,
heads_subscribers: Arc<RwLock<Subscribers<Client>>>,
logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>,
transactions_subscribers: Arc<RwLock<Subscribers<Client>>>,
sync_subscribers: Arc<RwLock<Subscribers<Client>>>,
}
impl<C> ChainNotificationHandler<C> {
fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) {
executor.spawn(subscriber
.notify(Ok(result))
.map(|_| ())
.map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e))
);
}
fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) {
for subscriber in self.heads_subscribers.read().values() {
for &(ref header, ref extra_info) in headers {
Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader {
inner: header.into(),
extra_info: extra_info.clone(),
})));
}
}
}
fn notify_syncing(&self, sync_status: pubsub::PubSubSyncStatus) {
for subscriber in self.sync_subscribers.read().values() {
Self::notify(&self.executor, subscriber, pubsub::Result::SyncState(sync_status.clone()));
}
}
fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where
F: Fn(EthFilter, &Ex) -> T,
Ex: Send,
T: IntoFuture<Item = Vec<Log>, Error = Error>,
T::Future: Send + 'static,
| Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log)))
}
})
.map_err(|e| warn!("Unable to fetch latest logs: {:?}", e))
);
}
}
/// Notify all subscribers about new transaction hashes.
fn notify_new_transactions(&self, hashes: &[H256]) {
for subscriber in self.transactions_subscribers.read().values() {
for hash in hashes {
Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash));
}
}
}
}
/// A light client wrapper struct.
pub trait LightClient: Send + Sync {
/// Get a recent block header.
fn block_header(&self, id: BlockId) -> Option<encoded::Header>;
/// Fetch logs.
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>;
}
impl<S, OD> LightClient for LightFetch<S, OD>
where
S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static,
OD: OnDemandRequester + 'static
{
fn block_header(&self, id: BlockId) -> Option<encoded::Header> {
self.client.block_header(id)
}
fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> {
Box::new(LightFetch::logs(self, filter)) as BoxFuture<_>
}
}
impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> {
fn new_headers(&self, enacted: &[H256]) {
let headers = enacted
.iter()
.filter_map(|hash| self.client.block_header(BlockId::Hash(*hash)))
.map(|header| (header, Default::default()))
.collect::<Vec<_>>();
self.notify_heads(&headers);
self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter))
}
}
impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> {
fn new_blocks(&self, new_blocks: NewBlocks) {
if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return }
const EXTRA_INFO_PROOF: &str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed";
let headers = new_blocks.route.route()
.iter()
.filter_map(|&(hash, ref typ)| {
match typ {
| {
for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() {
let logs = futures::future::join_all(enacted
.iter()
.map(|&(hash, ref ex)| {
let mut filter = filter.clone();
filter.from_block = BlockId::Hash(hash);
filter.to_block = filter.from_block;
logs(filter, ex).into_future()
})
.collect::<Vec<_>>()
);
let limit = filter.limit;
let executor = self.executor.clone();
let subscriber = subscriber.clone();
self.executor.spawn(logs
.map(move |logs| {
let logs = logs.into_iter().flat_map(|log| log).collect();
for log in limit_logs(logs, limit) { | identifier_body |
ext_modules.py | language governing permissions and
# limitations under the License.
import errno
import glob
import os
import shutil
import sys
from setuptools import Extension
class BadAutoconfSubstitutes(ValueError):
pass
def _get_ac_subst_bool(true_subst, false_subst):
"""
This function accepts arguments which are supposedly a result
of substitution by the code in the configure script that
was generated for a boolean flag.
It either returns the value of this flag, or fails if the
combination of these "substitution results" is not valid.
Usage example.
- configure.ac:
AM_CONDITIONAL([FLAG], 0])
AM_CONDITIONAL([FLAG2], 1)
- This file (before processing by the configure script):
>>> __get_ac_subst_bool('@FLAG_TRUE@', '@FLAG_FALSE@')
True
>>> __get_ac_subst_bool('@FLAG2_TRUE@', '@FLAG2_FALSE@')
False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
...
BadAutoconfSubstitutes: ...
Inside workings - what is really going on in this example
after running the configure script:
>>> __get_ac_subst_bool('', '#')
>>> True
>>> __get_ac_subst_bool('#', '')
>>> False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
...
BadAutoconfSubstitutes: ...
"""
if true_subst == "" and false_subst == "#":
return True
if true_subst == "#" and false_subst == "":
return False
raise BadAutoconfSubstitutes(
"Bad values substitutes were inserted (or nothing were inserted) "
"into this file by the configure script: %s and %s".format(
true_subst, false_subst
)
)
def cond_multiple_extra_objects(true_subst, false_subst, bundled, system):
"""
Depending on the provided results of substitution by the configure
script, return either `bundled` or `system` objects.
If adding bundled objects, check their existence
and fail if they are missing.
"""
use_bundled = _get_ac_subst_bool(true_subst, false_subst)
if use_bundled:
|
return system
def _cond_extra_object(true_subst, false_subst, bundled, system):
return cond_multiple_extra_objects(true_subst, false_subst,
[bundled], [system])
def _create_module(module_name):
abs_top_srcdir = '/home/hanshenriksande/Master/mesos/build/..'
abs_top_builddir = '/home/hanshenriksande/Master/mesos/build'
ext_src_dir = os.path.join(
'src', 'python', module_name, 'src', 'mesos', module_name)
ext_common_dir = os.path.join(
'src', 'python', 'native_common')
leveldb = os.path.join('3rdparty', 'leveldb-1.19')
zookeeper = os.path.join('3rdparty', 'zookeeper-3.4.8', 'src', 'c')
libprocess = os.path.join('3rdparty', 'libprocess')
# Even though a statically compiled libprocess should include glog,
# libev, gperftools, and protobuf before installation this isn't the
# case, so while a libtool managed build will correctly pull in these
# libraries when building the final result, we need to explicitly
# include them here (or more precisely, down where we actually include
# libev.a and libprofiler.a).
glog = os.path.join('3rdparty', 'glog-0.4.0')
gperftools = os.path.join('3rdparty', 'gperftools-2.5')
protobuf = os.path.join('3rdparty', 'protobuf-3.5.0')
# Build the list of source files. Note that each source must be
# relative to our current directory (where this script lives).
SOURCES = [
os.path.join('src', 'mesos', module_name, file)
for file in os.listdir(os.path.join(abs_top_srcdir, ext_src_dir))
if file.endswith('.cpp')
]
INCLUDE_DIRS = [
os.path.join(abs_top_srcdir, 'include'),
os.path.join(abs_top_builddir, 'include'),
# Needed for the *.pb.h protobuf includes.
os.path.join(abs_top_builddir, 'include', 'mesos'),
os.path.join(abs_top_builddir, 'src'),
os.path.join(abs_top_builddir, ext_src_dir),
os.path.join(abs_top_builddir, ext_common_dir),
os.path.join(abs_top_builddir, protobuf, 'src'),
]
LIBRARY_DIRS = []
EXTRA_OBJECTS = [
os.path.join(abs_top_builddir, 'src', '.libs', 'libmesos_no_3rdparty.a'),
os.path.join(abs_top_builddir, libprocess, '.libs', 'libprocess.a')
]
# For leveldb, we need to check for the presence of libleveldb.a, since
# it is possible to disable leveldb inside mesos.
libleveldb = os.path.join(abs_top_builddir, leveldb, 'out-static', 'libleveldb.a')
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
libleveldb,
'-lleveldb'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, zookeeper, '.libs', 'libzookeeper_mt.a'),
'-lzookeeper_mt'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, glog, '.libs', 'libglog.a'),
'-lglog'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, protobuf, 'src', '.libs', 'libprotobuf.a'),
'-lprotobuf'
)
if '#' == '':
libseccomp = os.path.join('3rdparty', 'libseccomp-2.3.3')
libseccomp = os.path.join(
abs_top_builddir, libseccomp, 'src', '.libs', 'libseccomp.a')
EXTRA_OBJECTS += _cond_extra_object(
"#",
"",
libseccomp,
'-lseccomp'
)
# libev is a special case because it needs to be enabled only when
# libevent *is not* enabled through the top level ./configure.
#
# TODO(hartem): this entire block MUST be removed once libev is deprecated
# in favor of libevent.
if not _get_ac_subst_bool(
'#',
''
):
libev = os.path.join('3rdparty', 'libev-4.22')
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, libev, '.libs', 'libev.a'),
'-lev'
)
else:
libevent_dir = os.path.join('3rdparty', 'libevent-2.0.22-stable')
libevent_dir = os.path.join(abs_top_builddir, libevent_dir, '.libs')
libevent_bundled = [
os.path.join(libevent_dir, 'libevent_core.a'),
os.path.join(libevent_dir, 'libevent_pthreads.a'),
]
libevent_system = ['-levent_core', '-levent_pthreads']
if _get_ac_subst_bool('#', ''):
libevent_bundled.append(
os.path.join(libevent_dir, 'libevent_openssl.a')
)
libevent_system.append('-levent_core')
EXTRA_OBJECTS += cond_multiple_extra_objects(
"#",
"",
libevent_bundled,
libevent_system
)
# For gperftools, we need to check for the presence of libprofiler.a, since
# it is possible to disable perftools inside libprocess.
libprofiler = os.path.join(
abs_top_builddir, gperftools, '.libs', 'libprofiler.a')
if os.path.exists(libprofiler):
EXTRA_OBJECTS.append(libprofiler)
# We link different grpc library variants based on whether SSL is enabled.
grpc = os.path.join('3rdparty', 'grpc-1.10.0')
grpc_variant = '_unsecure' if '#' == '#' else ''
libgrpcpp = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc++%s.a' % grpc_variant)
libgrpc = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc%s.a' % grpc_variant)
| for obj in bundled:
if not os.path.exists(obj):
raise RuntimeError("{} does not exist.".format(obj))
return bundled | conditional_block |
ext_modules.py | language governing permissions and
# limitations under the License.
import errno
import glob
import os
import shutil
import sys
from setuptools import Extension
class BadAutoconfSubstitutes(ValueError):
pass
def _get_ac_subst_bool(true_subst, false_subst):
| BadAutoconfSubstitutes: ...
Inside workings - what is really going on in this example
after running the configure script:
>>> __get_ac_subst_bool('', '#')
>>> True
>>> __get_ac_subst_bool('#', '')
>>> False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
...
BadAutoconfSubstitutes: ...
"""
if true_subst == "" and false_subst == "#":
return True
if true_subst == "#" and false_subst == "":
return False
raise BadAutoconfSubstitutes(
"Bad values substitutes were inserted (or nothing were inserted) "
"into this file by the configure script: %s and %s".format(
true_subst, false_subst
)
)
def cond_multiple_extra_objects(true_subst, false_subst, bundled, system):
"""
Depending on the provided results of substitution by the configure
script, return either `bundled` or `system` objects.
If adding bundled objects, check their existence
and fail if they are missing.
"""
use_bundled = _get_ac_subst_bool(true_subst, false_subst)
if use_bundled:
for obj in bundled:
if not os.path.exists(obj):
raise RuntimeError("{} does not exist.".format(obj))
return bundled
return system
def _cond_extra_object(true_subst, false_subst, bundled, system):
return cond_multiple_extra_objects(true_subst, false_subst,
[bundled], [system])
def _create_module(module_name):
abs_top_srcdir = '/home/hanshenriksande/Master/mesos/build/..'
abs_top_builddir = '/home/hanshenriksande/Master/mesos/build'
ext_src_dir = os.path.join(
'src', 'python', module_name, 'src', 'mesos', module_name)
ext_common_dir = os.path.join(
'src', 'python', 'native_common')
leveldb = os.path.join('3rdparty', 'leveldb-1.19')
zookeeper = os.path.join('3rdparty', 'zookeeper-3.4.8', 'src', 'c')
libprocess = os.path.join('3rdparty', 'libprocess')
# Even though a statically compiled libprocess should include glog,
# libev, gperftools, and protobuf before installation this isn't the
# case, so while a libtool managed build will correctly pull in these
# libraries when building the final result, we need to explicitly
# include them here (or more precisely, down where we actually include
# libev.a and libprofiler.a).
glog = os.path.join('3rdparty', 'glog-0.4.0')
gperftools = os.path.join('3rdparty', 'gperftools-2.5')
protobuf = os.path.join('3rdparty', 'protobuf-3.5.0')
# Build the list of source files. Note that each source must be
# relative to our current directory (where this script lives).
SOURCES = [
os.path.join('src', 'mesos', module_name, file)
for file in os.listdir(os.path.join(abs_top_srcdir, ext_src_dir))
if file.endswith('.cpp')
]
INCLUDE_DIRS = [
os.path.join(abs_top_srcdir, 'include'),
os.path.join(abs_top_builddir, 'include'),
# Needed for the *.pb.h protobuf includes.
os.path.join(abs_top_builddir, 'include', 'mesos'),
os.path.join(abs_top_builddir, 'src'),
os.path.join(abs_top_builddir, ext_src_dir),
os.path.join(abs_top_builddir, ext_common_dir),
os.path.join(abs_top_builddir, protobuf, 'src'),
]
LIBRARY_DIRS = []
EXTRA_OBJECTS = [
os.path.join(abs_top_builddir, 'src', '.libs', 'libmesos_no_3rdparty.a'),
os.path.join(abs_top_builddir, libprocess, '.libs', 'libprocess.a')
]
# For leveldb, we need to check for the presence of libleveldb.a, since
# it is possible to disable leveldb inside mesos.
libleveldb = os.path.join(abs_top_builddir, leveldb, 'out-static', 'libleveldb.a')
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
libleveldb,
'-lleveldb'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, zookeeper, '.libs', 'libzookeeper_mt.a'),
'-lzookeeper_mt'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, glog, '.libs', 'libglog.a'),
'-lglog'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, protobuf, 'src', '.libs', 'libprotobuf.a'),
'-lprotobuf'
)
if '#' == '':
libseccomp = os.path.join('3rdparty', 'libseccomp-2.3.3')
libseccomp = os.path.join(
abs_top_builddir, libseccomp, 'src', '.libs', 'libseccomp.a')
EXTRA_OBJECTS += _cond_extra_object(
"#",
"",
libseccomp,
'-lseccomp'
)
# libev is a special case because it needs to be enabled only when
# libevent *is not* enabled through the top level ./configure.
#
# TODO(hartem): this entire block MUST be removed once libev is deprecated
# in favor of libevent.
if not _get_ac_subst_bool(
'#',
''
):
libev = os.path.join('3rdparty', 'libev-4.22')
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, libev, '.libs', 'libev.a'),
'-lev'
)
else:
libevent_dir = os.path.join('3rdparty', 'libevent-2.0.22-stable')
libevent_dir = os.path.join(abs_top_builddir, libevent_dir, '.libs')
libevent_bundled = [
os.path.join(libevent_dir, 'libevent_core.a'),
os.path.join(libevent_dir, 'libevent_pthreads.a'),
]
libevent_system = ['-levent_core', '-levent_pthreads']
if _get_ac_subst_bool('#', ''):
libevent_bundled.append(
os.path.join(libevent_dir, 'libevent_openssl.a')
)
libevent_system.append('-levent_core')
EXTRA_OBJECTS += cond_multiple_extra_objects(
"#",
"",
libevent_bundled,
libevent_system
)
# For gperftools, we need to check for the presence of libprofiler.a, since
# it is possible to disable perftools inside libprocess.
libprofiler = os.path.join(
abs_top_builddir, gperftools, '.libs', 'libprofiler.a')
if os.path.exists(libprofiler):
EXTRA_OBJECTS.append(libprofiler)
# We link different grpc library variants based on whether SSL is enabled.
grpc = os.path.join('3rdparty', 'grpc-1.10.0')
grpc_variant = '_unsecure' if '#' == '#' else ''
libgrpcpp = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc++%s.a' % grpc_variant)
libgrpc = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc%s.a' % grpc_variant)
| """
This function accepts arguments which are supposedly a result
of substitution by the code in the configure script that
was generated for a boolean flag.
It either returns the value of this flag, or fails if the
combination of these "substitution results" is not valid.
Usage example.
- configure.ac:
AM_CONDITIONAL([FLAG], 0])
AM_CONDITIONAL([FLAG2], 1)
- This file (before processing by the configure script):
>>> __get_ac_subst_bool('@FLAG_TRUE@', '@FLAG_FALSE@')
True
>>> __get_ac_subst_bool('@FLAG2_TRUE@', '@FLAG2_FALSE@')
False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
... | identifier_body |
ext_modules.py | language governing permissions and
# limitations under the License.
import errno
import glob
import os
import shutil
import sys
from setuptools import Extension
class BadAutoconfSubstitutes(ValueError):
pass
def _get_ac_subst_bool(true_subst, false_subst):
"""
This function accepts arguments which are supposedly a result
of substitution by the code in the configure script that
was generated for a boolean flag.
It either returns the value of this flag, or fails if the
combination of these "substitution results" is not valid.
Usage example.
- configure.ac:
AM_CONDITIONAL([FLAG], 0])
AM_CONDITIONAL([FLAG2], 1)
- This file (before processing by the configure script):
>>> __get_ac_subst_bool('@FLAG_TRUE@', '@FLAG_FALSE@')
True
>>> __get_ac_subst_bool('@FLAG2_TRUE@', '@FLAG2_FALSE@')
False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
...
BadAutoconfSubstitutes: ...
Inside workings - what is really going on in this example
after running the configure script:
>>> __get_ac_subst_bool('', '#')
>>> True
>>> __get_ac_subst_bool('#', '')
>>> False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
...
BadAutoconfSubstitutes: ...
"""
if true_subst == "" and false_subst == "#":
return True
if true_subst == "#" and false_subst == "":
return False
raise BadAutoconfSubstitutes(
"Bad values substitutes were inserted (or nothing were inserted) "
"into this file by the configure script: %s and %s".format(
true_subst, false_subst
)
)
def cond_multiple_extra_objects(true_subst, false_subst, bundled, system):
"""
Depending on the provided results of substitution by the configure
script, return either `bundled` or `system` objects.
If adding bundled objects, check their existence
and fail if they are missing.
"""
use_bundled = _get_ac_subst_bool(true_subst, false_subst)
if use_bundled:
for obj in bundled:
if not os.path.exists(obj):
raise RuntimeError("{} does not exist.".format(obj))
return bundled
return system
def _cond_extra_object(true_subst, false_subst, bundled, system):
return cond_multiple_extra_objects(true_subst, false_subst,
[bundled], [system])
def _create_module(module_name):
abs_top_srcdir = '/home/hanshenriksande/Master/mesos/build/..'
abs_top_builddir = '/home/hanshenriksande/Master/mesos/build'
ext_src_dir = os.path.join(
'src', 'python', module_name, 'src', 'mesos', module_name)
ext_common_dir = os.path.join(
'src', 'python', 'native_common')
leveldb = os.path.join('3rdparty', 'leveldb-1.19')
zookeeper = os.path.join('3rdparty', 'zookeeper-3.4.8', 'src', 'c')
libprocess = os.path.join('3rdparty', 'libprocess')
# Even though a statically compiled libprocess should include glog,
# libev, gperftools, and protobuf before installation this isn't the
# case, so while a libtool managed build will correctly pull in these
# libraries when building the final result, we need to explicitly
# include them here (or more precisely, down where we actually include
# libev.a and libprofiler.a).
glog = os.path.join('3rdparty', 'glog-0.4.0')
gperftools = os.path.join('3rdparty', 'gperftools-2.5')
protobuf = os.path.join('3rdparty', 'protobuf-3.5.0')
# Build the list of source files. Note that each source must be
# relative to our current directory (where this script lives).
SOURCES = [
os.path.join('src', 'mesos', module_name, file)
for file in os.listdir(os.path.join(abs_top_srcdir, ext_src_dir))
if file.endswith('.cpp')
]
INCLUDE_DIRS = [
os.path.join(abs_top_srcdir, 'include'),
os.path.join(abs_top_builddir, 'include'),
# Needed for the *.pb.h protobuf includes.
os.path.join(abs_top_builddir, 'include', 'mesos'),
os.path.join(abs_top_builddir, 'src'),
os.path.join(abs_top_builddir, ext_src_dir),
os.path.join(abs_top_builddir, ext_common_dir),
os.path.join(abs_top_builddir, protobuf, 'src'),
]
LIBRARY_DIRS = []
EXTRA_OBJECTS = [
os.path.join(abs_top_builddir, 'src', '.libs', 'libmesos_no_3rdparty.a'),
os.path.join(abs_top_builddir, libprocess, '.libs', 'libprocess.a')
]
# For leveldb, we need to check for the presence of libleveldb.a, since
# it is possible to disable leveldb inside mesos.
libleveldb = os.path.join(abs_top_builddir, leveldb, 'out-static', 'libleveldb.a')
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
libleveldb,
'-lleveldb'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, zookeeper, '.libs', 'libzookeeper_mt.a'),
'-lzookeeper_mt'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, glog, '.libs', 'libglog.a'),
'-lglog'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, protobuf, 'src', '.libs', 'libprotobuf.a'),
'-lprotobuf'
)
if '#' == '':
libseccomp = os.path.join('3rdparty', 'libseccomp-2.3.3')
libseccomp = os.path.join(
abs_top_builddir, libseccomp, 'src', '.libs', 'libseccomp.a')
EXTRA_OBJECTS += _cond_extra_object(
"#",
"",
libseccomp,
'-lseccomp'
)
# libev is a special case because it needs to be enabled only when
# libevent *is not* enabled through the top level ./configure.
#
# TODO(hartem): this entire block MUST be removed once libev is deprecated
# in favor of libevent.
if not _get_ac_subst_bool(
'#',
''
): | "#",
os.path.join(abs_top_builddir, libev, '.libs', 'libev.a'),
'-lev'
)
else:
libevent_dir = os.path.join('3rdparty', 'libevent-2.0.22-stable')
libevent_dir = os.path.join(abs_top_builddir, libevent_dir, '.libs')
libevent_bundled = [
os.path.join(libevent_dir, 'libevent_core.a'),
os.path.join(libevent_dir, 'libevent_pthreads.a'),
]
libevent_system = ['-levent_core', '-levent_pthreads']
if _get_ac_subst_bool('#', ''):
libevent_bundled.append(
os.path.join(libevent_dir, 'libevent_openssl.a')
)
libevent_system.append('-levent_core')
EXTRA_OBJECTS += cond_multiple_extra_objects(
"#",
"",
libevent_bundled,
libevent_system
)
# For gperftools, we need to check for the presence of libprofiler.a, since
# it is possible to disable perftools inside libprocess.
libprofiler = os.path.join(
abs_top_builddir, gperftools, '.libs', 'libprofiler.a')
if os.path.exists(libprofiler):
EXTRA_OBJECTS.append(libprofiler)
# We link different grpc library variants based on whether SSL is enabled.
grpc = os.path.join('3rdparty', 'grpc-1.10.0')
grpc_variant = '_unsecure' if '#' == '#' else ''
libgrpcpp = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc++%s.a' % grpc_variant)
libgrpc = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc%s.a' % grpc_variant)
| libev = os.path.join('3rdparty', 'libev-4.22')
EXTRA_OBJECTS += _cond_extra_object(
"", | random_line_split |
ext_modules.py | language governing permissions and
# limitations under the License.
import errno
import glob
import os
import shutil
import sys
from setuptools import Extension
class BadAutoconfSubstitutes(ValueError):
pass
def _get_ac_subst_bool(true_subst, false_subst):
"""
This function accepts arguments which are supposedly a result
of substitution by the code in the configure script that
was generated for a boolean flag.
It either returns the value of this flag, or fails if the
combination of these "substitution results" is not valid.
Usage example.
- configure.ac:
AM_CONDITIONAL([FLAG], 0])
AM_CONDITIONAL([FLAG2], 1)
- This file (before processing by the configure script):
>>> __get_ac_subst_bool('@FLAG_TRUE@', '@FLAG_FALSE@')
True
>>> __get_ac_subst_bool('@FLAG2_TRUE@', '@FLAG2_FALSE@')
False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
...
BadAutoconfSubstitutes: ...
Inside workings - what is really going on in this example
after running the configure script:
>>> __get_ac_subst_bool('', '#')
>>> True
>>> __get_ac_subst_bool('#', '')
>>> False
>>> __get_ac_subst_bool('@BAD_FLAG_TRUE@', '@BAD_FLAG_FALSE@')
Traceback (most recent call last):
...
BadAutoconfSubstitutes: ...
"""
if true_subst == "" and false_subst == "#":
return True
if true_subst == "#" and false_subst == "":
return False
raise BadAutoconfSubstitutes(
"Bad values substitutes were inserted (or nothing were inserted) "
"into this file by the configure script: %s and %s".format(
true_subst, false_subst
)
)
def cond_multiple_extra_objects(true_subst, false_subst, bundled, system):
"""
Depending on the provided results of substitution by the configure
script, return either `bundled` or `system` objects.
If adding bundled objects, check their existence
and fail if they are missing.
"""
use_bundled = _get_ac_subst_bool(true_subst, false_subst)
if use_bundled:
for obj in bundled:
if not os.path.exists(obj):
raise RuntimeError("{} does not exist.".format(obj))
return bundled
return system
def _cond_extra_object(true_subst, false_subst, bundled, system):
return cond_multiple_extra_objects(true_subst, false_subst,
[bundled], [system])
def | (module_name):
abs_top_srcdir = '/home/hanshenriksande/Master/mesos/build/..'
abs_top_builddir = '/home/hanshenriksande/Master/mesos/build'
ext_src_dir = os.path.join(
'src', 'python', module_name, 'src', 'mesos', module_name)
ext_common_dir = os.path.join(
'src', 'python', 'native_common')
leveldb = os.path.join('3rdparty', 'leveldb-1.19')
zookeeper = os.path.join('3rdparty', 'zookeeper-3.4.8', 'src', 'c')
libprocess = os.path.join('3rdparty', 'libprocess')
# Even though a statically compiled libprocess should include glog,
# libev, gperftools, and protobuf before installation this isn't the
# case, so while a libtool managed build will correctly pull in these
# libraries when building the final result, we need to explicitly
# include them here (or more precisely, down where we actually include
# libev.a and libprofiler.a).
glog = os.path.join('3rdparty', 'glog-0.4.0')
gperftools = os.path.join('3rdparty', 'gperftools-2.5')
protobuf = os.path.join('3rdparty', 'protobuf-3.5.0')
# Build the list of source files. Note that each source must be
# relative to our current directory (where this script lives).
SOURCES = [
os.path.join('src', 'mesos', module_name, file)
for file in os.listdir(os.path.join(abs_top_srcdir, ext_src_dir))
if file.endswith('.cpp')
]
INCLUDE_DIRS = [
os.path.join(abs_top_srcdir, 'include'),
os.path.join(abs_top_builddir, 'include'),
# Needed for the *.pb.h protobuf includes.
os.path.join(abs_top_builddir, 'include', 'mesos'),
os.path.join(abs_top_builddir, 'src'),
os.path.join(abs_top_builddir, ext_src_dir),
os.path.join(abs_top_builddir, ext_common_dir),
os.path.join(abs_top_builddir, protobuf, 'src'),
]
LIBRARY_DIRS = []
EXTRA_OBJECTS = [
os.path.join(abs_top_builddir, 'src', '.libs', 'libmesos_no_3rdparty.a'),
os.path.join(abs_top_builddir, libprocess, '.libs', 'libprocess.a')
]
# For leveldb, we need to check for the presence of libleveldb.a, since
# it is possible to disable leveldb inside mesos.
libleveldb = os.path.join(abs_top_builddir, leveldb, 'out-static', 'libleveldb.a')
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
libleveldb,
'-lleveldb'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, zookeeper, '.libs', 'libzookeeper_mt.a'),
'-lzookeeper_mt'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, glog, '.libs', 'libglog.a'),
'-lglog'
)
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, protobuf, 'src', '.libs', 'libprotobuf.a'),
'-lprotobuf'
)
if '#' == '':
libseccomp = os.path.join('3rdparty', 'libseccomp-2.3.3')
libseccomp = os.path.join(
abs_top_builddir, libseccomp, 'src', '.libs', 'libseccomp.a')
EXTRA_OBJECTS += _cond_extra_object(
"#",
"",
libseccomp,
'-lseccomp'
)
# libev is a special case because it needs to be enabled only when
# libevent *is not* enabled through the top level ./configure.
#
# TODO(hartem): this entire block MUST be removed once libev is deprecated
# in favor of libevent.
if not _get_ac_subst_bool(
'#',
''
):
libev = os.path.join('3rdparty', 'libev-4.22')
EXTRA_OBJECTS += _cond_extra_object(
"",
"#",
os.path.join(abs_top_builddir, libev, '.libs', 'libev.a'),
'-lev'
)
else:
libevent_dir = os.path.join('3rdparty', 'libevent-2.0.22-stable')
libevent_dir = os.path.join(abs_top_builddir, libevent_dir, '.libs')
libevent_bundled = [
os.path.join(libevent_dir, 'libevent_core.a'),
os.path.join(libevent_dir, 'libevent_pthreads.a'),
]
libevent_system = ['-levent_core', '-levent_pthreads']
if _get_ac_subst_bool('#', ''):
libevent_bundled.append(
os.path.join(libevent_dir, 'libevent_openssl.a')
)
libevent_system.append('-levent_core')
EXTRA_OBJECTS += cond_multiple_extra_objects(
"#",
"",
libevent_bundled,
libevent_system
)
# For gperftools, we need to check for the presence of libprofiler.a, since
# it is possible to disable perftools inside libprocess.
libprofiler = os.path.join(
abs_top_builddir, gperftools, '.libs', 'libprofiler.a')
if os.path.exists(libprofiler):
EXTRA_OBJECTS.append(libprofiler)
# We link different grpc library variants based on whether SSL is enabled.
grpc = os.path.join('3rdparty', 'grpc-1.10.0')
grpc_variant = '_unsecure' if '#' == '#' else ''
libgrpcpp = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc++%s.a' % grpc_variant)
libgrpc = os.path.join(abs_top_builddir, grpc, 'libs', 'opt', 'libgrpc%s.a' % grpc_variant)
| _create_module | identifier_name |
time_client.rs | }: {:?}",
unique_id,
packet
);
out.reserve(packet.encoded_len());
packet
.encode(out)
.expect("Error encoding packet for time request");
}
///Send a time request
///
///Resolve `peer_config.host` using `resolver`. Take keys and cookies
/// from `secret_store`. If they aren't there, run NTS-KE to obtain
/// them. Send a time request over `socket_mutex` and record in
/// `core_state` that it's in flight.
pub async fn send_time_request(
resolver: &trust_dns_resolver::TokioAsyncResolver,
socket: &tokio::net::UdpSocket,
peer_name: &PeerName,
peer_config: &PeerConfig,
core_state: &RwLock<core::CoreState>,
secret_store: &SecretStore,
) -> Result<(), RequestError> {
let ip_addr = resolver
.lookup_ip(peer_config.host.as_str())
.await
.map_err(RequestError::ResolveError)?
.into_iter()
.next()
.expect("Got empty iterator from DNS lookup");
debug!(
"Resolved DNS for peer '{}': {} -> {}",
peer_name, peer_config.host, ip_addr
);
let peer_addr = SocketAddr::new(ip_addr, peer_config.port);
//These two secret_store calls each use separate transactions, so
// it's possible to get a cookie that doesn't correspond to to the
// c2s key if the results of an NTS-KE exchange get committed in
// between the two calls. This can be elicited in testing by
// setting an extremely short polling interval. Preventing this
// would be easy — just add a method to SecretStore that fetches
// both the C2S key and the cookie in a single transaction — but
// it wouldn't actually improve anything because the new S2C key
// will still get committed right afterward and we won't be able
// to decrypt the server's response. The problem is harmless in
// any case because we'll just recover on the next tick. Worst
// that happens is that NTS-KE gets run twice rather than just
// once.
let (c2s, cookie, cookies_left) = match (
secret_store
.get_c2s_key(peer_name)
.map_err(RequestError::C2SLookupError)?,
secret_store
.take_cookie(peer_name)
.map_err(RequestError::CookieLookupError)?,
) {
(Some(c2s), (Some(cookie), cookies_left)) => (c2s, cookie, cookies_left),
_ => {
let tcp_stream = net::TcpStream::connect(&peer_addr)
.await
.map_err(RequestError::TcpError)?;
debug!(
"TCP connection established for NTS-KE with peer '{}'",
peer_name
);
let mut tls_stream = peer_config
.tls_connector
.connect(peer_config.cert_name.as_ref(), tcp_stream)
.await
.map_err(RequestError::TlsHandshakeError)?;
debug!("TLS handshake completed with peer '{}'", peer_name);
let mut ntske_output = ntske::request_ntske(&mut tls_stream)
.await
.map_err(RequestError::TlsSessionError)?
.map_err(RequestError::NtskeProblem)?;
debug!("Successful NTS-KE with peer '{}'", peer_name);
let my_cookie = ntske_output
.cookies
.pop()
.ok_or(RequestError::NtskeNoCookies)?;
let cookies_left = ntske_output.cookies.len();
secret_store
.set_credentials(
peer_name,
&ntske_output.c2s,
&ntske_output.s2c,
ntske_output.cookies.as_slice(),
)
.map_err(RequestError::CredentialSaveError)?;
debug!(
"Stored session keys and {} cookies for peer '{}'",
cookies_left, peer_name
);
(ntske_output.c2s, my_cookie, cookies_left)
}
};
let query = core_state
.write()
.unwrap()
.on_tick(peer_name, &mut rand::thread_rng())
.map_err(RequestError::CoreTickError)?;
let cookies_requested = if cookies_left > 7 {
1
} else {
8 - cookies_left
};
let mut send_buf = Vec::new();
serialize_time_request(
&mut send_buf,
&query.unique_id,
&c2s,
cookie,
cookies_requested,
);
core_state
.write()
.unwrap()
.on_departure(peer_name)
.map_err(RequestError::CoreDepartureError)?;
debug!("Sending time request to peer '{}'", peer_name);
socket
.send_to(send_buf.as_slice(), &peer_addr)
.await
.map_err(RequestError::UdpSocketError)?;
Ok(())
}
///Enumeration of errors that can occur when processing a time response
#[derive(Debug)]
pub enum ResponseError {
DestTimeError(io::Error),
PacketDecodingError(prost::DecodeError),
NotAResponse,
AdDecodingError(prost::DecodeError),
WrongNonceLength,
WrongUniqueIdLength,
UnrecognizedErrorResponse,
NonMatchingUniqueId,
S2CLookupError(PeerName, StoreError),
S2CNotFound(PeerName),
DecryptionFailure(PeerName),
PlaintextDecodingError(PeerName, prost::DecodeError),
WrongEraLength(PeerName),
NoLocalClock(PeerName),
NoGlobalOffset(PeerName),
CoreError(PeerName, io::Error),
StoreCookiesError(PeerName, StoreError),
StoreClearError(PeerName, StoreError),
}
impl ResponseError {
fn level(&self) -> log::Level {
use log::Level::*;
use ResponseError::*;
match self {
DestTimeError(_) => Error,
PacketDecodingError(_) => Debug,
NotAResponse => Debug,
AdDecodingError(_) => Debug,
WrongNonceLength => Debug,
WrongUniqueIdLength => Debug,
UnrecognizedErrorResponse => Debug,
NonMatchingUniqueId => Debug,
S2CLookupError(_, _) => Error,
S2CNotFound(_) => Warn,
DecryptionFailure(_) => Warn,
PlaintextDecodingError(_, _) => Warn,
WrongEraLength(_) => Warn,
NoLocalClock(_) => Warn,
NoGlobalOffset(_) => Warn,
CoreError(_, _) => Error,
StoreCookiesError(_, _) => Error,
StoreClearError(_, _) => Error,
}
}
}
impl fmt::Display for ResponseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ResponseError::*;
match self {
DestTimeError(e) => write!(f, "Getting destination timestamp: {}", e),
PacketDecodingError(e) => write!(f, "Decoding packet: {}", e),
NotAResponse => write!(f, "Not a response packet"),
AdDecodingError(e) => write!(f, "Decoding associated data: {}", e),
WrongNonceLength => write!(f, "Wrong nonce length"),
WrongUniqueIdLength => write!(f, "Wrong unique-ID length"),
UnrecognizedErrorResponse => write!(f, "Unrecognized error response"),
NonMatchingUniqueId => {
write!(f, "Unique-ID does not correspond to any in-flight request")
}
S2CLookupError(peer, e) => write!(f, "Looking up S2C for peer '{}': {}", peer, e),
S2CNotFound(peer) => write!(f, "S2C key not found for peer '{}'", peer),
DecryptionFailure(peer) => write!(f, "Failed to decrypt response from peer '{}'", peer),
PlaintextDecodingError(peer, e) => {
write!(f, "Decoding plaintext send by peer '{}': {}", peer, e)
}
WrongEraLength(peer) => write!(
f,
"Response from peer '{}' has an era of the wrong length",
peer
),
NoLocalClock(peer) => write!(
f,
"Response from peer '{}' is missing its local-clock field",
peer
),
NoGlobalOffset(peer) => write!(
f,
"Response from peer '{}' is missing its global-offset field",
peer
),
CoreError(peer, e) => write!(
f,
"Updating core state for response from peer '{}': {}",
peer, e
),
StoreCookiesError(peer, e) => write!(
f,
"Writing new cookies from peer '{}' to secret store: {}",
peer, e
),
StoreClearError(peer, e) => write!(
f,
"Clearing secret store in response to crypto-NAK from peer '{}': {}",
peer, e
),
}
}
}
impl std::error::Error for ResponseError {}
///Data extracted from a [wire::ResponseEnvelope](../wire/struct.ResponseEnvelope.html)
pub struct ResponseEnvelopeData {
unique_id: core::UniqueId,
nonce: Aes128SivNonce,
ad: Vec<u8>,
ciphertext: Vec<u8>,
}
///Data extracted from a crypto-NAK response
pub struct Cryp | toNakData {
| identifier_name | |
time_client.rs | map_err(RequestError::CoreDepartureError)?;
debug!("Sending time request to peer '{}'", peer_name);
socket
.send_to(send_buf.as_slice(), &peer_addr)
.await
.map_err(RequestError::UdpSocketError)?;
Ok(())
}
///Enumeration of errors that can occur when processing a time response
#[derive(Debug)]
pub enum ResponseError {
DestTimeError(io::Error),
PacketDecodingError(prost::DecodeError),
NotAResponse,
AdDecodingError(prost::DecodeError),
WrongNonceLength,
WrongUniqueIdLength,
UnrecognizedErrorResponse,
NonMatchingUniqueId,
S2CLookupError(PeerName, StoreError),
S2CNotFound(PeerName),
DecryptionFailure(PeerName),
PlaintextDecodingError(PeerName, prost::DecodeError),
WrongEraLength(PeerName),
NoLocalClock(PeerName),
NoGlobalOffset(PeerName),
CoreError(PeerName, io::Error),
StoreCookiesError(PeerName, StoreError),
StoreClearError(PeerName, StoreError),
}
impl ResponseError {
fn level(&self) -> log::Level {
use log::Level::*;
use ResponseError::*;
match self {
DestTimeError(_) => Error,
PacketDecodingError(_) => Debug,
NotAResponse => Debug,
AdDecodingError(_) => Debug,
WrongNonceLength => Debug,
WrongUniqueIdLength => Debug,
UnrecognizedErrorResponse => Debug,
NonMatchingUniqueId => Debug,
S2CLookupError(_, _) => Error,
S2CNotFound(_) => Warn,
DecryptionFailure(_) => Warn,
PlaintextDecodingError(_, _) => Warn,
WrongEraLength(_) => Warn,
NoLocalClock(_) => Warn,
NoGlobalOffset(_) => Warn,
CoreError(_, _) => Error,
StoreCookiesError(_, _) => Error,
StoreClearError(_, _) => Error,
}
}
}
impl fmt::Display for ResponseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ResponseError::*;
match self {
DestTimeError(e) => write!(f, "Getting destination timestamp: {}", e),
PacketDecodingError(e) => write!(f, "Decoding packet: {}", e),
NotAResponse => write!(f, "Not a response packet"),
AdDecodingError(e) => write!(f, "Decoding associated data: {}", e),
WrongNonceLength => write!(f, "Wrong nonce length"),
WrongUniqueIdLength => write!(f, "Wrong unique-ID length"),
UnrecognizedErrorResponse => write!(f, "Unrecognized error response"),
NonMatchingUniqueId => {
write!(f, "Unique-ID does not correspond to any in-flight request")
}
S2CLookupError(peer, e) => write!(f, "Looking up S2C for peer '{}': {}", peer, e),
S2CNotFound(peer) => write!(f, "S2C key not found for peer '{}'", peer),
DecryptionFailure(peer) => write!(f, "Failed to decrypt response from peer '{}'", peer),
PlaintextDecodingError(peer, e) => {
write!(f, "Decoding plaintext send by peer '{}': {}", peer, e)
}
WrongEraLength(peer) => write!(
f,
"Response from peer '{}' has an era of the wrong length",
peer
),
NoLocalClock(peer) => write!(
f,
"Response from peer '{}' is missing its local-clock field",
peer
),
NoGlobalOffset(peer) => write!(
f,
"Response from peer '{}' is missing its global-offset field",
peer
),
CoreError(peer, e) => write!(
f,
"Updating core state for response from peer '{}': {}",
peer, e
),
StoreCookiesError(peer, e) => write!(
f,
"Writing new cookies from peer '{}' to secret store: {}",
peer, e
),
StoreClearError(peer, e) => write!(
f,
"Clearing secret store in response to crypto-NAK from peer '{}': {}",
peer, e
),
}
}
}
impl std::error::Error for ResponseError {}
///Data extracted from a [wire::ResponseEnvelope](../wire/struct.ResponseEnvelope.html)
pub struct ResponseEnvelopeData {
unique_id: core::UniqueId,
nonce: Aes128SivNonce,
ad: Vec<u8>,
ciphertext: Vec<u8>,
}
///Data extracted from a crypto-NAK response
pub struct CryptoNakData {
unique_id: core::UniqueId,
}
///Deserialize a time response as far as the envelope, but don't try to decrypt it
pub fn deserialize_response_envelope<Response: Buf>(
response: Response,
) -> Result<Result<ResponseEnvelopeData, CryptoNakData>, ResponseError> {
let packet = wire::Packet::decode(response).map_err(ResponseError::PacketDecodingError)?;
trace!("Deserialized time response packet: {:?}", packet);
match packet.msg {
Some(wire::packet::Msg::Response(envelope)) => {
let ad = wire::ResponseAd::decode(envelope.ad.as_ref())
.map_err(ResponseError::AdDecodingError)?;
let nonce = Aes128SivNonce::try_clone_from_slice(envelope.nonce.as_slice())
.map_err(|_| ResponseError::WrongNonceLength)?;
let unique_id = core::UniqueId::try_from(ad.unique_id.as_slice())
.map_err(|_| ResponseError::WrongUniqueIdLength)?;
Ok(Ok(ResponseEnvelopeData {
unique_id,
nonce,
ad: envelope.ad,
ciphertext: envelope.ciphertext,
}))
}
Some(wire::packet::Msg::Error(error)) => {
let unique_id = core::UniqueId::try_from(error.unique_id.as_slice())
.map_err(|_| ResponseError::WrongUniqueIdLength)?;
match error.error {
Some(wire::error::Error::CryptoNak(_)) => Ok(Err(CryptoNakData { unique_id })),
_ => Err(ResponseError::UnrecognizedErrorResponse),
}
}
_ => Err(ResponseError::NotAResponse),
}
}
///Deserialize the plaintext of a time response, returning cookies and
/// a [`core::Response`](../core/struct.Response.html).
pub fn deserialize_response_plaintext<Plaintext: Buf>(
peer_name: &PeerName,
unique_id: &core::UniqueId,
plaintext: Plaintext,
) -> Result<(Vec<Vec<u8>>, core::Response), ResponseError> {
let response = wire::Response::decode(plaintext)
.map_err(|e| ResponseError::PlaintextDecodingError(peer_name.clone(), e))?;
trace!("Deserialized time response plaintext: {:?}", response);
let era = Era(<[u8; 16]>::try_from(response.era.as_slice())
.map_err(|_| ResponseError::WrongEraLength(peer_name.clone()))?);
let global_offset = response
.offset
.ok_or_else(|| ResponseError::NoGlobalOffset(peer_name.clone()))?;
let local_clock = response
.local_clock
.ok_or_else(|| ResponseError::NoLocalClock(peer_name.clone()))?;
Ok((
response.cookies,
core::Response {
era,
unique_id: *unique_id,
global_offset: Timestamp::new(
global_offset.seconds as i64,
global_offset.nanoseconds as i64,
),
local_clock: Timestamp::new(local_clock.seconds as i64, local_clock.nanoseconds as i64),
},
))
}
///Process a time response
///
///Deserialize and decrypt the `response` using `secret_store` to look up keys.
/// Pass the response to `core_state`. Add any returned cookies to the store.
pub fn handle_time_response<Response: Buf>(
response: Response,
core_state: &RwLock<core::CoreState>,
secret_store: &SecretStore,
) -> Result<(), ResponseError> {
let dest_time = Timestamp::local_time().map_err(ResponseError::DestTimeError)?;
match deserialize_response_envelope(response)? {
Ok(envelope) => {
let peer_name = core_state
.read()
.unwrap()
.lookup_peer(&envelope.unique_id)
.ok_or(ResponseError::NonMatchingUniqueId)?;
//It's possible for S2CNotFound to happen when request B
// crosses request A on the wire, and response B is a
// crypto-NAK which causes us to clear our
// credentials. This can readily be elicited in testing
// setting an extremely short polling interval, but should
// never normally happen in production, barring
// adversarial behavior by the network or the peer. If it
// does, it's harmless; we'll log it at WARN level and
// recover on the next tick.
let s2c = secret_store
.get_s2c_key(&peer_name)
.map_err(|e| ResponseError::S2CLookupError(peer_name.clone(), e))?
.ok_or_else(|| ResponseError::S2CNotFound(peer_name.clone()))?;
let aead_s2c = Aes128SivAead::new(&s2c); | let plaintext = aead_s2c
.decrypt( | random_line_split | |
time_client.rs | match self {
DestTimeError(e) => write!(f, "Getting destination timestamp: {}", e),
PacketDecodingError(e) => write!(f, "Decoding packet: {}", e),
NotAResponse => write!(f, "Not a response packet"),
AdDecodingError(e) => write!(f, "Decoding associated data: {}", e),
WrongNonceLength => write!(f, "Wrong nonce length"),
WrongUniqueIdLength => write!(f, "Wrong unique-ID length"),
UnrecognizedErrorResponse => write!(f, "Unrecognized error response"),
NonMatchingUniqueId => {
write!(f, "Unique-ID does not correspond to any in-flight request")
}
S2CLookupError(peer, e) => write!(f, "Looking up S2C for peer '{}': {}", peer, e),
S2CNotFound(peer) => write!(f, "S2C key not found for peer '{}'", peer),
DecryptionFailure(peer) => write!(f, "Failed to decrypt response from peer '{}'", peer),
PlaintextDecodingError(peer, e) => {
write!(f, "Decoding plaintext send by peer '{}': {}", peer, e)
}
WrongEraLength(peer) => write!(
f,
"Response from peer '{}' has an era of the wrong length",
peer
),
NoLocalClock(peer) => write!(
f,
"Response from peer '{}' is missing its local-clock field",
peer
),
NoGlobalOffset(peer) => write!(
f,
"Response from peer '{}' is missing its global-offset field",
peer
),
CoreError(peer, e) => write!(
f,
"Updating core state for response from peer '{}': {}",
peer, e
),
StoreCookiesError(peer, e) => write!(
f,
"Writing new cookies from peer '{}' to secret store: {}",
peer, e
),
StoreClearError(peer, e) => write!(
f,
"Clearing secret store in response to crypto-NAK from peer '{}': {}",
peer, e
),
}
}
}
impl std::error::Error for ResponseError {}
///Data extracted from a [wire::ResponseEnvelope](../wire/struct.ResponseEnvelope.html)
pub struct ResponseEnvelopeData {
unique_id: core::UniqueId,
nonce: Aes128SivNonce,
ad: Vec<u8>,
ciphertext: Vec<u8>,
}
///Data extracted from a crypto-NAK response
pub struct CryptoNakData {
unique_id: core::UniqueId,
}
///Deserialize a time response as far as the envelope, but don't try to decrypt it
pub fn deserialize_response_envelope<Response: Buf>(
response: Response,
) -> Result<Result<ResponseEnvelopeData, CryptoNakData>, ResponseError> {
let packet = wire::Packet::decode(response).map_err(ResponseError::PacketDecodingError)?;
trace!("Deserialized time response packet: {:?}", packet);
match packet.msg {
Some(wire::packet::Msg::Response(envelope)) => {
let ad = wire::ResponseAd::decode(envelope.ad.as_ref())
.map_err(ResponseError::AdDecodingError)?;
let nonce = Aes128SivNonce::try_clone_from_slice(envelope.nonce.as_slice())
.map_err(|_| ResponseError::WrongNonceLength)?;
let unique_id = core::UniqueId::try_from(ad.unique_id.as_slice())
.map_err(|_| ResponseError::WrongUniqueIdLength)?;
Ok(Ok(ResponseEnvelopeData {
unique_id,
nonce,
ad: envelope.ad,
ciphertext: envelope.ciphertext,
}))
}
Some(wire::packet::Msg::Error(error)) => {
let unique_id = core::UniqueId::try_from(error.unique_id.as_slice())
.map_err(|_| ResponseError::WrongUniqueIdLength)?;
match error.error {
Some(wire::error::Error::CryptoNak(_)) => Ok(Err(CryptoNakData { unique_id })),
_ => Err(ResponseError::UnrecognizedErrorResponse),
}
}
_ => Err(ResponseError::NotAResponse),
}
}
///Deserialize the plaintext of a time response, returning cookies and
/// a [`core::Response`](../core/struct.Response.html).
pub fn deserialize_response_plaintext<Plaintext: Buf>(
peer_name: &PeerName,
unique_id: &core::UniqueId,
plaintext: Plaintext,
) -> Result<(Vec<Vec<u8>>, core::Response), ResponseError> {
let response = wire::Response::decode(plaintext)
.map_err(|e| ResponseError::PlaintextDecodingError(peer_name.clone(), e))?;
trace!("Deserialized time response plaintext: {:?}", response);
let era = Era(<[u8; 16]>::try_from(response.era.as_slice())
.map_err(|_| ResponseError::WrongEraLength(peer_name.clone()))?);
let global_offset = response
.offset
.ok_or_else(|| ResponseError::NoGlobalOffset(peer_name.clone()))?;
let local_clock = response
.local_clock
.ok_or_else(|| ResponseError::NoLocalClock(peer_name.clone()))?;
Ok((
response.cookies,
core::Response {
era,
unique_id: *unique_id,
global_offset: Timestamp::new(
global_offset.seconds as i64,
global_offset.nanoseconds as i64,
),
local_clock: Timestamp::new(local_clock.seconds as i64, local_clock.nanoseconds as i64),
},
))
}
///Process a time response
///
///Deserialize and decrypt the `response` using `secret_store` to look up keys.
/// Pass the response to `core_state`. Add any returned cookies to the store.
pub fn handle_time_response<Response: Buf>(
response: Response,
core_state: &RwLock<core::CoreState>,
secret_store: &SecretStore,
) -> Result<(), ResponseError> {
let dest_time = Timestamp::local_time().map_err(ResponseError::DestTimeError)?;
match deserialize_response_envelope(response)? {
Ok(envelope) => {
let peer_name = core_state
.read()
.unwrap()
.lookup_peer(&envelope.unique_id)
.ok_or(ResponseError::NonMatchingUniqueId)?;
//It's possible for S2CNotFound to happen when request B
// crosses request A on the wire, and response B is a
// crypto-NAK which causes us to clear our
// credentials. This can readily be elicited in testing
// setting an extremely short polling interval, but should
// never normally happen in production, barring
// adversarial behavior by the network or the peer. If it
// does, it's harmless; we'll log it at WARN level and
// recover on the next tick.
let s2c = secret_store
.get_s2c_key(&peer_name)
.map_err(|e| ResponseError::S2CLookupError(peer_name.clone(), e))?
.ok_or_else(|| ResponseError::S2CNotFound(peer_name.clone()))?;
let aead_s2c = Aes128SivAead::new(&s2c);
let plaintext = aead_s2c
.decrypt(
&envelope.nonce,
Payload {
aad: &envelope.ad,
msg: &envelope.ciphertext,
},
)
.map_err(|_| ResponseError::DecryptionFailure(peer_name.clone()))?;
let (cookies, response) = deserialize_response_plaintext(
&peer_name,
&envelope.unique_id,
plaintext.as_ref(),
)?;
core_state
.write()
.unwrap()
.on_response(&response, dest_time)
.map_err(|e| ResponseError::CoreError(peer_name.clone(), e))?;
secret_store
.give_cookies(&peer_name, cookies)
.map_err(|e| ResponseError::StoreCookiesError(peer_name.clone(), e))?;
debug!(
"Successfully handled time response from peer '{}'",
peer_name
);
Ok(())
}
Err(crypto_nak) => {
let peer_name = core_state
.read()
.unwrap()
.lookup_peer(&crypto_nak.unique_id)
.ok_or(ResponseError::NonMatchingUniqueId)?;
debug!("Received crypto-NAK from peer '{}'", peer_name);
secret_store
.clear_peer(&peer_name)
.map_err(|e| ResponseError::StoreClearError(peer_name.clone(), e))?;
Ok(())
}
}
}
///Listen for time response and process them
///
///Listen forever on `socket`. Process any responses that come in. If any
/// errors occur, log them and continue.
pub async fn time_response_listener(
socket: &tokio::net::UdpSocket,
core_state: &RwLock<core::CoreState>,
secret_store: &SecretStore,
) -> io::Result<()> {
| let mut recv_buf = [0; 65535];
loop {
let (recv_size, peer_addr) = socket.recv_from(&mut recv_buf).await?;
if let Err(e) = handle_time_response(&recv_buf[0..recv_size], core_state, secret_store) {
log!(
e.level(),
"Handling time response from {}: {}",
peer_addr,
e
);
}
}
}
| identifier_body | |
fst_builder.rs | inited: false,
}
}
// this should be call after new FstBuilder
pub fn init(&mut self) {
if self.do_share_suffix {
let reader = self.fst.bytes_store.get_reverse_reader();
let dedup_hash = NodeHash::new(&mut self.fst, reader);
self.dedup_hash = Some(dedup_hash);
}
for i in 0..10 {
let node = UnCompiledNode::new(self, i);
self.frontier.push(node);
}
self.inited = true;
}
pub fn term_count(&self) -> i64 {
self.frontier[0].input_count
}
fn compile_node(&mut self, node_index: usize, tail_length: u32) -> Result<CompiledAddress> {
debug_assert!(self.inited);
let node: i64;
let bytes_pos_start = self.fst.bytes_store.get_position();
let builder = self as *mut FstBuilder<F>;
unsafe {
if let Some(ref mut dedup_hash) = self.dedup_hash {
if (self.do_share_non_singleton_nodes || self.frontier[node_index].num_arcs <= 1)
&& tail_length <= self.share_max_tail_length
{
if self.frontier[node_index].num_arcs == 0 {
node = self.fst.add_node(&mut *builder, node_index)?;
self.last_frozen_node = node;
} else {
node = dedup_hash.add(&mut *builder, node_index)? as i64;
}
} else {
node = self.fst.add_node(&mut *builder, node_index)?;
}
} else {
node = self.fst.add_node(&mut *builder, node_index)?;
}
}
assert_ne!(node, -2);
let bytes_pos_end = self.fst.bytes_store.get_position();
if bytes_pos_end != bytes_pos_start {
// fst added a new node
assert!(bytes_pos_end > bytes_pos_start);
self.last_frozen_node = node;
}
self.frontier[node_index].clear();
Ok(node)
}
#[allow(unused_assignments)]
fn freeze_tail(&mut self, prefix_len_plus1: usize) -> Result<()> {
debug_assert!(self.inited);
let down_to = max(1, prefix_len_plus1);
if self.last_input.length < down_to {
return Ok(());
}
for i in 0..=self.last_input.length - down_to {
let idx = self.last_input.length - i;
let mut do_prune = false;
let mut do_compile = false;
let tmp = UnCompiledNode::new(self, 0);
let mut parent = mem::replace(&mut self.frontier[idx - 1], tmp);
if self.frontier[idx].input_count < self.min_suffix_count1 as i64 {
do_prune = true;
do_compile = true;
} else if idx > prefix_len_plus1 {
// prune if parent's input_count is less than suffix_min_count2
if parent.input_count < self.min_suffix_count2 as i64
|| (self.min_suffix_count2 == 1 && parent.input_count == 1 && idx > 1)
{
// my parent, about to be compiled, doesn't make the cut, so
// I'm definitely pruned
// if minSuffixCount2 is 1, we keep only up
// until the 'distinguished edge', ie we keep only the
// 'divergent' part of the FST. if my parent, about to be
// compiled, has inputCount 1 then we are already past the
// distinguished edge. NOTE: this only works if
// the FST outputs are not "compressible" (simple
// ords ARE compressible).
do_prune = true;
} else {
// my parent, about to be compiled, does make the cut, so
// I'm definitely not pruned
do_prune = false;
}
do_compile = true;
} else {
// if pruning is disabled (count is 0) we can always
// compile current node
do_compile = self.min_suffix_count2 == 0;
}
if self.frontier[idx].input_count < self.min_suffix_count2 as i64
|| (self.min_suffix_count2 == 1 && self.frontier[idx].input_count == 1 && idx > 1)
{
// drop all arcs
for arc_idx in 0..self.frontier[idx].num_arcs {
if let Node::UnCompiled(target) = self.frontier[idx].arcs[arc_idx].target {
self.frontier[target].clear();
}
}
self.frontier[idx].num_arcs = 0;
}
if do_prune {
// this node doesn't make it -- deref it
self.frontier[idx].clear();
parent.delete_last(self.last_input.int_at(idx - 1), &Node::UnCompiled(idx));
} else {
if self.min_suffix_count2 != 0 {
let tail_len = self.last_input.length - idx;
self.compile_all_targets(idx, tail_len)?;
}
let next_final_output = self.frontier[idx].output.clone();
// We "fake" the node as being final if it has no
// outgoing arcs; in theory we could leave it
// as non-final (the FST can represent this), but
// FSTEnum, Util, etc., have trouble w/ non-final
// dead-end states:
let is_final = self.frontier[idx].is_final || self.frontier[idx].num_arcs == 0;
if do_compile {
// this node makes it and we now compile it. first,
// compile any targets that were previously
// undecided:
let tail_len = (1 + self.last_input.length - idx) as u32;
let n = self.compile_node(idx, tail_len)?;
parent.replace_last(
self.last_input.int_at(idx - 1),
Node::Compiled(n),
next_final_output,
is_final,
);
} else {
// replaceLast just to install
// next_final_output/is_final onto the arc
parent.replace_last(
self.last_input.int_at(idx - 1),
Node::UnCompiled(0), // a stub node,
next_final_output,
is_final,
);
// this node will stay in play for now, since we are
// undecided on whether to prune it. later, it
// will be either compiled or pruned, so we must
// allocate a new node:
self.frontier[idx] = UnCompiledNode::new(self, idx as i32);
}
}
self.frontier[idx - 1] = parent;
}
Ok(())
}
/// Add the next input/output pair. The provided input
/// must be sorted after the previous one according to
/// `IntsRef#compareTo`. It's also OK to add the same
/// input twice in a row with different outputs, as long
/// as `OutputFactory` implements the `OutputFactory#merge`
/// method. Note that input is fully consumed after this
/// method is returned (so caller is free to reuse), but
/// output is not. So if your outputs are changeable (eg
/// `ByteSequenceOutputs`) then you cannot reuse across
/// calls.
pub fn add(&mut self, input: IntsRef, output: F::Value) -> Result<()> {
debug_assert!(self.inited);
assert!(self.last_input.length == 0 || input > self.last_input.get());
let mut output = output;
if self.frontier.len() < input.length + 1 {
for i in self.frontier.len()..input.length + 2 {
let node = UnCompiledNode::new(self, i as i32);
self.frontier.push(node);
}
}
if input.length == 0 {
// empty input: only allowed as first input. we have
// to special case this because the packed FST
// format cannot represent the empty input since
// 'finalness' is stored on the incoming arc, not on
// the node
self.frontier[0].input_count += 1;
self.frontier[0].is_final = true;
self.fst.set_empty_output(output);
return Ok(());
}
// compare shared prefix length
let mut pos1 = 0;
let mut pos2 = input.offset;
let pos1_stop = min(self.last_input.length, input.length);
loop {
self | {
let no_output = outputs.empty();
let fst = FST::new(input_type, outputs, bytes_page_bits as usize);
FstBuilder {
dedup_hash: None,
fst,
no_output,
min_suffix_count1,
min_suffix_count2,
do_share_non_singleton_nodes,
share_max_tail_length,
last_input: IntsRefBuilder::new(),
frontier: Vec::with_capacity(10),
last_frozen_node: 0,
reused_bytes_per_arc: Vec::with_capacity(4),
arc_count: 0,
node_count: 0,
allow_array_arcs,
do_share_suffix, | identifier_body | |
fst_builder.rs | next_final_output,
is_final,
);
} else {
// replaceLast just to install
// next_final_output/is_final onto the arc
parent.replace_last(
self.last_input.int_at(idx - 1),
Node::UnCompiled(0), // a stub node,
next_final_output,
is_final,
);
// this node will stay in play for now, since we are
// undecided on whether to prune it. later, it
// will be either compiled or pruned, so we must
// allocate a new node:
self.frontier[idx] = UnCompiledNode::new(self, idx as i32);
}
}
self.frontier[idx - 1] = parent;
}
Ok(())
}
/// Add the next input/output pair. The provided input
/// must be sorted after the previous one according to
/// `IntsRef#compareTo`. It's also OK to add the same
/// input twice in a row with different outputs, as long
/// as `OutputFactory` implements the `OutputFactory#merge`
/// method. Note that input is fully consumed after this
/// method is returned (so caller is free to reuse), but
/// output is not. So if your outputs are changeable (eg
/// `ByteSequenceOutputs`) then you cannot reuse across
/// calls.
pub fn add(&mut self, input: IntsRef, output: F::Value) -> Result<()> {
debug_assert!(self.inited);
assert!(self.last_input.length == 0 || input > self.last_input.get());
let mut output = output;
if self.frontier.len() < input.length + 1 {
for i in self.frontier.len()..input.length + 2 {
let node = UnCompiledNode::new(self, i as i32);
self.frontier.push(node);
}
}
if input.length == 0 {
// empty input: only allowed as first input. we have
// to special case this because the packed FST
// format cannot represent the empty input since
// 'finalness' is stored on the incoming arc, not on
// the node
self.frontier[0].input_count += 1;
self.frontier[0].is_final = true;
self.fst.set_empty_output(output);
return Ok(());
}
// compare shared prefix length
let mut pos1 = 0;
let mut pos2 = input.offset;
let pos1_stop = min(self.last_input.length, input.length);
loop {
self.frontier[pos1].input_count += 1;
if pos1 >= pos1_stop || self.last_input.int_at(pos1) != input.ints()[pos2] {
break;
}
pos1 += 1;
pos2 += 1;
}
let prefix_len_plus1 = pos1 + 1;
// minimize/compile states from previous input's
// orphan'd suffix
self.freeze_tail(prefix_len_plus1)?;
// init tail states for current input
for i in prefix_len_plus1..=input.length {
let node = Node::UnCompiled(i);
self.frontier[i - 1].add_arc(input.ints()[input.offset + i - 1], node);
self.frontier[i].input_count += 1;
}
let last_idx = input.length;
if self.last_input.length != input.length || prefix_len_plus1 != input.length + 1 {
self.frontier[last_idx].is_final = true;
self.frontier[last_idx].output = self.no_output.clone();
}
// push conflicting outputs forward, only as far as needed
for i in 1..prefix_len_plus1 {
let last_output = self.frontier[i - 1]
.get_last_output(input.ints()[input.offset + i - 1])
.clone();
let common_output_prefix: F::Value;
if last_output != self.no_output {
common_output_prefix = self.fst.outputs().common(&output, &last_output);
let word_suffix = self
.fst
.outputs()
.subtract(&last_output, &common_output_prefix);
self.frontier[i].prepend_output(word_suffix);
} else {
common_output_prefix = self.no_output.clone();
}
output = self.fst.outputs().subtract(&output, &common_output_prefix);
if last_output != self.no_output {
self.frontier[i - 1]
.set_last_output(input.ints()[input.offset + i - 1], common_output_prefix);
}
}
if self.last_input.length == input.length && prefix_len_plus1 == input.length + 1 {
// same input more than 1 time in a row, mapping to
// multiple outputs
self.frontier[last_idx].output = self
.fst
.outputs()
.merge(&self.frontier[last_idx].output, &output);
} else {
// this new arc is private to this new input; set its
// arc output to the leftover output:
self.frontier[prefix_len_plus1 - 1]
.set_last_output(input.ints()[input.offset + prefix_len_plus1 - 1], output);
}
// save last input
self.last_input.copy_ints_ref(&input);
Ok(())
}
// Returns final FST. NOTE: this will return None if nothing is accepted by the fst
pub fn finish(&mut self) -> Result<Option<FST<F>>> {
debug_assert!(self.inited);
// minimize nodes in the last word's suffix
self.freeze_tail(0)?;
if self.frontier[0].input_count < self.min_suffix_count1 as i64
|| self.frontier[0].input_count < self.min_suffix_count2 as i64
|| self.frontier[0].num_arcs == 0
{
if self.fst.empty_output.is_none()
|| (self.min_suffix_count1 > 0 || self.min_suffix_count2 > 0)
{
return Ok(None);
}
} else if self.min_suffix_count2 != 0 {
let tail_len = self.last_input.length;
self.compile_all_targets(0, tail_len)?;
}
let node = {
let tail_len = self.last_input.length as u32;
self.compile_node(0, tail_len)?
};
self.fst.finish(node)?;
// create a tmp for mem replace
let tmp_fst = FST::new(self.fst.input_type, self.fst.outputs().clone(), 1);
let fst = mem::replace(&mut self.fst, tmp_fst);
Ok(Some(fst))
}
fn compile_all_targets(&mut self, node_idx: usize, tail_length: usize) -> Result<()> {
for i in 0..self.frontier[node_idx].num_arcs {
if let Node::UnCompiled(index) = self.frontier[node_idx].arcs[i].target {
// not yet compiled
if self.frontier[index].num_arcs == 0 {
self.frontier[node_idx].arcs[i].is_final = true;
self.frontier[index].is_final = true;
}
self.frontier[node_idx].arcs[i].target =
Node::Compiled(self.compile_node(index, tail_length as u32 - 1)? as i64);
}
}
Ok(())
}
}
pub struct BuilderArc<F: OutputFactory> {
pub label: i32,
pub target: Node,
pub is_final: bool,
pub output: F::Value,
pub next_final_output: F::Value,
}
impl<F: OutputFactory> fmt::Debug for BuilderArc<F> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let target = match self.target {
Node::Compiled(c) => format!("Compiled({})", c),
Node::UnCompiled(_) => "UnCompiled".to_string(),
};
write!(
f,
"BuilderArc(label: {}, is_final: {}, output: {:?}, next_final_output: {:?}, target: \
{})",
self.label, self.is_final, self.output, self.next_final_output, target
)
}
}
impl<F> Clone for BuilderArc<F>
where
F: OutputFactory,
{
fn clone(&self) -> Self {
BuilderArc {
label: self.label,
target: self.target.clone(),
is_final: self.is_final,
output: self.output.clone(),
next_final_output: self.next_final_output.clone(),
}
}
}
/// used to dedup states (lookup already-frozen states)
struct NodeHash<F: OutputFactory> {
table: PagedGrowableWriter,
count: usize,
mask: usize,
fst: *mut FST<F>,
input: StoreBytesReader,
}
impl<F: OutputFactory> NodeHash<F> {
pub fn new(fst: &mut FST<F>, input: StoreBytesReader) -> Self {
let table = PagedGrowableWriter::new(16, 1 << 27, 8, COMPACT);
NodeHash {
table,
count: 0,
mask: 15,
fst: fst as *mut FST<F>,
input,
}
}
#[allow(clippy::mut_from_ref)]
fn | fst | identifier_name | |
fst_builder.rs | Outputs`) then you cannot reuse across
/// calls.
pub fn add(&mut self, input: IntsRef, output: F::Value) -> Result<()> {
debug_assert!(self.inited);
assert!(self.last_input.length == 0 || input > self.last_input.get());
let mut output = output;
if self.frontier.len() < input.length + 1 {
for i in self.frontier.len()..input.length + 2 {
let node = UnCompiledNode::new(self, i as i32);
self.frontier.push(node);
}
}
if input.length == 0 {
// empty input: only allowed as first input. we have
// to special case this because the packed FST
// format cannot represent the empty input since
// 'finalness' is stored on the incoming arc, not on
// the node
self.frontier[0].input_count += 1;
self.frontier[0].is_final = true;
self.fst.set_empty_output(output);
return Ok(());
}
// compare shared prefix length
let mut pos1 = 0;
let mut pos2 = input.offset;
let pos1_stop = min(self.last_input.length, input.length);
loop {
self.frontier[pos1].input_count += 1;
if pos1 >= pos1_stop || self.last_input.int_at(pos1) != input.ints()[pos2] {
break;
}
pos1 += 1;
pos2 += 1;
}
let prefix_len_plus1 = pos1 + 1;
// minimize/compile states from previous input's
// orphan'd suffix
self.freeze_tail(prefix_len_plus1)?;
// init tail states for current input
for i in prefix_len_plus1..=input.length {
let node = Node::UnCompiled(i);
self.frontier[i - 1].add_arc(input.ints()[input.offset + i - 1], node);
self.frontier[i].input_count += 1;
}
let last_idx = input.length;
if self.last_input.length != input.length || prefix_len_plus1 != input.length + 1 {
self.frontier[last_idx].is_final = true;
self.frontier[last_idx].output = self.no_output.clone();
}
// push conflicting outputs forward, only as far as needed
for i in 1..prefix_len_plus1 {
let last_output = self.frontier[i - 1]
.get_last_output(input.ints()[input.offset + i - 1])
.clone();
let common_output_prefix: F::Value;
if last_output != self.no_output {
common_output_prefix = self.fst.outputs().common(&output, &last_output);
let word_suffix = self
.fst
.outputs()
.subtract(&last_output, &common_output_prefix);
self.frontier[i].prepend_output(word_suffix);
} else {
common_output_prefix = self.no_output.clone();
}
output = self.fst.outputs().subtract(&output, &common_output_prefix);
if last_output != self.no_output {
self.frontier[i - 1]
.set_last_output(input.ints()[input.offset + i - 1], common_output_prefix);
}
}
if self.last_input.length == input.length && prefix_len_plus1 == input.length + 1 {
// same input more than 1 time in a row, mapping to
// multiple outputs
self.frontier[last_idx].output = self
.fst
.outputs()
.merge(&self.frontier[last_idx].output, &output);
} else {
// this new arc is private to this new input; set its
// arc output to the leftover output:
self.frontier[prefix_len_plus1 - 1]
.set_last_output(input.ints()[input.offset + prefix_len_plus1 - 1], output);
}
// save last input
self.last_input.copy_ints_ref(&input);
Ok(())
}
// Returns final FST. NOTE: this will return None if nothing is accepted by the fst
pub fn finish(&mut self) -> Result<Option<FST<F>>> {
debug_assert!(self.inited);
// minimize nodes in the last word's suffix
self.freeze_tail(0)?;
if self.frontier[0].input_count < self.min_suffix_count1 as i64
|| self.frontier[0].input_count < self.min_suffix_count2 as i64
|| self.frontier[0].num_arcs == 0
{
if self.fst.empty_output.is_none()
|| (self.min_suffix_count1 > 0 || self.min_suffix_count2 > 0)
{
return Ok(None);
}
} else if self.min_suffix_count2 != 0 {
let tail_len = self.last_input.length;
self.compile_all_targets(0, tail_len)?;
}
let node = {
let tail_len = self.last_input.length as u32;
self.compile_node(0, tail_len)?
};
self.fst.finish(node)?;
// create a tmp for mem replace
let tmp_fst = FST::new(self.fst.input_type, self.fst.outputs().clone(), 1);
let fst = mem::replace(&mut self.fst, tmp_fst);
Ok(Some(fst))
}
fn compile_all_targets(&mut self, node_idx: usize, tail_length: usize) -> Result<()> {
for i in 0..self.frontier[node_idx].num_arcs {
if let Node::UnCompiled(index) = self.frontier[node_idx].arcs[i].target {
// not yet compiled
if self.frontier[index].num_arcs == 0 {
self.frontier[node_idx].arcs[i].is_final = true;
self.frontier[index].is_final = true;
}
self.frontier[node_idx].arcs[i].target =
Node::Compiled(self.compile_node(index, tail_length as u32 - 1)? as i64);
}
}
Ok(())
}
}
pub struct BuilderArc<F: OutputFactory> {
pub label: i32,
pub target: Node,
pub is_final: bool,
pub output: F::Value,
pub next_final_output: F::Value,
}
impl<F: OutputFactory> fmt::Debug for BuilderArc<F> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let target = match self.target {
Node::Compiled(c) => format!("Compiled({})", c),
Node::UnCompiled(_) => "UnCompiled".to_string(),
};
write!(
f,
"BuilderArc(label: {}, is_final: {}, output: {:?}, next_final_output: {:?}, target: \
{})",
self.label, self.is_final, self.output, self.next_final_output, target
)
}
}
impl<F> Clone for BuilderArc<F>
where
F: OutputFactory,
{
fn clone(&self) -> Self {
BuilderArc {
label: self.label,
target: self.target.clone(),
is_final: self.is_final,
output: self.output.clone(),
next_final_output: self.next_final_output.clone(),
}
}
}
/// used to dedup states (lookup already-frozen states)
struct NodeHash<F: OutputFactory> {
table: PagedGrowableWriter,
count: usize,
mask: usize,
fst: *mut FST<F>,
input: StoreBytesReader,
}
impl<F: OutputFactory> NodeHash<F> {
pub fn new(fst: &mut FST<F>, input: StoreBytesReader) -> Self {
let table = PagedGrowableWriter::new(16, 1 << 27, 8, COMPACT);
NodeHash {
table,
count: 0,
mask: 15,
fst: fst as *mut FST<F>,
input,
}
}
#[allow(clippy::mut_from_ref)]
fn fst(&self) -> &mut FST<F> {
unsafe { &mut (*self.fst) }
}
fn nodes_equal(&mut self, node: &UnCompiledNode<F>, address: CompiledAddress) -> Result<bool> {
let reader = &mut self.input as *mut StoreBytesReader;
let mut scratch_arc = unsafe { self.fst().read_first_real_arc(address, &mut *reader)? };
if scratch_arc.bytes_per_arc > 0 && node.num_arcs != scratch_arc.num_arcs {
return Ok(false);
}
for idx in 0..node.num_arcs {
let arc = &node.arcs[idx];
if arc.label != scratch_arc.label || arc.is_final != scratch_arc.is_final() {
return Ok(false);
}
if let Some(ref output) = scratch_arc.output {
if output != &arc.output {
return Ok(false);
}
} else if !arc.output.is_empty() {
return Ok(false);
}
if let Some(ref output) = scratch_arc.next_final_output {
if output != &arc.next_final_output {
return Ok(false);
}
} else if !arc.next_final_output.is_empty() {
return Ok(false);
}
if let Node::Compiled(ref node) = arc.target {
if *node != scratch_arc.target | {
return Ok(false);
} | conditional_block | |
fst_builder.rs | == 1 && parent.input_count == 1 && idx > 1)
{
// my parent, about to be compiled, doesn't make the cut, so
// I'm definitely pruned
// if minSuffixCount2 is 1, we keep only up
// until the 'distinguished edge', ie we keep only the
// 'divergent' part of the FST. if my parent, about to be
// compiled, has inputCount 1 then we are already past the
// distinguished edge. NOTE: this only works if
// the FST outputs are not "compressible" (simple
// ords ARE compressible).
do_prune = true;
} else {
// my parent, about to be compiled, does make the cut, so
// I'm definitely not pruned
do_prune = false;
}
do_compile = true;
} else {
// if pruning is disabled (count is 0) we can always
// compile current node
do_compile = self.min_suffix_count2 == 0;
}
if self.frontier[idx].input_count < self.min_suffix_count2 as i64
|| (self.min_suffix_count2 == 1 && self.frontier[idx].input_count == 1 && idx > 1)
{
// drop all arcs
for arc_idx in 0..self.frontier[idx].num_arcs {
if let Node::UnCompiled(target) = self.frontier[idx].arcs[arc_idx].target {
self.frontier[target].clear();
}
}
self.frontier[idx].num_arcs = 0;
}
if do_prune {
// this node doesn't make it -- deref it
self.frontier[idx].clear();
parent.delete_last(self.last_input.int_at(idx - 1), &Node::UnCompiled(idx));
} else {
if self.min_suffix_count2 != 0 {
let tail_len = self.last_input.length - idx;
self.compile_all_targets(idx, tail_len)?;
}
let next_final_output = self.frontier[idx].output.clone();
// We "fake" the node as being final if it has no
// outgoing arcs; in theory we could leave it
// as non-final (the FST can represent this), but
// FSTEnum, Util, etc., have trouble w/ non-final
// dead-end states:
let is_final = self.frontier[idx].is_final || self.frontier[idx].num_arcs == 0;
if do_compile {
// this node makes it and we now compile it. first,
// compile any targets that were previously
// undecided:
let tail_len = (1 + self.last_input.length - idx) as u32;
let n = self.compile_node(idx, tail_len)?;
parent.replace_last(
self.last_input.int_at(idx - 1),
Node::Compiled(n),
next_final_output,
is_final,
);
} else {
// replaceLast just to install
// next_final_output/is_final onto the arc
parent.replace_last(
self.last_input.int_at(idx - 1),
Node::UnCompiled(0), // a stub node,
next_final_output,
is_final,
);
// this node will stay in play for now, since we are
// undecided on whether to prune it. later, it
// will be either compiled or pruned, so we must
// allocate a new node:
self.frontier[idx] = UnCompiledNode::new(self, idx as i32);
}
}
self.frontier[idx - 1] = parent;
}
Ok(())
}
/// Add the next input/output pair. The provided input
/// must be sorted after the previous one according to
/// `IntsRef#compareTo`. It's also OK to add the same
/// input twice in a row with different outputs, as long
/// as `OutputFactory` implements the `OutputFactory#merge`
/// method. Note that input is fully consumed after this
/// method is returned (so caller is free to reuse), but
/// output is not. So if your outputs are changeable (eg
/// `ByteSequenceOutputs`) then you cannot reuse across
/// calls.
pub fn add(&mut self, input: IntsRef, output: F::Value) -> Result<()> {
debug_assert!(self.inited);
assert!(self.last_input.length == 0 || input > self.last_input.get());
let mut output = output;
if self.frontier.len() < input.length + 1 {
for i in self.frontier.len()..input.length + 2 {
let node = UnCompiledNode::new(self, i as i32);
self.frontier.push(node);
}
}
if input.length == 0 {
// empty input: only allowed as first input. we have
// to special case this because the packed FST
// format cannot represent the empty input since
// 'finalness' is stored on the incoming arc, not on
// the node
self.frontier[0].input_count += 1;
self.frontier[0].is_final = true;
self.fst.set_empty_output(output);
return Ok(());
}
// compare shared prefix length
let mut pos1 = 0;
let mut pos2 = input.offset;
let pos1_stop = min(self.last_input.length, input.length);
loop {
self.frontier[pos1].input_count += 1;
if pos1 >= pos1_stop || self.last_input.int_at(pos1) != input.ints()[pos2] {
break;
}
pos1 += 1;
pos2 += 1;
}
let prefix_len_plus1 = pos1 + 1;
// minimize/compile states from previous input's
// orphan'd suffix
self.freeze_tail(prefix_len_plus1)?;
// init tail states for current input
for i in prefix_len_plus1..=input.length {
let node = Node::UnCompiled(i);
self.frontier[i - 1].add_arc(input.ints()[input.offset + i - 1], node);
self.frontier[i].input_count += 1;
}
let last_idx = input.length;
if self.last_input.length != input.length || prefix_len_plus1 != input.length + 1 {
self.frontier[last_idx].is_final = true;
self.frontier[last_idx].output = self.no_output.clone();
}
// push conflicting outputs forward, only as far as needed
for i in 1..prefix_len_plus1 {
let last_output = self.frontier[i - 1]
.get_last_output(input.ints()[input.offset + i - 1])
.clone();
let common_output_prefix: F::Value;
if last_output != self.no_output {
common_output_prefix = self.fst.outputs().common(&output, &last_output);
let word_suffix = self
.fst
.outputs()
.subtract(&last_output, &common_output_prefix);
self.frontier[i].prepend_output(word_suffix);
} else {
common_output_prefix = self.no_output.clone();
}
output = self.fst.outputs().subtract(&output, &common_output_prefix);
if last_output != self.no_output {
self.frontier[i - 1]
.set_last_output(input.ints()[input.offset + i - 1], common_output_prefix);
}
}
if self.last_input.length == input.length && prefix_len_plus1 == input.length + 1 {
// same input more than 1 time in a row, mapping to
// multiple outputs
self.frontier[last_idx].output = self
.fst
.outputs()
.merge(&self.frontier[last_idx].output, &output);
} else {
// this new arc is private to this new input; set its
// arc output to the leftover output:
self.frontier[prefix_len_plus1 - 1]
.set_last_output(input.ints()[input.offset + prefix_len_plus1 - 1], output);
}
// save last input
self.last_input.copy_ints_ref(&input);
Ok(())
}
// Returns final FST. NOTE: this will return None if nothing is accepted by the fst
pub fn finish(&mut self) -> Result<Option<FST<F>>> {
debug_assert!(self.inited);
// minimize nodes in the last word's suffix
self.freeze_tail(0)?;
if self.frontier[0].input_count < self.min_suffix_count1 as i64
|| self.frontier[0].input_count < self.min_suffix_count2 as i64
|| self.frontier[0].num_arcs == 0
{
if self.fst.empty_output.is_none()
|| (self.min_suffix_count1 > 0 || self.min_suffix_count2 > 0)
{
return Ok(None);
}
} else if self.min_suffix_count2 != 0 {
let tail_len = self.last_input.length;
self.compile_all_targets(0, tail_len)?;
}
let node = {
let tail_len = self.last_input.length as u32; | self.compile_node(0, tail_len)?
};
self.fst.finish(node)?; | random_line_split | |
furnace.rs | use web_sys::CanvasRenderingContext2d;
const FUEL_CAPACITY: usize = 10;
/// A list of fixed recipes, because dynamic get_recipes() can only return a Vec.
static RECIPES: Lazy<[Recipe; 2]> = Lazy::new(|| {
[
Recipe::new(
hash_map!(ItemType::IronOre => 1usize),
hash_map!(ItemType::IronPlate => 1usize),
20.,
50.,
),
Recipe::new(
hash_map!(ItemType::CopperOre => 1usize),
hash_map!(ItemType::CopperPlate => 1usize),
20.,
50.,
),
]
});
#[derive(Serialize, Deserialize)]
pub(crate) struct Furnace {
position: Position,
input_inventory: Inventory,
output_inventory: Inventory,
progress: Option<f64>,
power: f64,
max_power: f64,
recipe: Option<Recipe>,
}
impl Furnace {
pub(crate) fn new(position: &Position) -> Self {
Furnace {
position: *position,
input_inventory: Inventory::new(),
output_inventory: Inventory::new(),
progress: None,
power: 20.,
max_power: 20.,
recipe: None,
}
}
}
impl Structure for Furnace {
fn name(&self) -> &str {
"Furnace"
}
fn position(&self) -> &Position {
&self.position
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_toolbar: bool,
) -> Result<(), JsValue> {
if depth != 0 {
return Ok(());
};
let (x, y) = (self.position.x as f64 * 32., self.position.y as f64 * 32.);
match state.image_furnace.as_ref() {
Some(img) => {
let sx = if self.progress.is_some() && 0. < self.power {
((((state.sim_time * 5.) as isize) % 2 + 1) * 32) as f64
} else {
0.
};
context.draw_image_with_image_bitmap_and_sw_and_sh_and_dx_and_dy_and_dw_and_dh(
&img.bitmap,
sx,
0.,
32.,
32.,
x,
y,
32.,
32.,
)?;
}
None => return Err(JsValue::from_str("furnace image not available")),
}
if !is_toolbar {
crate::draw_fuel_alarm!(self, state, context);
}
Ok(())
}
fn desc(&self, _state: &FactorishState) -> String {
format!(
"{}<br>{}{}",
if self.recipe.is_some() {
// Progress bar
format!("{}{}{}{}",
format!("Progress: {:.0}%<br>", self.progress.unwrap_or(0.) * 100.),
"<div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>",
format!("<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>",
self.progress.unwrap_or(0.) * 100.),
format!(r#"Power: {:.1}kJ <div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>
<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>"#,
self.power,
if 0. < self.max_power { (self.power) / self.max_power * 100. } else { 0. }),
)
// getHTML(generateItemImage("time", true, this.recipe.time), true) + "<br>" +
// "Outputs: <br>" +
// getHTML(generateItemImage(this.recipe.output, true, 1), true) + "<br>";
} else {
String::from("No recipe")
},
format!("Input Items: <br>{}", self.input_inventory.describe()),
format!("Output Items: <br>{}", self.output_inventory.describe())
)
}
fn frame_proc(
&mut self,
_me: StructureId,
state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
if self.recipe.is_none() {
self.recipe = RECIPES
.iter()
.find(|recipe| {
recipe
.input
.iter()
.all(|(type_, count)| *count <= self.input_inventory.count_item(&type_))
})
.cloned();
}
if let Some(recipe) = &self.recipe {
let mut ret = FrameProcResult::None;
// First, check if we need to refill the energy buffer in order to continue the current work.
if self.input_inventory.get(&ItemType::CoalOre).is_some() {
// Refill the energy from the fuel
if self.power < recipe.power_cost {
self.power += COAL_POWER;
self.max_power = self.power;
self.input_inventory.remove_item(&ItemType::CoalOre);
ret = FrameProcResult::InventoryChanged(self.position);
}
}
if self.progress.is_none() {
// First, check if we have enough ingredients to finish this recipe.
// If we do, consume the ingredients and start the progress timer.
// We can't start as soon as the recipe is set because we may not have enough ingredients
// at the point we set the recipe.
if recipe
.input
.iter()
.map(|(item, count)| count <= &self.input_inventory.count_item(item))
.all(|b| b)
{
for (item, count) in &recipe.input {
self.input_inventory.remove_items(item, *count);
}
self.progress = Some(0.);
ret = FrameProcResult::InventoryChanged(self.position);
} else {
self.recipe = None;
return Ok(FrameProcResult::None); // Return here to avoid borrow checker
}
}
if let Some(prev_progress) = self.progress {
// Proceed only if we have sufficient energy in the buffer.
let progress = (self.power / recipe.power_cost)
.min(1. / recipe.recipe_time)
.min(1.);
if state.rng.next() < progress * 10. {
state
.temp_ents
.push(TempEnt::new(&mut state.rng, self.position));
}
if 1. <= prev_progress + progress {
self.progress = None;
// Produce outputs into inventory
for output_item in &recipe.output {
self.output_inventory.add_item(&output_item.0);
}
return Ok(FrameProcResult::InventoryChanged(self.position));
} else {
self.progress = Some(prev_progress + progress);
self.power -= progress * recipe.power_cost;
}
}
return Ok(ret);
}
Ok(FrameProcResult::None)
}
fn input(&mut self, o: &DropItem) -> Result<(), JsValue> {
// Fuels are always welcome.
if o.type_ == ItemType::CoalOre
&& self.input_inventory.count_item(&ItemType::CoalOre) < FUEL_CAPACITY
{
self.input_inventory.add_item(&ItemType::CoalOre);
return Ok(());
}
if self.recipe.is_none() {
match o.type_ {
ItemType::IronOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::IronOre => 1usize),
hash_map!(ItemType::IronPlate => 1usize),
20.,
50.,
));
}
ItemType::CopperOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::CopperOre => 1usize),
hash_map!(ItemType::CopperPlate => 1usize),
20.,
50.,
));
}
_ => {
return Err(JsValue::from_str(&format!(
"Cannot smelt {}",
item_to_str(&o.type_)
)))
}
}
}
if let Some(recipe) = &self.recipe {
if 0 < recipe.input.count_item(&o.type_) || 0 < recipe.output.count_item(&o.type_) {
self.input_inventory.add_item(&o.type_);
return Ok(());
} else {
return Err(JsValue::from_str("Item is not part of recipe"));
}
}
Err(JsValue::from_str("Recipe is not initialized"))
}
fn can_input(&self, item_type: &ItemType) -> bool {
if *item_type == ItemType::CoalOre
&& self.input_inventory.count_item | use wasm_bindgen::prelude::*; | random_line_split | |
furnace.rs | (position: &Position) -> Self {
Furnace {
position: *position,
input_inventory: Inventory::new(),
output_inventory: Inventory::new(),
progress: None,
power: 20.,
max_power: 20.,
recipe: None,
}
}
}
impl Structure for Furnace {
fn name(&self) -> &str {
"Furnace"
}
fn position(&self) -> &Position {
&self.position
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_toolbar: bool,
) -> Result<(), JsValue> {
if depth != 0 {
return Ok(());
};
let (x, y) = (self.position.x as f64 * 32., self.position.y as f64 * 32.);
match state.image_furnace.as_ref() {
Some(img) => {
let sx = if self.progress.is_some() && 0. < self.power {
((((state.sim_time * 5.) as isize) % 2 + 1) * 32) as f64
} else {
0.
};
context.draw_image_with_image_bitmap_and_sw_and_sh_and_dx_and_dy_and_dw_and_dh(
&img.bitmap,
sx,
0.,
32.,
32.,
x,
y,
32.,
32.,
)?;
}
None => return Err(JsValue::from_str("furnace image not available")),
}
if !is_toolbar {
crate::draw_fuel_alarm!(self, state, context);
}
Ok(())
}
fn desc(&self, _state: &FactorishState) -> String {
format!(
"{}<br>{}{}",
if self.recipe.is_some() {
// Progress bar
format!("{}{}{}{}",
format!("Progress: {:.0}%<br>", self.progress.unwrap_or(0.) * 100.),
"<div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>",
format!("<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>",
self.progress.unwrap_or(0.) * 100.),
format!(r#"Power: {:.1}kJ <div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>
<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>"#,
self.power,
if 0. < self.max_power { (self.power) / self.max_power * 100. } else { 0. }),
)
// getHTML(generateItemImage("time", true, this.recipe.time), true) + "<br>" +
// "Outputs: <br>" +
// getHTML(generateItemImage(this.recipe.output, true, 1), true) + "<br>";
} else {
String::from("No recipe")
},
format!("Input Items: <br>{}", self.input_inventory.describe()),
format!("Output Items: <br>{}", self.output_inventory.describe())
)
}
fn frame_proc(
&mut self,
_me: StructureId,
state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
if self.recipe.is_none() {
self.recipe = RECIPES
.iter()
.find(|recipe| {
recipe
.input
.iter()
.all(|(type_, count)| *count <= self.input_inventory.count_item(&type_))
})
.cloned();
}
if let Some(recipe) = &self.recipe {
let mut ret = FrameProcResult::None;
// First, check if we need to refill the energy buffer in order to continue the current work.
if self.input_inventory.get(&ItemType::CoalOre).is_some() {
// Refill the energy from the fuel
if self.power < recipe.power_cost {
self.power += COAL_POWER;
self.max_power = self.power;
self.input_inventory.remove_item(&ItemType::CoalOre);
ret = FrameProcResult::InventoryChanged(self.position);
}
}
if self.progress.is_none() {
// First, check if we have enough ingredients to finish this recipe.
// If we do, consume the ingredients and start the progress timer.
// We can't start as soon as the recipe is set because we may not have enough ingredients
// at the point we set the recipe.
if recipe
.input
.iter()
.map(|(item, count)| count <= &self.input_inventory.count_item(item))
.all(|b| b)
{
for (item, count) in &recipe.input {
self.input_inventory.remove_items(item, *count);
}
self.progress = Some(0.);
ret = FrameProcResult::InventoryChanged(self.position);
} else {
self.recipe = None;
return Ok(FrameProcResult::None); // Return here to avoid borrow checker
}
}
if let Some(prev_progress) = self.progress {
// Proceed only if we have sufficient energy in the buffer.
let progress = (self.power / recipe.power_cost)
.min(1. / recipe.recipe_time)
.min(1.);
if state.rng.next() < progress * 10. {
state
.temp_ents
.push(TempEnt::new(&mut state.rng, self.position));
}
if 1. <= prev_progress + progress {
self.progress = None;
// Produce outputs into inventory
for output_item in &recipe.output {
self.output_inventory.add_item(&output_item.0);
}
return Ok(FrameProcResult::InventoryChanged(self.position));
} else {
self.progress = Some(prev_progress + progress);
self.power -= progress * recipe.power_cost;
}
}
return Ok(ret);
}
Ok(FrameProcResult::None)
}
fn input(&mut self, o: &DropItem) -> Result<(), JsValue> {
// Fuels are always welcome.
if o.type_ == ItemType::CoalOre
&& self.input_inventory.count_item(&ItemType::CoalOre) < FUEL_CAPACITY
{
self.input_inventory.add_item(&ItemType::CoalOre);
return Ok(());
}
if self.recipe.is_none() {
match o.type_ {
ItemType::IronOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::IronOre => 1usize),
hash_map!(ItemType::IronPlate => 1usize),
20.,
50.,
));
}
ItemType::CopperOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::CopperOre => 1usize),
hash_map!(ItemType::CopperPlate => 1usize),
20.,
50.,
));
}
_ => {
return Err(JsValue::from_str(&format!(
"Cannot smelt {}",
item_to_str(&o.type_)
)))
}
}
}
if let Some(recipe) = &self.recipe {
if 0 < recipe.input.count_item(&o.type_) || 0 < recipe.output.count_item(&o.type_) {
self.input_inventory.add_item(&o.type_);
return Ok(());
} else {
return Err(JsValue::from_str("Item is not part of recipe"));
}
}
Err(JsValue::from_str("Recipe is not initialized"))
}
fn can_input(&self, item_type: &ItemType) -> bool {
if *item_type == ItemType::CoalOre
&& self.input_inventory.count_item(item_type) < FUEL_CAPACITY
{
return true;
}
if let Some(recipe) = &self.recipe {
recipe.input.get(item_type).is_some()
} else {
matches!(item_type, ItemType::IronOre | ItemType::CopperOre)
}
}
fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
self.output_inventory.clone()
}
fn output(&mut self, _state: &mut FactorishState, item_type: &ItemType) -> Result<(), ()> {
if self.output_inventory.remove_item(item_type) {
Ok(())
} else {
Err(())
}
}
fn inventory(&self, is_input: bool) -> Option<&Inventory> {
Some(if is_input {
&self.input_inventory
} else {
&self.output_inventory
})
}
fn inventory_mut(&mut self, is_input: bool) -> Option<&mut Inventory> {
Some(if is_input {
&mut self.input_inventory
} else | {
&mut self.output_inventory
} | conditional_block | |
furnace.rs | usize),
20.,
50.,
),
]
});
#[derive(Serialize, Deserialize)]
pub(crate) struct Furnace {
position: Position,
input_inventory: Inventory,
output_inventory: Inventory,
progress: Option<f64>,
power: f64,
max_power: f64,
recipe: Option<Recipe>,
}
impl Furnace {
pub(crate) fn new(position: &Position) -> Self {
Furnace {
position: *position,
input_inventory: Inventory::new(),
output_inventory: Inventory::new(),
progress: None,
power: 20.,
max_power: 20.,
recipe: None,
}
}
}
impl Structure for Furnace {
fn name(&self) -> &str {
"Furnace"
}
fn position(&self) -> &Position {
&self.position
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_toolbar: bool,
) -> Result<(), JsValue> {
if depth != 0 {
return Ok(());
};
let (x, y) = (self.position.x as f64 * 32., self.position.y as f64 * 32.);
match state.image_furnace.as_ref() {
Some(img) => {
let sx = if self.progress.is_some() && 0. < self.power {
((((state.sim_time * 5.) as isize) % 2 + 1) * 32) as f64
} else {
0.
};
context.draw_image_with_image_bitmap_and_sw_and_sh_and_dx_and_dy_and_dw_and_dh(
&img.bitmap,
sx,
0.,
32.,
32.,
x,
y,
32.,
32.,
)?;
}
None => return Err(JsValue::from_str("furnace image not available")),
}
if !is_toolbar {
crate::draw_fuel_alarm!(self, state, context);
}
Ok(())
}
fn desc(&self, _state: &FactorishState) -> String {
format!(
"{}<br>{}{}",
if self.recipe.is_some() {
// Progress bar
format!("{}{}{}{}",
format!("Progress: {:.0}%<br>", self.progress.unwrap_or(0.) * 100.),
"<div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>",
format!("<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>",
self.progress.unwrap_or(0.) * 100.),
format!(r#"Power: {:.1}kJ <div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>
<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>"#,
self.power,
if 0. < self.max_power { (self.power) / self.max_power * 100. } else { 0. }),
)
// getHTML(generateItemImage("time", true, this.recipe.time), true) + "<br>" +
// "Outputs: <br>" +
// getHTML(generateItemImage(this.recipe.output, true, 1), true) + "<br>";
} else {
String::from("No recipe")
},
format!("Input Items: <br>{}", self.input_inventory.describe()),
format!("Output Items: <br>{}", self.output_inventory.describe())
)
}
fn frame_proc(
&mut self,
_me: StructureId,
state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
if self.recipe.is_none() {
self.recipe = RECIPES
.iter()
.find(|recipe| {
recipe
.input
.iter()
.all(|(type_, count)| *count <= self.input_inventory.count_item(&type_))
})
.cloned();
}
if let Some(recipe) = &self.recipe {
let mut ret = FrameProcResult::None;
// First, check if we need to refill the energy buffer in order to continue the current work.
if self.input_inventory.get(&ItemType::CoalOre).is_some() {
// Refill the energy from the fuel
if self.power < recipe.power_cost {
self.power += COAL_POWER;
self.max_power = self.power;
self.input_inventory.remove_item(&ItemType::CoalOre);
ret = FrameProcResult::InventoryChanged(self.position);
}
}
if self.progress.is_none() {
// First, check if we have enough ingredients to finish this recipe.
// If we do, consume the ingredients and start the progress timer.
// We can't start as soon as the recipe is set because we may not have enough ingredients
// at the point we set the recipe.
if recipe
.input
.iter()
.map(|(item, count)| count <= &self.input_inventory.count_item(item))
.all(|b| b)
{
for (item, count) in &recipe.input {
self.input_inventory.remove_items(item, *count);
}
self.progress = Some(0.);
ret = FrameProcResult::InventoryChanged(self.position);
} else {
self.recipe = None;
return Ok(FrameProcResult::None); // Return here to avoid borrow checker
}
}
if let Some(prev_progress) = self.progress {
// Proceed only if we have sufficient energy in the buffer.
let progress = (self.power / recipe.power_cost)
.min(1. / recipe.recipe_time)
.min(1.);
if state.rng.next() < progress * 10. {
state
.temp_ents
.push(TempEnt::new(&mut state.rng, self.position));
}
if 1. <= prev_progress + progress {
self.progress = None;
// Produce outputs into inventory
for output_item in &recipe.output {
self.output_inventory.add_item(&output_item.0);
}
return Ok(FrameProcResult::InventoryChanged(self.position));
} else {
self.progress = Some(prev_progress + progress);
self.power -= progress * recipe.power_cost;
}
}
return Ok(ret);
}
Ok(FrameProcResult::None)
}
fn input(&mut self, o: &DropItem) -> Result<(), JsValue> {
// Fuels are always welcome.
if o.type_ == ItemType::CoalOre
&& self.input_inventory.count_item(&ItemType::CoalOre) < FUEL_CAPACITY
{
self.input_inventory.add_item(&ItemType::CoalOre);
return Ok(());
}
if self.recipe.is_none() {
match o.type_ {
ItemType::IronOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::IronOre => 1usize),
hash_map!(ItemType::IronPlate => 1usize),
20.,
50.,
));
}
ItemType::CopperOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::CopperOre => 1usize),
hash_map!(ItemType::CopperPlate => 1usize),
20.,
50.,
));
}
_ => {
return Err(JsValue::from_str(&format!(
"Cannot smelt {}",
item_to_str(&o.type_)
)))
}
}
}
if let Some(recipe) = &self.recipe {
if 0 < recipe.input.count_item(&o.type_) || 0 < recipe.output.count_item(&o.type_) {
self.input_inventory.add_item(&o.type_);
return Ok(());
} else {
return Err(JsValue::from_str("Item is not part of recipe"));
}
}
Err(JsValue::from_str("Recipe is not initialized"))
}
fn can_input(&self, item_type: &ItemType) -> bool {
if *item_type == ItemType::CoalOre
&& self.input_inventory.count_item(item_type) < FUEL_CAPACITY
{
return true;
}
if let Some(recipe) = &self.recipe {
recipe.input.get(item_type).is_some()
} else {
matches!(item_type, ItemType::IronOre | ItemType::CopperOre)
}
}
fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
self.output_inventory.clone()
}
fn output(&mut self, _state: &mut FactorishState, item_type: &ItemType) -> Result<(), ()> | {
if self.output_inventory.remove_item(item_type) {
Ok(())
} else {
Err(())
}
} | identifier_body | |
furnace.rs | ,
progress: Option<f64>,
power: f64,
max_power: f64,
recipe: Option<Recipe>,
}
impl Furnace {
pub(crate) fn new(position: &Position) -> Self {
Furnace {
position: *position,
input_inventory: Inventory::new(),
output_inventory: Inventory::new(),
progress: None,
power: 20.,
max_power: 20.,
recipe: None,
}
}
}
impl Structure for Furnace {
fn name(&self) -> &str {
"Furnace"
}
fn position(&self) -> &Position {
&self.position
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_toolbar: bool,
) -> Result<(), JsValue> {
if depth != 0 {
return Ok(());
};
let (x, y) = (self.position.x as f64 * 32., self.position.y as f64 * 32.);
match state.image_furnace.as_ref() {
Some(img) => {
let sx = if self.progress.is_some() && 0. < self.power {
((((state.sim_time * 5.) as isize) % 2 + 1) * 32) as f64
} else {
0.
};
context.draw_image_with_image_bitmap_and_sw_and_sh_and_dx_and_dy_and_dw_and_dh(
&img.bitmap,
sx,
0.,
32.,
32.,
x,
y,
32.,
32.,
)?;
}
None => return Err(JsValue::from_str("furnace image not available")),
}
if !is_toolbar {
crate::draw_fuel_alarm!(self, state, context);
}
Ok(())
}
fn desc(&self, _state: &FactorishState) -> String {
format!(
"{}<br>{}{}",
if self.recipe.is_some() {
// Progress bar
format!("{}{}{}{}",
format!("Progress: {:.0}%<br>", self.progress.unwrap_or(0.) * 100.),
"<div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>",
format!("<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>",
self.progress.unwrap_or(0.) * 100.),
format!(r#"Power: {:.1}kJ <div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>
<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>"#,
self.power,
if 0. < self.max_power { (self.power) / self.max_power * 100. } else { 0. }),
)
// getHTML(generateItemImage("time", true, this.recipe.time), true) + "<br>" +
// "Outputs: <br>" +
// getHTML(generateItemImage(this.recipe.output, true, 1), true) + "<br>";
} else {
String::from("No recipe")
},
format!("Input Items: <br>{}", self.input_inventory.describe()),
format!("Output Items: <br>{}", self.output_inventory.describe())
)
}
fn frame_proc(
&mut self,
_me: StructureId,
state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
if self.recipe.is_none() {
self.recipe = RECIPES
.iter()
.find(|recipe| {
recipe
.input
.iter()
.all(|(type_, count)| *count <= self.input_inventory.count_item(&type_))
})
.cloned();
}
if let Some(recipe) = &self.recipe {
let mut ret = FrameProcResult::None;
// First, check if we need to refill the energy buffer in order to continue the current work.
if self.input_inventory.get(&ItemType::CoalOre).is_some() {
// Refill the energy from the fuel
if self.power < recipe.power_cost {
self.power += COAL_POWER;
self.max_power = self.power;
self.input_inventory.remove_item(&ItemType::CoalOre);
ret = FrameProcResult::InventoryChanged(self.position);
}
}
if self.progress.is_none() {
// First, check if we have enough ingredients to finish this recipe.
// If we do, consume the ingredients and start the progress timer.
// We can't start as soon as the recipe is set because we may not have enough ingredients
// at the point we set the recipe.
if recipe
.input
.iter()
.map(|(item, count)| count <= &self.input_inventory.count_item(item))
.all(|b| b)
{
for (item, count) in &recipe.input {
self.input_inventory.remove_items(item, *count);
}
self.progress = Some(0.);
ret = FrameProcResult::InventoryChanged(self.position);
} else {
self.recipe = None;
return Ok(FrameProcResult::None); // Return here to avoid borrow checker
}
}
if let Some(prev_progress) = self.progress {
// Proceed only if we have sufficient energy in the buffer.
let progress = (self.power / recipe.power_cost)
.min(1. / recipe.recipe_time)
.min(1.);
if state.rng.next() < progress * 10. {
state
.temp_ents
.push(TempEnt::new(&mut state.rng, self.position));
}
if 1. <= prev_progress + progress {
self.progress = None;
// Produce outputs into inventory
for output_item in &recipe.output {
self.output_inventory.add_item(&output_item.0);
}
return Ok(FrameProcResult::InventoryChanged(self.position));
} else {
self.progress = Some(prev_progress + progress);
self.power -= progress * recipe.power_cost;
}
}
return Ok(ret);
}
Ok(FrameProcResult::None)
}
fn input(&mut self, o: &DropItem) -> Result<(), JsValue> {
// Fuels are always welcome.
if o.type_ == ItemType::CoalOre
&& self.input_inventory.count_item(&ItemType::CoalOre) < FUEL_CAPACITY
{
self.input_inventory.add_item(&ItemType::CoalOre);
return Ok(());
}
if self.recipe.is_none() {
match o.type_ {
ItemType::IronOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::IronOre => 1usize),
hash_map!(ItemType::IronPlate => 1usize),
20.,
50.,
));
}
ItemType::CopperOre => {
self.recipe = Some(Recipe::new(
hash_map!(ItemType::CopperOre => 1usize),
hash_map!(ItemType::CopperPlate => 1usize),
20.,
50.,
));
}
_ => {
return Err(JsValue::from_str(&format!(
"Cannot smelt {}",
item_to_str(&o.type_)
)))
}
}
}
if let Some(recipe) = &self.recipe {
if 0 < recipe.input.count_item(&o.type_) || 0 < recipe.output.count_item(&o.type_) {
self.input_inventory.add_item(&o.type_);
return Ok(());
} else {
return Err(JsValue::from_str("Item is not part of recipe"));
}
}
Err(JsValue::from_str("Recipe is not initialized"))
}
fn can_input(&self, item_type: &ItemType) -> bool {
if *item_type == ItemType::CoalOre
&& self.input_inventory.count_item(item_type) < FUEL_CAPACITY
{
return true;
}
if let Some(recipe) = &self.recipe {
recipe.input.get(item_type).is_some()
} else {
matches!(item_type, ItemType::IronOre | ItemType::CopperOre)
}
}
fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
self.output_inventory.clone()
}
fn output(&mut self, _state: &mut FactorishState, item_type: &ItemType) -> Result<(), ()> {
if self.output_inventory.remove_item(item_type) {
Ok(())
} else {
Err(())
}
}
fn inventory(&self, is_input: bool) -> Option<&Inventory> {
Some(if is_input {
&self.input_inventory
} else {
&self.output_inventory
})
}
fn | inventory_mut | identifier_name | |
config.go | ruconfig `json:"lru_config"`
Rebalance rebalanceconf `json:"rebalance_conf"`
Cksum cksumconfig `json:"cksum_config"`
Ver versionconfig `json:"version_config"`
FSpaths map[string]string `json:"fspaths"`
TestFSP testfspathconf `json:"test_fspaths"`
Net netconfig `json:"netconfig"`
FSKeeper fskeeperconf `json:"fskeeper"`
Experimental experimental `json:"experimental"`
H2c bool `json:"h2c"`
}
type logconfig struct {
Dir string `json:"logdir"` // log directory
Level string `json:"loglevel"` // log level aka verbosity
MaxSize uint64 `json:"logmaxsize"` // size that triggers log rotation
MaxTotal uint64 `json:"logmaxtotal"` // max total size of all the logs in the log directory
}
type periodic struct {
StatsTimeStr string `json:"stats_time"`
KeepAliveTimeStr string `json:"keep_alive_time"`
// omitempty
StatsTime time.Duration `json:"-"`
KeepAliveTime time.Duration `json:"-"`
}
// timeoutconfig contains timeouts used for intra-cluster communication
type timeoutconfig struct {
DefaultStr string `json:"default"`
Default time.Duration `json:"-"` // omitempty
DefaultLongStr string `json:"default_long"`
DefaultLong time.Duration `json:"-"` //
MaxKeepaliveStr string `json:"max_keepalive"`
MaxKeepalive time.Duration `json:"-"` //
ProxyPingStr string `json:"proxy_ping"`
ProxyPing time.Duration `json:"-"` //
VoteRequestStr string `json:"vote_request"`
VoteRequest time.Duration `json:"-"` //
}
type proxyconfig struct {
Primary proxycnf `json:"primary"`
Original proxycnf `json:"original"`
}
type proxycnf struct {
ID string `json:"id"` // used to register caching servers/other proxies
URL string `json:"url"` // used to register caching servers/other proxies
Passthru bool `json:"passthru"` // false: get then redirect, true (default): redirect right away
}
type lruconfig struct {
LowWM uint32 `json:"lowwm"` // capacity usage low watermark
HighWM uint32 `json:"highwm"` // capacity usage high watermark
AtimeCacheMax uint64 `json:"atime_cache_max"` // atime cache - max num entries
DontEvictTimeStr string `json:"dont_evict_time"` // eviction is not permitted during [atime, atime + dont]
CapacityUpdTimeStr string `json:"capacity_upd_time"` // min time to update capacity
DontEvictTime time.Duration `json:"-"` // omitempty
CapacityUpdTime time.Duration `json:"-"` // ditto
LRUEnabled bool `json:"lru_enabled"` // LRU will only run when LRUEnabled is true
}
type rebalanceconf struct {
StartupDelayTimeStr string `json:"startup_delay_time"`
StartupDelayTime time.Duration `json:"-"` // omitempty
RebalancingEnabled bool `json:"rebalancing_enabled"`
}
type testfspathconf struct {
Root string `json:"root"`
Count int `json:"count"`
Instance int `json:"instance"`
}
type netconfig struct {
IPv4 string `json:"ipv4"`
L4 l4cnf `json:"l4"`
HTTP httpcnf `json:"http"`
}
type l4cnf struct {
Proto string `json:"proto"` // tcp, udp
Port string `json:"port"` // listening port
}
type httpcnf struct {
MaxNumTargets int `json:"max_num_targets"` // estimated max num targets (to count idle conns)
UseHTTPS bool `json:"use_https"` // use HTTPS instead of HTTP
Certificate string `json:"server_certificate"` // HTTPS: openssl certificate
Key string `json:"server_key"` // HTTPS: openssl key
}
type cksumconfig struct {
Checksum string `json:"checksum"` // DFC checksum: xxhash:none
ValidateColdGet bool `json:"validate_cold_get"` // MD5 (ETag) validation upon cold GET
}
type versionconfig struct {
ValidateWarmGet bool `json:"validate_warm_get"` // True: validate object version upon warm GET
Versioning string `json:"versioning"` // types of objects versioning is enabled for: all, cloud, local, none
}
type fskeeperconf struct {
FSCheckTimeStr string `json:"fs_check_time"`
FSCheckTime time.Duration `json:"-"` // omitempty
OfflineFSCheckTimeStr string `json:"offline_fs_check_time"`
OfflineFSCheckTime time.Duration `json:"-"` // omitempty
Enabled bool `json:"fskeeper_enabled"`
}
type experimental struct {
AckPut string `json:"ack_put"`
MaxMemMB int `json:"max_mem_mb"` // max memory size for the "memory" option - FIXME: niy
}
//==============================
//
// config functions
//
//==============================
func initconfigparam() error {
getConfig(clivars.conffile)
err := flag.Lookup("log_dir").Value.Set(ctx.config.Log.Dir)
if err != nil {
glog.Errorf("Failed to flag-set glog dir %q, err: %v", ctx.config.Log.Dir, err)
}
if err = CreateDir(ctx.config.Log.Dir); err != nil {
glog.Errorf("Failed to create log dir %q, err: %v", ctx.config.Log.Dir, err)
return err
}
if err = validateconf(); err != nil {
return err
}
// glog rotate
glog.MaxSize = ctx.config.Log.MaxSize
if glog.MaxSize > GiB {
glog.Errorf("Log.MaxSize %d exceeded 1GB, setting the default 1MB", glog.MaxSize)
glog.MaxSize = MiB
}
// CLI override
if clivars.statstime != 0 {
ctx.config.Periodic.StatsTime = clivars.statstime
}
if clivars.proxyurl != "" {
ctx.config.Proxy.Primary.ID = ""
ctx.config.Proxy.Primary.URL = clivars.proxyurl
}
if clivars.loglevel != "" {
if err = setloglevel(clivars.loglevel); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", clivars.loglevel, err)
}
} else {
if err = setloglevel(ctx.config.Log.Level); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", ctx.config.Log.Level, err)
}
}
if build != "" {
glog.Infof("Build: %s", build) // git rev-parse --short HEAD
}
glog.Infof("Logdir: %q Proto: %s Port: %s Verbosity: %s",
ctx.config.Log.Dir, ctx.config.Net.L4.Proto, ctx.config.Net.L4.Port, ctx.config.Log.Level)
glog.Infof("Config: %q Role: %s StatsTime: %v", clivars.conffile, clivars.role, ctx.config.Periodic.StatsTime)
return err
}
func getConfig(fpath string) {
raw, err := ioutil.ReadFile(fpath)
if err != nil {
glog.Errorf("Failed to read config %q, err: %v", fpath, err)
os.Exit(1)
}
err = json.Unmarshal(raw, &ctx.config)
if err != nil {
glog.Errorf("Failed to json-unmarshal config %q, err: %v", fpath, err)
os.Exit(1)
}
}
func validateVersion(version string) error |
// StartupDelayTimeStr string `json:"startup_delay_time"`
// StartupDelayTime time.Duration `json:"-"` // omitempty
func validateconf() (err error) {
// durations
if ctx.config.Periodic.StatsTime, err = time.ParseDuration(ctx.config.Periodic.StatsTimeStr); err != nil {
return fmt.Errorf("Bad stats-time format %s, err: %v", ctx.config.Periodic.StatsTimeStr, err)
}
if ctx.config.Timeout.Default, err = time.ParseDuration(ctx.config.Timeout.DefaultStr); err != nil {
return fmt.Errorf("Bad Timeout default format %s, err: %v", ctx.config.Timeout.DefaultStr, err)
}
if ctx.config.Timeout.DefaultLong, err = time.ParseDuration(ctx.config.Timeout.DefaultLongStr); err != | {
versions := []string{VersionAll, VersionCloud, VersionLocal, VersionNone}
versionValid := false
for _, v := range versions {
if v == version {
versionValid = true
break
}
}
if !versionValid {
return fmt.Errorf("Invalid version: %s - expecting one of %s", version, strings.Join(versions, ", "))
}
return nil
} | identifier_body |
config.go | ruconfig `json:"lru_config"`
Rebalance rebalanceconf `json:"rebalance_conf"`
Cksum cksumconfig `json:"cksum_config"`
Ver versionconfig `json:"version_config"`
FSpaths map[string]string `json:"fspaths"`
TestFSP testfspathconf `json:"test_fspaths"`
Net netconfig `json:"netconfig"`
FSKeeper fskeeperconf `json:"fskeeper"`
Experimental experimental `json:"experimental"`
H2c bool `json:"h2c"`
}
type logconfig struct {
Dir string `json:"logdir"` // log directory
Level string `json:"loglevel"` // log level aka verbosity
MaxSize uint64 `json:"logmaxsize"` // size that triggers log rotation
MaxTotal uint64 `json:"logmaxtotal"` // max total size of all the logs in the log directory
}
type periodic struct {
StatsTimeStr string `json:"stats_time"`
KeepAliveTimeStr string `json:"keep_alive_time"`
// omitempty
StatsTime time.Duration `json:"-"`
KeepAliveTime time.Duration `json:"-"`
}
// timeoutconfig contains timeouts used for intra-cluster communication
type timeoutconfig struct {
DefaultStr string `json:"default"`
Default time.Duration `json:"-"` // omitempty
DefaultLongStr string `json:"default_long"`
DefaultLong time.Duration `json:"-"` //
MaxKeepaliveStr string `json:"max_keepalive"`
MaxKeepalive time.Duration `json:"-"` //
ProxyPingStr string `json:"proxy_ping"`
ProxyPing time.Duration `json:"-"` //
VoteRequestStr string `json:"vote_request"`
VoteRequest time.Duration `json:"-"` //
}
type proxyconfig struct {
Primary proxycnf `json:"primary"`
Original proxycnf `json:"original"`
}
type proxycnf struct {
ID string `json:"id"` // used to register caching servers/other proxies
URL string `json:"url"` // used to register caching servers/other proxies
Passthru bool `json:"passthru"` // false: get then redirect, true (default): redirect right away
}
type lruconfig struct {
LowWM uint32 `json:"lowwm"` // capacity usage low watermark
HighWM uint32 `json:"highwm"` // capacity usage high watermark
AtimeCacheMax uint64 `json:"atime_cache_max"` // atime cache - max num entries
DontEvictTimeStr string `json:"dont_evict_time"` // eviction is not permitted during [atime, atime + dont]
CapacityUpdTimeStr string `json:"capacity_upd_time"` // min time to update capacity
DontEvictTime time.Duration `json:"-"` // omitempty
CapacityUpdTime time.Duration `json:"-"` // ditto
LRUEnabled bool `json:"lru_enabled"` // LRU will only run when LRUEnabled is true
}
type rebalanceconf struct {
StartupDelayTimeStr string `json:"startup_delay_time"`
StartupDelayTime time.Duration `json:"-"` // omitempty
RebalancingEnabled bool `json:"rebalancing_enabled"`
}
type testfspathconf struct {
Root string `json:"root"`
Count int `json:"count"`
Instance int `json:"instance"`
}
type netconfig struct {
IPv4 string `json:"ipv4"`
L4 l4cnf `json:"l4"`
HTTP httpcnf `json:"http"`
}
type l4cnf struct {
Proto string `json:"proto"` // tcp, udp
Port string `json:"port"` // listening port
}
type httpcnf struct {
MaxNumTargets int `json:"max_num_targets"` // estimated max num targets (to count idle conns)
UseHTTPS bool `json:"use_https"` // use HTTPS instead of HTTP
Certificate string `json:"server_certificate"` // HTTPS: openssl certificate
Key string `json:"server_key"` // HTTPS: openssl key
}
type cksumconfig struct {
Checksum string `json:"checksum"` // DFC checksum: xxhash:none
ValidateColdGet bool `json:"validate_cold_get"` // MD5 (ETag) validation upon cold GET
}
type versionconfig struct {
ValidateWarmGet bool `json:"validate_warm_get"` // True: validate object version upon warm GET
Versioning string `json:"versioning"` // types of objects versioning is enabled for: all, cloud, local, none
}
type fskeeperconf struct {
FSCheckTimeStr string `json:"fs_check_time"`
FSCheckTime time.Duration `json:"-"` // omitempty
OfflineFSCheckTimeStr string `json:"offline_fs_check_time"`
OfflineFSCheckTime time.Duration `json:"-"` // omitempty
Enabled bool `json:"fskeeper_enabled"`
}
type experimental struct {
AckPut string `json:"ack_put"`
MaxMemMB int `json:"max_mem_mb"` // max memory size for the "memory" option - FIXME: niy
}
//==============================
//
// config functions
//
//==============================
func initconfigparam() error {
getConfig(clivars.conffile)
err := flag.Lookup("log_dir").Value.Set(ctx.config.Log.Dir)
if err != nil {
glog.Errorf("Failed to flag-set glog dir %q, err: %v", ctx.config.Log.Dir, err)
}
if err = CreateDir(ctx.config.Log.Dir); err != nil {
glog.Errorf("Failed to create log dir %q, err: %v", ctx.config.Log.Dir, err)
return err
}
if err = validateconf(); err != nil {
return err
}
// glog rotate
glog.MaxSize = ctx.config.Log.MaxSize
if glog.MaxSize > GiB {
glog.Errorf("Log.MaxSize %d exceeded 1GB, setting the default 1MB", glog.MaxSize)
glog.MaxSize = MiB
}
// CLI override
if clivars.statstime != 0 {
ctx.config.Periodic.StatsTime = clivars.statstime
}
if clivars.proxyurl != "" {
ctx.config.Proxy.Primary.ID = ""
ctx.config.Proxy.Primary.URL = clivars.proxyurl
}
if clivars.loglevel != "" {
if err = setloglevel(clivars.loglevel); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", clivars.loglevel, err)
}
} else {
if err = setloglevel(ctx.config.Log.Level); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", ctx.config.Log.Level, err)
}
}
if build != "" {
glog.Infof("Build: %s", build) // git rev-parse --short HEAD
}
glog.Infof("Logdir: %q Proto: %s Port: %s Verbosity: %s",
ctx.config.Log.Dir, ctx.config.Net.L4.Proto, ctx.config.Net.L4.Port, ctx.config.Log.Level)
glog.Infof("Config: %q Role: %s StatsTime: %v", clivars.conffile, clivars.role, ctx.config.Periodic.StatsTime)
return err
}
func getConfig(fpath string) {
raw, err := ioutil.ReadFile(fpath)
if err != nil {
glog.Errorf("Failed to read config %q, err: %v", fpath, err)
os.Exit(1)
}
err = json.Unmarshal(raw, &ctx.config)
if err != nil |
}
func validateVersion(version string) error {
versions := []string{VersionAll, VersionCloud, VersionLocal, VersionNone}
versionValid := false
for _, v := range versions {
if v == version {
versionValid = true
break
}
}
if !versionValid {
return fmt.Errorf("Invalid version: %s - expecting one of %s", version, strings.Join(versions, ", "))
}
return nil
}
// StartupDelayTimeStr string `json:"startup_delay_time"`
// StartupDelayTime time.Duration `json:"-"` // omitempty
func validateconf() (err error) {
// durations
if ctx.config.Periodic.StatsTime, err = time.ParseDuration(ctx.config.Periodic.StatsTimeStr); err != nil {
return fmt.Errorf("Bad stats-time format %s, err: %v", ctx.config.Periodic.StatsTimeStr, err)
}
if ctx.config.Timeout.Default, err = time.ParseDuration(ctx.config.Timeout.DefaultStr); err != nil {
return fmt.Errorf("Bad Timeout default format %s, err: %v", ctx.config.Timeout.DefaultStr, err)
}
if ctx.config.Timeout.DefaultLong, err = time.ParseDuration(ctx.config.Timeout.DefaultLongStr); err != | {
glog.Errorf("Failed to json-unmarshal config %q, err: %v", fpath, err)
os.Exit(1)
} | conditional_block |
config.go | `json:"proto"` // tcp, udp
Port string `json:"port"` // listening port
}
type httpcnf struct {
MaxNumTargets int `json:"max_num_targets"` // estimated max num targets (to count idle conns)
UseHTTPS bool `json:"use_https"` // use HTTPS instead of HTTP
Certificate string `json:"server_certificate"` // HTTPS: openssl certificate
Key string `json:"server_key"` // HTTPS: openssl key
}
type cksumconfig struct {
Checksum string `json:"checksum"` // DFC checksum: xxhash:none
ValidateColdGet bool `json:"validate_cold_get"` // MD5 (ETag) validation upon cold GET
}
type versionconfig struct {
ValidateWarmGet bool `json:"validate_warm_get"` // True: validate object version upon warm GET
Versioning string `json:"versioning"` // types of objects versioning is enabled for: all, cloud, local, none
}
type fskeeperconf struct {
FSCheckTimeStr string `json:"fs_check_time"`
FSCheckTime time.Duration `json:"-"` // omitempty
OfflineFSCheckTimeStr string `json:"offline_fs_check_time"`
OfflineFSCheckTime time.Duration `json:"-"` // omitempty
Enabled bool `json:"fskeeper_enabled"`
}
type experimental struct {
AckPut string `json:"ack_put"`
MaxMemMB int `json:"max_mem_mb"` // max memory size for the "memory" option - FIXME: niy
}
//==============================
//
// config functions
//
//==============================
func initconfigparam() error {
getConfig(clivars.conffile)
err := flag.Lookup("log_dir").Value.Set(ctx.config.Log.Dir)
if err != nil {
glog.Errorf("Failed to flag-set glog dir %q, err: %v", ctx.config.Log.Dir, err)
}
if err = CreateDir(ctx.config.Log.Dir); err != nil {
glog.Errorf("Failed to create log dir %q, err: %v", ctx.config.Log.Dir, err)
return err
}
if err = validateconf(); err != nil {
return err
}
// glog rotate
glog.MaxSize = ctx.config.Log.MaxSize
if glog.MaxSize > GiB {
glog.Errorf("Log.MaxSize %d exceeded 1GB, setting the default 1MB", glog.MaxSize)
glog.MaxSize = MiB
}
// CLI override
if clivars.statstime != 0 {
ctx.config.Periodic.StatsTime = clivars.statstime
}
if clivars.proxyurl != "" {
ctx.config.Proxy.Primary.ID = ""
ctx.config.Proxy.Primary.URL = clivars.proxyurl
}
if clivars.loglevel != "" {
if err = setloglevel(clivars.loglevel); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", clivars.loglevel, err)
}
} else {
if err = setloglevel(ctx.config.Log.Level); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", ctx.config.Log.Level, err)
}
}
if build != "" {
glog.Infof("Build: %s", build) // git rev-parse --short HEAD
}
glog.Infof("Logdir: %q Proto: %s Port: %s Verbosity: %s",
ctx.config.Log.Dir, ctx.config.Net.L4.Proto, ctx.config.Net.L4.Port, ctx.config.Log.Level)
glog.Infof("Config: %q Role: %s StatsTime: %v", clivars.conffile, clivars.role, ctx.config.Periodic.StatsTime)
return err
}
func getConfig(fpath string) {
raw, err := ioutil.ReadFile(fpath)
if err != nil {
glog.Errorf("Failed to read config %q, err: %v", fpath, err)
os.Exit(1)
}
err = json.Unmarshal(raw, &ctx.config)
if err != nil {
glog.Errorf("Failed to json-unmarshal config %q, err: %v", fpath, err)
os.Exit(1)
}
}
func validateVersion(version string) error {
versions := []string{VersionAll, VersionCloud, VersionLocal, VersionNone}
versionValid := false
for _, v := range versions {
if v == version {
versionValid = true
break
}
}
if !versionValid {
return fmt.Errorf("Invalid version: %s - expecting one of %s", version, strings.Join(versions, ", "))
}
return nil
}
// StartupDelayTimeStr string `json:"startup_delay_time"`
// StartupDelayTime time.Duration `json:"-"` // omitempty
func validateconf() (err error) {
// durations
if ctx.config.Periodic.StatsTime, err = time.ParseDuration(ctx.config.Periodic.StatsTimeStr); err != nil {
return fmt.Errorf("Bad stats-time format %s, err: %v", ctx.config.Periodic.StatsTimeStr, err)
}
if ctx.config.Timeout.Default, err = time.ParseDuration(ctx.config.Timeout.DefaultStr); err != nil {
return fmt.Errorf("Bad Timeout default format %s, err: %v", ctx.config.Timeout.DefaultStr, err)
}
if ctx.config.Timeout.DefaultLong, err = time.ParseDuration(ctx.config.Timeout.DefaultLongStr); err != nil {
return fmt.Errorf("Bad Timeout default_long format %s, err %v", ctx.config.Timeout.DefaultLongStr, err)
}
if ctx.config.Periodic.KeepAliveTime, err = time.ParseDuration(ctx.config.Periodic.KeepAliveTimeStr); err != nil {
return fmt.Errorf("Bad keep_alive_time format %s, err: %v", ctx.config.Periodic.KeepAliveTimeStr, err)
}
if ctx.config.LRU.DontEvictTime, err = time.ParseDuration(ctx.config.LRU.DontEvictTimeStr); err != nil {
return fmt.Errorf("Bad dont_evict_time format %s, err: %v", ctx.config.LRU.DontEvictTimeStr, err)
}
if ctx.config.LRU.CapacityUpdTime, err = time.ParseDuration(ctx.config.LRU.CapacityUpdTimeStr); err != nil {
return fmt.Errorf("Bad capacity_upd_time format %s, err: %v", ctx.config.LRU.CapacityUpdTimeStr, err)
}
if ctx.config.Rebalance.StartupDelayTime, err = time.ParseDuration(ctx.config.Rebalance.StartupDelayTimeStr); err != nil {
return fmt.Errorf("Bad startup_delay_time format %s, err: %v", ctx.config.Rebalance.StartupDelayTimeStr, err)
}
hwm, lwm := ctx.config.LRU.HighWM, ctx.config.LRU.LowWM
if hwm <= 0 || lwm <= 0 || hwm < lwm || lwm > 100 || hwm > 100 {
return fmt.Errorf("Invalid LRU configuration %+v", ctx.config.LRU)
}
if ctx.config.TestFSP.Count == 0 {
for fp1 := range ctx.config.FSpaths {
for fp2 := range ctx.config.FSpaths {
if fp1 != fp2 && (strings.HasPrefix(fp1, fp2) || strings.HasPrefix(fp2, fp1)) {
return fmt.Errorf("Invalid fspaths: %q is a prefix or includes as a prefix %q", fp1, fp2)
}
}
}
}
if ctx.config.Cksum.Checksum != ChecksumXXHash && ctx.config.Cksum.Checksum != ChecksumNone {
return fmt.Errorf("Invalid checksum: %s - expecting %s or %s", ctx.config.Cksum.Checksum, ChecksumXXHash, ChecksumNone)
}
if err := validateVersion(ctx.config.Ver.Versioning); err != nil {
return err
}
if ctx.config.FSKeeper.FSCheckTime, err = time.ParseDuration(ctx.config.FSKeeper.FSCheckTimeStr); err != nil {
return fmt.Errorf("Bad FSKeeper fs_check_time format %s, err %v", ctx.config.FSKeeper.FSCheckTimeStr, err)
}
if ctx.config.FSKeeper.OfflineFSCheckTime, err = time.ParseDuration(ctx.config.FSKeeper.OfflineFSCheckTimeStr); err != nil {
return fmt.Errorf("Bad FSKeeper offline_fs_check_time format %s, err %v", ctx.config.FSKeeper.OfflineFSCheckTimeStr, err)
}
if ctx.config.Timeout.MaxKeepalive, err = time.ParseDuration(ctx.config.Timeout.MaxKeepaliveStr); err != nil {
return fmt.Errorf("Bad Timeout max_keepalive format %s, err %v", ctx.config.Timeout.MaxKeepaliveStr, err)
}
if ctx.config.Timeout.ProxyPing, err = time.ParseDuration(ctx.config.Timeout.ProxyPingStr); err != nil {
return fmt.Errorf("Bad Timeout proxy_ping format %s, err %v", ctx.config.Timeout.ProxyPingStr, err)
}
if ctx.config.Timeout.VoteRequest, err = time.ParseDuration(ctx.config.Timeout.VoteRequestStr); err != nil {
return fmt.Errorf("Bad Timeout vote_request format %s, err %v", ctx.config.Timeout.VoteRequestStr, err)
}
return nil
}
func | setloglevel | identifier_name | |
config.go | "github.com/golang/glog"
)
const (
KiB = 1024
MiB = 1024 * KiB
GiB = 1024 * MiB
)
// checksums: xattr, http header, and config
const (
xattrXXHashVal = "user.obj.dfchash"
xattrObjVersion = "user.obj.version"
ChecksumNone = "none"
ChecksumXXHash = "xxhash"
ChecksumMD5 = "md5"
VersionAll = "all"
VersionCloud = "cloud"
VersionLocal = "local"
VersionNone = "none"
)
const (
AckWhenInMem = "memory"
AckWhenOnDisk = "disk" // the default
)
const (
lbname = "localbuckets" // base name of the lbconfig file; not to confuse with config.Localbuckets mpath sub-directory
mpname = "mpaths" // base name to persist ctx.mountpaths
)
//==============================
//
// config types
//
//==============================
type dfconfig struct {
Confdir string `json:"confdir"`
CloudProvider string `json:"cloudprovider"`
CloudBuckets string `json:"cloud_buckets"`
LocalBuckets string `json:"local_buckets"`
// structs
Log logconfig `json:"log"`
Periodic periodic `json:"periodic"`
Timeout timeoutconfig `json:"timeout"`
Proxy proxyconfig `json:"proxyconfig"`
LRU lruconfig `json:"lru_config"`
Rebalance rebalanceconf `json:"rebalance_conf"`
Cksum cksumconfig `json:"cksum_config"`
Ver versionconfig `json:"version_config"`
FSpaths map[string]string `json:"fspaths"`
TestFSP testfspathconf `json:"test_fspaths"`
Net netconfig `json:"netconfig"`
FSKeeper fskeeperconf `json:"fskeeper"`
Experimental experimental `json:"experimental"`
H2c bool `json:"h2c"`
}
type logconfig struct {
Dir string `json:"logdir"` // log directory
Level string `json:"loglevel"` // log level aka verbosity
MaxSize uint64 `json:"logmaxsize"` // size that triggers log rotation
MaxTotal uint64 `json:"logmaxtotal"` // max total size of all the logs in the log directory
}
type periodic struct {
StatsTimeStr string `json:"stats_time"`
KeepAliveTimeStr string `json:"keep_alive_time"`
// omitempty
StatsTime time.Duration `json:"-"`
KeepAliveTime time.Duration `json:"-"`
}
// timeoutconfig contains timeouts used for intra-cluster communication
type timeoutconfig struct {
DefaultStr string `json:"default"`
Default time.Duration `json:"-"` // omitempty
DefaultLongStr string `json:"default_long"`
DefaultLong time.Duration `json:"-"` //
MaxKeepaliveStr string `json:"max_keepalive"`
MaxKeepalive time.Duration `json:"-"` //
ProxyPingStr string `json:"proxy_ping"`
ProxyPing time.Duration `json:"-"` //
VoteRequestStr string `json:"vote_request"`
VoteRequest time.Duration `json:"-"` //
}
type proxyconfig struct {
Primary proxycnf `json:"primary"`
Original proxycnf `json:"original"`
}
type proxycnf struct {
ID string `json:"id"` // used to register caching servers/other proxies
URL string `json:"url"` // used to register caching servers/other proxies
Passthru bool `json:"passthru"` // false: get then redirect, true (default): redirect right away
}
type lruconfig struct {
LowWM uint32 `json:"lowwm"` // capacity usage low watermark
HighWM uint32 `json:"highwm"` // capacity usage high watermark
AtimeCacheMax uint64 `json:"atime_cache_max"` // atime cache - max num entries
DontEvictTimeStr string `json:"dont_evict_time"` // eviction is not permitted during [atime, atime + dont]
CapacityUpdTimeStr string `json:"capacity_upd_time"` // min time to update capacity
DontEvictTime time.Duration `json:"-"` // omitempty
CapacityUpdTime time.Duration `json:"-"` // ditto
LRUEnabled bool `json:"lru_enabled"` // LRU will only run when LRUEnabled is true
}
type rebalanceconf struct {
StartupDelayTimeStr string `json:"startup_delay_time"`
StartupDelayTime time.Duration `json:"-"` // omitempty
RebalancingEnabled bool `json:"rebalancing_enabled"`
}
type testfspathconf struct {
Root string `json:"root"`
Count int `json:"count"`
Instance int `json:"instance"`
}
type netconfig struct {
IPv4 string `json:"ipv4"`
L4 l4cnf `json:"l4"`
HTTP httpcnf `json:"http"`
}
type l4cnf struct {
Proto string `json:"proto"` // tcp, udp
Port string `json:"port"` // listening port
}
type httpcnf struct {
MaxNumTargets int `json:"max_num_targets"` // estimated max num targets (to count idle conns)
UseHTTPS bool `json:"use_https"` // use HTTPS instead of HTTP
Certificate string `json:"server_certificate"` // HTTPS: openssl certificate
Key string `json:"server_key"` // HTTPS: openssl key
}
type cksumconfig struct {
Checksum string `json:"checksum"` // DFC checksum: xxhash:none
ValidateColdGet bool `json:"validate_cold_get"` // MD5 (ETag) validation upon cold GET
}
type versionconfig struct {
ValidateWarmGet bool `json:"validate_warm_get"` // True: validate object version upon warm GET
Versioning string `json:"versioning"` // types of objects versioning is enabled for: all, cloud, local, none
}
type fskeeperconf struct {
FSCheckTimeStr string `json:"fs_check_time"`
FSCheckTime time.Duration `json:"-"` // omitempty
OfflineFSCheckTimeStr string `json:"offline_fs_check_time"`
OfflineFSCheckTime time.Duration `json:"-"` // omitempty
Enabled bool `json:"fskeeper_enabled"`
}
type experimental struct {
AckPut string `json:"ack_put"`
MaxMemMB int `json:"max_mem_mb"` // max memory size for the "memory" option - FIXME: niy
}
//==============================
//
// config functions
//
//==============================
func initconfigparam() error {
getConfig(clivars.conffile)
err := flag.Lookup("log_dir").Value.Set(ctx.config.Log.Dir)
if err != nil {
glog.Errorf("Failed to flag-set glog dir %q, err: %v", ctx.config.Log.Dir, err)
}
if err = CreateDir(ctx.config.Log.Dir); err != nil {
glog.Errorf("Failed to create log dir %q, err: %v", ctx.config.Log.Dir, err)
return err
}
if err = validateconf(); err != nil {
return err
}
// glog rotate
glog.MaxSize = ctx.config.Log.MaxSize
if glog.MaxSize > GiB {
glog.Errorf("Log.MaxSize %d exceeded 1GB, setting the default 1MB", glog.MaxSize)
glog.MaxSize = MiB
}
// CLI override
if clivars.statstime != 0 {
ctx.config.Periodic.StatsTime = clivars.statstime
}
if clivars.proxyurl != "" {
ctx.config.Proxy.Primary.ID = ""
ctx.config.Proxy.Primary.URL = clivars.proxyurl
}
if clivars.loglevel != "" {
if err = setloglevel(clivars.loglevel); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", clivars.loglevel, err)
}
} else {
if err = setloglevel(ctx.config.Log.Level); err != nil {
glog.Errorf("Failed to set log level = %s, err: %v", ctx.config.Log.Level, err)
}
}
if build != "" {
glog.Infof("Build: %s", build) // git rev-parse --short HEAD
}
glog.Infof("Logdir: %q Proto: %s Port: %s Verbosity: %s",
ctx.config.Log.Dir, ctx.config.Net.L4.Proto, ctx.config.Net.L4.Port, ctx.config.Log.Level)
glog.Infof("Config: %q Role: %s StatsTime: %v", clivars.conffile, clivars.role, ctx.config.Periodic.StatsTime)
return err
}
func getConfig(fpath string) {
raw, err := ioutil.ReadFile(fpath)
if err != nil {
glog.Errorf("Failed to read config | "os"
"strings"
"time"
| random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.